[VOL-3069] Pass context to methods that perform logging and need the context

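voltha-lib-go v3.2.0 replaces the package-level log.Logger with a
context-aware log.CLogger: packages now obtain their logger through
log.RegisterPackage (formerly log.AddPackage), and every logging call
takes a context.Context as its first argument. Library APIs that log
internally (kvstore, kafka, probe, the core/adapter proxies) gain a
leading ctx parameter as well, so the adapter threads the context from
main() and the request handlers down to those call sites.

A minimal sketch of the resulting per-package pattern, assembled from
the common.go and main.go hunks below (the demo function and its log
fields are illustrative only):

    package main

    import (
        "context"

        "github.com/opencord/voltha-lib-go/v3/pkg/log"
    )

    var logger log.CLogger

    func init() {
        // RegisterPackage replaces AddPackage and returns a CLogger
        // whose log level can still be changed at run time.
        var err error
        logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "main"})
        if err != nil {
            panic(err)
        }
    }

    func demo(ctx context.Context) {
        // The context is now the first argument on every call.
        logger.Infow(ctx, "example-event", log.Fields{"key": "value"})
    }
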
Change-Id: Ie84f9e240aa4f47d0046acaac0d82d21b17252e5
diff --git a/cmd/openolt-adapter/common.go b/cmd/openolt-adapter/common.go
index 14a91af..3dead9d 100644
--- a/cmd/openolt-adapter/common.go
+++ b/cmd/openolt-adapter/common.go
@@ -21,12 +21,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "main"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "main"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/cmd/openolt-adapter/main.go b/cmd/openolt-adapter/main.go
index 7650ad3..b92d989 100644
--- a/cmd/openolt-adapter/main.go
+++ b/cmd/openolt-adapter/main.go
@@ -68,7 +68,7 @@
 }
 
 func (a *adapter) start(ctx context.Context) {
-	logger.Info("Starting Core Adapter components")
+	logger.Info(ctx, "Starting Core Adapter components")
 	var err error
 
 	var p *probe.Probe
@@ -76,6 +76,7 @@
 		if _, ok := value.(*probe.Probe); ok {
 			p = value.(*probe.Probe)
 			p.RegisterService(
+				ctx,
 				"message-bus",
 				"kv-store",
 				"container-proxy",
@@ -86,57 +87,57 @@
 	}
 
 	// Setup KV Client
-	logger.Debugw("create-kv-client", log.Fields{"kvstore": a.config.KVStoreType})
-	if err = a.setKVClient(); err != nil {
-		logger.Fatalw("error-setting-kv-client", log.Fields{"error": err})
+	logger.Debugw(ctx, "create-kv-client", log.Fields{"kvstore": a.config.KVStoreType})
+	if err = a.setKVClient(ctx); err != nil {
+		logger.Fatalw(ctx, "error-setting-kv-client", log.Fields{"error": err})
 	}
 
 	if p != nil {
-		p.UpdateStatus("kv-store", probe.ServiceStatusRunning)
+		p.UpdateStatus(ctx, "kv-store", probe.ServiceStatusRunning)
 	}
 
 	// Setup Log Config
-	cm := conf.NewConfigManager(a.kvClient, a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
+	cm := conf.NewConfigManager(ctx, a.kvClient, a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
 	go conf.StartLogLevelConfigProcessing(cm, ctx)
 
 	// Setup Kafka Client
-	if a.kafkaClient, err = newKafkaClient("sarama", a.config.KafkaAdapterAddress); err != nil {
-		logger.Fatalw("Unsupported-common-client", log.Fields{"error": err})
+	if a.kafkaClient, err = newKafkaClient(ctx, "sarama", a.config.KafkaAdapterAddress); err != nil {
+		logger.Fatalw(ctx, "Unsupported-common-client", log.Fields{"error": err})
 	}
 
 	if p != nil {
-		p.UpdateStatus("message-bus", probe.ServiceStatusRunning)
+		p.UpdateStatus(ctx, "message-bus", probe.ServiceStatusRunning)
 	}
 
 	// setup endpointManager
 
 	// Start the common InterContainer Proxy - retries indefinitely
 	if a.kip, err = a.startInterContainerProxy(ctx, -1); err != nil {
-		logger.Fatal("error-starting-inter-container-proxy")
+		logger.Fatal(ctx, "error-starting-inter-container-proxy")
 	}
 
 	// Create the core proxy to handle requests to the Core
-	a.coreProxy = com.NewCoreProxy(a.kip, a.config.Topic, a.config.CoreTopic)
+	a.coreProxy = com.NewCoreProxy(ctx, a.kip, a.config.Topic, a.config.CoreTopic)
 
 	// Create the adaptor proxy to handle request between olt and onu
-	a.adapterProxy = com.NewAdapterProxy(a.kip, "brcm_openomci_onu", a.config.CoreTopic, cm.Backend)
+	a.adapterProxy = com.NewAdapterProxy(ctx, a.kip, "brcm_openomci_onu", a.config.CoreTopic, cm.Backend)
 
 	// Create the event proxy to post events to KAFKA
 	a.eventProxy = com.NewEventProxy(com.MsgClient(a.kafkaClient), com.MsgTopic(kafka.Topic{Name: a.config.EventTopic}))
 
 	// Create the open OLT adapter
 	if a.iAdapter, err = a.startOpenOLT(ctx, a.kip, a.coreProxy, a.adapterProxy, a.eventProxy, a.config); err != nil {
-		logger.Fatalw("error-starting-openolt", log.Fields{"error": err})
+		logger.Fatalw(ctx, "error-starting-openolt", log.Fields{"error": err})
 	}
 
 	// Register the core request handler
 	if err = a.setupRequestHandler(ctx, a.instanceID, a.iAdapter); err != nil {
-		logger.Fatalw("error-setting-core-request-handler", log.Fields{"error": err})
+		logger.Fatalw(ctx, "error-setting-core-request-handler", log.Fields{"error": err})
 	}
 
 	// Register this adapter to the Core - retries indefinitely
 	if err = a.registerWithCore(ctx, -1); err != nil {
-		logger.Fatal("error-registering-with-core")
+		logger.Fatal(ctx, "error-registering-with-core")
 	}
 
 	// check the readiness and liveliness and update the probe status
@@ -185,7 +186,7 @@
 			}
 		case <-timeoutTimer.C:
 			// Check the status of the kv-store. Use timeout of 2 seconds to avoid forever blocking
-			logger.Info("kv-store liveliness-recheck")
+			logger.Info(ctx, "kv-store-liveliness-recheck")
 			timeoutCtx, cancelFunc := context.WithTimeout(ctx, 2*time.Second)
 
 			kvStoreChannel <- a.kvClient.IsConnectionUp(timeoutCtx)
@@ -200,8 +201,8 @@
 and update the status in the probe.
 */
 func (a *adapter) checkKafkaReadiness(ctx context.Context) {
-	livelinessChannel := a.kafkaClient.EnableLivenessChannel(true)
-	healthinessChannel := a.kafkaClient.EnableHealthinessChannel(true)
+	livelinessChannel := a.kafkaClient.EnableLivenessChannel(ctx, true)
+	healthinessChannel := a.kafkaClient.EnableHealthinessChannel(ctx, true)
 	timeout := a.config.LiveProbeInterval
 	failed := false
 	for {
@@ -238,13 +239,13 @@
 				<-timeoutTimer.C
 			}
 		case <-timeoutTimer.C:
-			logger.Info("kafka-proxy-liveness-recheck")
+			logger.Info(ctx, "kafka-proxy-liveness-recheck")
 			// send the liveness probe in a goroutine; we don't want to deadlock ourselves as
 			// the liveness probe may wait (and block) writing to our channel.
-			err := a.kafkaClient.SendLiveness()
+			err := a.kafkaClient.SendLiveness(ctx)
 			if err != nil {
 				// Catch possible error case if sending liveness after Sarama has been stopped.
-				logger.Warnw("error-kafka-send-liveness", log.Fields{"error": err})
+				logger.Warnw(ctx, "error-kafka-send-liveness", log.Fields{"error": err})
 			}
 		}
 	}
@@ -261,34 +262,34 @@
 	if a.kvClient != nil {
 		// Release all reservations
 		if err := a.kvClient.ReleaseAllReservations(ctx); err != nil {
-			logger.Infow("fail-to-release-all-reservations", log.Fields{"error": err})
+			logger.Infow(ctx, "fail-to-release-all-reservations", log.Fields{"error": err})
 		}
 		// Close the DB connection
-		a.kvClient.Close()
+		a.kvClient.Close(ctx)
 	}
 
 	if a.kip != nil {
-		a.kip.Stop()
+		a.kip.Stop(ctx)
 	}
 
 	// TODO:  More cleanup
 }
 
-func newKVClient(storeType, address string, timeout time.Duration) (kvstore.Client, error) {
+func newKVClient(ctx context.Context, storeType, address string, timeout time.Duration) (kvstore.Client, error) {
 
-	logger.Infow("kv-store-type", log.Fields{"store": storeType})
+	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.FatalLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func newKafkaClient(clientType, address string) (kafka.Client, error) {
+func newKafkaClient(ctx context.Context, clientType, address string) (kafka.Client, error) {
 
-	logger.Infow("common-client-type", log.Fields{"client": clientType})
+	logger.Infow(ctx, "common-client-type", log.Fields{"client": clientType})
 	switch clientType {
 	case "sarama":
 		return kafka.NewSaramaClient(
@@ -303,8 +304,8 @@
 	return nil, errors.New("unsupported-client-type")
 }
 
-func (a *adapter) setKVClient() error {
-	client, err := newKVClient(a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
+func (a *adapter) setKVClient(ctx context.Context) error {
+	client, err := newKVClient(ctx, a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
 	if err != nil {
 		a.kvClient = nil
 		return err
@@ -315,7 +316,7 @@
 }
 
 func (a *adapter) startInterContainerProxy(ctx context.Context, retries int) (kafka.InterContainerProxy, error) {
-	logger.Infow("starting-intercontainer-messaging-proxy", log.Fields{"address": a.config.KafkaAdapterAddress,
+	logger.Infow(ctx, "starting-intercontainer-messaging-proxy", log.Fields{"address": a.config.KafkaAdapterAddress,
 		"topic": a.config.Topic})
 	var err error
 	kip := kafka.NewInterContainerProxy(
@@ -324,8 +325,8 @@
 		kafka.DefaultTopic(&kafka.Topic{Name: a.config.Topic}))
 	count := 0
 	for {
-		if err = kip.Start(); err != nil {
-			logger.Warnw("error-starting-messaging-proxy", log.Fields{"error": err})
+		if err = kip.Start(ctx); err != nil {
+			logger.Warnw(ctx, "error-starting-messaging-proxy", log.Fields{"error": err})
 			if retries == count {
 				return nil, err
 			}
@@ -337,14 +338,14 @@
 		}
 	}
 	probe.UpdateStatusFromContext(ctx, "container-proxy", probe.ServiceStatusRunning)
-	logger.Info("common-messaging-proxy-created")
+	logger.Info(ctx, "common-messaging-proxy-created")
 	return kip, nil
 }
 
 func (a *adapter) startOpenOLT(ctx context.Context, kip kafka.InterContainerProxy,
 	cp adapterif.CoreProxy, ap adapterif.AdapterProxy, ep adapterif.EventProxy,
 	cfg *config.AdapterFlags) (*ac.OpenOLT, error) {
-	logger.Info("starting-open-olt")
+	logger.Info(ctx, "starting-open-olt")
 	var err error
 	sOLT := ac.NewOpenOLT(ctx, a.kip, cp, ap, ep, cfg)
 
@@ -352,25 +353,25 @@
 		return nil, err
 	}
 
-	logger.Info("open-olt-started")
+	logger.Info(ctx, "open-olt-started")
 	return sOLT, nil
 }
 
 func (a *adapter) setupRequestHandler(ctx context.Context, coreInstanceID string, iadapter adapters.IAdapter) error {
-	logger.Info("setting-request-handler")
+	logger.Info(ctx, "setting-request-handler")
 	requestProxy := com.NewRequestHandlerProxy(coreInstanceID, iadapter, a.coreProxy)
-	if err := a.kip.SubscribeWithRequestHandlerInterface(kafka.Topic{Name: a.config.Topic}, requestProxy); err != nil {
+	if err := a.kip.SubscribeWithRequestHandlerInterface(ctx, kafka.Topic{Name: a.config.Topic}, requestProxy); err != nil {
 		return err
 
 	}
 	probe.UpdateStatusFromContext(ctx, "core-request-handler", probe.ServiceStatusRunning)
-	logger.Info("request-handler-setup-done")
+	logger.Info(ctx, "request-handler-setup-done")
 	return nil
 }
 
 func (a *adapter) registerWithCore(ctx context.Context, retries int) error {
 	adapterID := fmt.Sprintf("openolt_%d", a.config.CurrentReplica)
-	logger.Infow("registering-with-core", log.Fields{
+	logger.Infow(ctx, "registering-with-core", log.Fields{
 		"adapterID":      adapterID,
 		"currentReplica": a.config.CurrentReplica,
 		"totalReplicas":  a.config.TotalReplicas,
@@ -395,7 +396,7 @@
 	count := 0
 	for {
 		if err := a.coreProxy.RegisterAdapter(context.TODO(), adapterDescription, deviceTypes); err != nil {
-			logger.Warnw("registering-with-core-failed", log.Fields{"error": err})
+			logger.Warnw(ctx, "registering-with-core-failed", log.Fields{"error": err})
 			if retries == count {
 				return err
 			}
@@ -407,11 +408,11 @@
 		}
 	}
 	probe.UpdateStatusFromContext(ctx, "register-with-core", probe.ServiceStatusRunning)
-	logger.Info("registered-with-core")
+	logger.Info(ctx, "registered-with-core")
 	return nil
 }
 
-func waitForExit() int {
+func waitForExit(ctx context.Context) int {
 	signalChannel := make(chan os.Signal, 1)
 	signal.Notify(signalChannel,
 		syscall.SIGHUP,
@@ -428,10 +429,10 @@
 			syscall.SIGINT,
 			syscall.SIGTERM,
 			syscall.SIGQUIT:
-			logger.Infow("closing-signal-received", log.Fields{"signal": s})
+			logger.Infow(ctx, "closing-signal-received", log.Fields{"signal": s})
 			exitChannel <- 0
 		default:
-			logger.Infow("unexpected-signal-received", log.Fields{"signal": s})
+			logger.Infow(ctx, "unexpected-signal-received", log.Fields{"signal": s})
 			exitChannel <- 1
 		}
 	}()
@@ -458,6 +459,7 @@
 }
 
 func main() {
+	ctx := context.Background()
 	start := time.Now()
 
 	cf := config.NewAdapterFlags()
@@ -467,7 +469,7 @@
 
 	logLevel, err := log.StringToLogLevel(cf.LogLevel)
 	if err != nil {
-		logger.Fatalf("Cannot setup logging, %s", err)
+		logger.Fatalf(ctx, "Cannot setup logging, %s", err)
 	}
 
 	// Setup default logger - applies for packages that do not have specific logger set
@@ -497,7 +499,7 @@
 		printBanner()
 	}
 
-	logger.Infow("config", log.Fields{"config": *cf})
+	logger.Infow(ctx, "config", log.Fields{"config": *cf})
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -505,18 +507,18 @@
 	ad := newAdapter(cf)
 
 	p := &probe.Probe{}
-	go p.ListenAndServe(ad.config.ProbeAddress)
+	go p.ListenAndServe(ctx, ad.config.ProbeAddress)
 
 	probeCtx := context.WithValue(ctx, probe.ProbeContextKey, p)
 
 	go ad.start(probeCtx)
 
-	code := waitForExit()
-	logger.Infow("received-a-closing-signal", log.Fields{"code": code})
+	code := waitForExit(ctx)
+	logger.Infow(ctx, "received-a-closing-signal", log.Fields{"code": code})
 
 	// Cleanup before leaving
 	ad.stop(ctx)
 
 	elapsed := time.Since(start)
-	logger.Infow("run-time", log.Fields{"instanceId": ad.config.InstanceID, "time": elapsed / time.Second})
+	logger.Infow(ctx, "run-time", log.Fields{"instanceId": ad.config.InstanceID, "time": elapsed / time.Second})
 }
diff --git a/cmd/openolt-adapter/main_test.go b/cmd/openolt-adapter/main_test.go
index bd5c81a..f084b5e 100644
--- a/cmd/openolt-adapter/main_test.go
+++ b/cmd/openolt-adapter/main_test.go
@@ -61,7 +61,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if err := tt.adapter.setKVClient(); (err != nil) != tt.wantErr {
+			if err := tt.adapter.setKVClient(context.Background()); (err != nil) != tt.wantErr {
 				t.Errorf("adapter.setKVClient() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -74,7 +74,7 @@
 	a.StartAt(0)
 	defer a.StopAt(0)
 
-	if err := adapt.setKVClient(); err != nil {
+	if err := adapt.setKVClient(context.Background()); err != nil {
 		t.Errorf("adapter.setKVClient() error = %v", err)
 	}
 }
@@ -137,7 +137,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, err := newKafkaClient(tt.args.clientType, tt.args.address)
+			_, err := newKafkaClient(context.Background(), tt.args.clientType, tt.args.address)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("newKafkaClient() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -157,7 +157,7 @@
 		kafka.DefaultTopic(&kafka.Topic{Name: ad.config.Topic}))
 
 	ad.kip = kip
-	ad.kip.Start()
+	ad.kip.Start(context.Background())
 
 	oolt, _ := ad.startOpenOLT(context.TODO(), nil,
 		ad.coreProxy, ad.adapterProxy, ad.eventProxy, ad.config)
@@ -174,55 +174,55 @@
 type mockKafkaClient struct {
 }
 
-func (kc *mockKafkaClient) Start() error {
+func (kc *mockKafkaClient) Start(ctx context.Context) error {
 	return nil
 }
-func (kc *mockKafkaClient) Stop() {
+func (kc *mockKafkaClient) Stop(ctx context.Context) {
 }
-func (kc *mockKafkaClient) CreateTopic(topic *kafka.Topic, numPartition int, repFactor int) error {
+func (kc *mockKafkaClient) CreateTopic(ctx context.Context, topic *kafka.Topic, numPartition int, repFactor int) error {
 	if topic != nil {
 		return nil
 	}
 	return errors.New("invalid Topic")
 }
-func (kc *mockKafkaClient) DeleteTopic(topic *kafka.Topic) error {
+func (kc *mockKafkaClient) DeleteTopic(ctx context.Context, topic *kafka.Topic) error {
 	if topic != nil {
 		return nil
 	}
 	return errors.New("invalid Topic")
 }
-func (kc *mockKafkaClient) Subscribe(topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ca.InterContainerMessage, error) {
+func (kc *mockKafkaClient) Subscribe(ctx context.Context, topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ca.InterContainerMessage, error) {
 	if topic != nil {
 		ch := make(chan *ca.InterContainerMessage)
 		return ch, nil
 	}
 	return nil, errors.New("invalid Topic")
 }
-func (kc *mockKafkaClient) UnSubscribe(topic *kafka.Topic, ch <-chan *ca.InterContainerMessage) error {
+func (kc *mockKafkaClient) UnSubscribe(ctx context.Context, topic *kafka.Topic, ch <-chan *ca.InterContainerMessage) error {
 	if topic == nil {
 		return nil
 	}
 	return errors.New("invalid Topic")
 }
-func (kc *mockKafkaClient) Send(msg interface{}, topic *kafka.Topic, keys ...string) error {
+func (kc *mockKafkaClient) Send(ctx context.Context, msg interface{}, topic *kafka.Topic, keys ...string) error {
 	if topic != nil {
 		return nil
 	}
 	return errors.New("invalid topic")
 }
 
-func (kc *mockKafkaClient) SendLiveness() error {
+func (kc *mockKafkaClient) SendLiveness(ctx context.Context) error {
 	return status.Error(codes.Unimplemented, "SendLiveness")
 }
 
-func (kc *mockKafkaClient) EnableLivenessChannel(enable bool) chan bool {
+func (kc *mockKafkaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
 	return nil
 }
 
-func (kc *mockKafkaClient) EnableHealthinessChannel(enable bool) chan bool {
+func (kc *mockKafkaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
 	return nil
 }
 
-func (kc *mockKafkaClient) SubscribeForMetadata(func(fromTopic string, timestamp time.Time)) {
+func (kc *mockKafkaClient) SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time)) {
 	return
 }
diff --git a/cmd/openolt-adapter/profile.go b/cmd/openolt-adapter/profile.go
index f539afd..98e3ac0 100644
--- a/cmd/openolt-adapter/profile.go
+++ b/cmd/openolt-adapter/profile.go
@@ -25,7 +25,7 @@
 
 func realMain() {
 	go func() {
-		logger.Fatal(http.ListenAndServe("0.0.0.0:6060", nil))
+		logger.Fatal(context.Background(), http.ListenAndServe("0.0.0.0:6060", nil))
 	}()
 
 }
diff --git a/cmd/openolt-adapter/release.go b/cmd/openolt-adapter/release.go
index a8fb220..0f18e72 100644
--- a/cmd/openolt-adapter/release.go
+++ b/cmd/openolt-adapter/release.go
@@ -19,6 +19,8 @@
 //Package main invokes the application
 package main
 
+import "context"
+
 func realMain() {
-	logger.Infoln("NOT PROFILING")
+	logger.Infoln(context.Background(), "NOT PROFILING")
 }
diff --git a/go.mod b/go.mod
index 606a766..e6d6a52 100755
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@
 	github.com/cenkalti/backoff/v3 v3.1.1
 	github.com/gogo/protobuf v1.3.1
 	github.com/golang/protobuf v1.3.2
-	github.com/opencord/voltha-lib-go/v3 v3.1.22
+	github.com/opencord/voltha-lib-go/v3 v3.2.0
 	github.com/opencord/voltha-protos/v3 v3.3.9
 	go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
 	google.golang.org/grpc v1.25.1
diff --git a/go.sum b/go.sum
index a8ec1c8..2d7782a 100644
--- a/go.sum
+++ b/go.sum
@@ -204,8 +204,8 @@
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v3 v3.1.22 h1:ZiM1yGjDKggKy1KXavtH3xuQpAc/gP4sV4/3mvfrL58=
-github.com/opencord/voltha-lib-go/v3 v3.1.22/go.mod h1:sa508HZ5vlOauh0i+WC0XFX1JZnfHtJqNIms5XBT/Z0=
+github.com/opencord/voltha-lib-go/v3 v3.2.0 h1:r7X7jZE5oJklv7GgqkuZWJMqjDZNT35X7VOMY1oyfus=
+github.com/opencord/voltha-lib-go/v3 v3.2.0/go.mod h1:sa508HZ5vlOauh0i+WC0XFX1JZnfHtJqNIms5XBT/Z0=
 github.com/opencord/voltha-protos/v3 v3.3.9 h1:BnfDN9oaRBgyAiH9ZN7LpBpEJYxjX/ZS7R4OT2hDrtY=
 github.com/opencord/voltha-protos/v3 v3.3.9/go.mod h1:nl1ETp5Iw3avxOaKD8BJlYY5wYI4KeV95aT1pL63nto=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
diff --git a/internal/pkg/config/common.go b/internal/pkg/config/common.go
index bfa8e03..1b05963 100644
--- a/internal/pkg/config/common.go
+++ b/internal/pkg/config/common.go
@@ -21,12 +21,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/pkg/core/common.go b/internal/pkg/core/common.go
index 9370c0e..4d46870 100644
--- a/internal/pkg/core/common.go
+++ b/internal/pkg/core/common.go
@@ -21,12 +21,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "core"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "core"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/pkg/core/device_handler.go b/internal/pkg/core/device_handler.go
index 91c9ced..fe30de5 100644
--- a/internal/pkg/core/device_handler.go
+++ b/internal/pkg/core/device_handler.go
@@ -169,18 +169,18 @@
 func (dh *DeviceHandler) start(ctx context.Context) {
 	dh.lockDevice.Lock()
 	defer dh.lockDevice.Unlock()
-	logger.Debugw("starting-device-agent", log.Fields{"device": dh.device})
+	logger.Debugw(ctx, "starting-device-agent", log.Fields{"device": dh.device})
 	// Add the initial device to the local model
-	logger.Debug("device-agent-started")
+	logger.Debug(ctx, "device-agent-started")
 }
 
 // stop stops the device dh.  Not much to do for now
 func (dh *DeviceHandler) stop(ctx context.Context) {
 	dh.lockDevice.Lock()
 	defer dh.lockDevice.Unlock()
-	logger.Debug("stopping-device-agent")
+	logger.Debug(ctx, "stopping-device-agent")
 	dh.exitChannel <- 1
-	logger.Debug("device-agent-stopped")
+	logger.Debug(ctx, "device-agent-stopped")
 }
 
 func macifyIP(ip net.IP) string {
@@ -194,24 +194,24 @@
 	return ""
 }
 
-func generateMacFromHost(host string) (string, error) {
+func generateMacFromHost(ctx context.Context, host string) (string, error) {
 	var genmac string
 	var addr net.IP
 	var ips []string
 	var err error
 
-	logger.Debugw("generating-mac-from-host", log.Fields{"host": host})
+	logger.Debugw(ctx, "generating-mac-from-host", log.Fields{"host": host})
 
 	if addr = net.ParseIP(host); addr == nil {
-		logger.Debugw("looking-up-hostname", log.Fields{"host": host})
+		logger.Debugw(ctx, "looking-up-hostname", log.Fields{"host": host})
 
 		if ips, err = net.LookupHost(host); err == nil {
-			logger.Debugw("dns-result-ips", log.Fields{"ips": ips})
+			logger.Debugw(ctx, "dns-result-ips", log.Fields{"ips": ips})
 			if addr = net.ParseIP(ips[0]); addr == nil {
 				return "", olterrors.NewErrInvalidValue(log.Fields{"ip": ips[0]}, nil)
 			}
 			genmac = macifyIP(addr)
-			logger.Debugw("using-ip-as-mac",
+			logger.Debugw(ctx, "using-ip-as-mac",
 				log.Fields{"host": ips[0],
 					"mac": genmac})
 			return genmac, nil
@@ -220,7 +220,7 @@
 	}
 
 	genmac = macifyIP(addr)
-	logger.Debugw("using-ip-as-mac",
+	logger.Debugw(ctx, "using-ip-as-mac",
 		log.Fields{"host": host,
 			"mac": genmac})
 	return genmac, nil
@@ -253,7 +253,7 @@
 	return "", olterrors.NewErrInvalidValue(log.Fields{"port-type": portType}, nil)
 }
 
-func (dh *DeviceHandler) addPort(intfID uint32, portType voltha.Port_PortType, state string) error {
+func (dh *DeviceHandler) addPort(ctx context.Context, intfID uint32, portType voltha.Port_PortType, state string) error {
 	var operStatus common.OperStatus_Types
 	if state == "up" {
 		operStatus = voltha.OperStatus_ACTIVE
@@ -276,7 +276,7 @@
 	if device.Ports != nil {
 		for _, dPort := range device.Ports {
 			if dPort.Type == portType && dPort.PortNo == portNum {
-				logger.Debug("port-already-exists-updating-oper-status-of-port")
+				logger.Debug(ctx, "port-already-exists-updating-oper-status-of-port")
 				if err := dh.coreProxy.PortStateUpdate(context.TODO(), dh.device.Id, portType, portNum, operStatus); err != nil {
 					return olterrors.NewErrAdapter("failed-to-update-port-state", log.Fields{
 						"device-id":   dh.device.Id,
@@ -307,23 +307,23 @@
 			MaxSpeed:   uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
 		},
 	}
-	logger.Debugw("sending-port-update-to-core", log.Fields{"port": port})
+	logger.Debugw(ctx, "sending-port-update-to-core", log.Fields{"port": port})
 	// Synchronous call to update device - this method is run in its own go routine
 	if err := dh.coreProxy.PortCreated(context.TODO(), dh.device.Id, port); err != nil {
 		return olterrors.NewErrAdapter("error-creating-port", log.Fields{
 			"device-id": dh.device.Id,
 			"port-type": portType}, err)
 	}
-	go dh.updateLocalDevice()
+	go dh.updateLocalDevice(ctx)
 	return nil
 }
 
-func (dh *DeviceHandler) updateLocalDevice() error {
+func (dh *DeviceHandler) updateLocalDevice(ctx context.Context) error {
 	dh.lockDevice.Lock()
 	defer dh.lockDevice.Unlock()
 	device, err := dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, dh.device.Id)
 	if err != nil || device == nil {
-		logger.Errorf("device", log.Fields{"device-id": dh.device.Id}, err)
+		logger.Errorw(ctx, "device-not-found", log.Fields{"device-id": dh.device.Id, "error": err})
 		return olterrors.NewErrNotFound("device", log.Fields{"device-id": dh.device.Id}, err)
 	}
 	dh.device = device
@@ -333,7 +333,7 @@
 // nolint: gocyclo
 // readIndications to read the indications from the OLT device
 func (dh *DeviceHandler) readIndications(ctx context.Context) error {
-	defer logger.Debugw("indications-ended", log.Fields{"device-id": dh.device.Id})
+	defer logger.Debugw(ctx, "indications-ended", log.Fields{"device-id": dh.device.Id})
 	defer func() {
 		dh.lockDevice.Lock()
 		dh.isReadIndicationRoutineActive = false
@@ -366,12 +366,12 @@
 	for {
 		select {
 		case <-dh.stopIndications:
-			logger.Debugw("stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
+			logger.Debugw(ctx, "stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
 			break Loop
 		default:
 			indication, err := indications.Recv()
 			if err == io.EOF {
-				logger.Infow("eof-for-indications",
+				logger.Infow(ctx, "eof-for-indications",
 					log.Fields{"err": err,
 						"device-id": dh.device.Id})
 				// Use an exponential back off to prevent getting into a tight loop
@@ -379,7 +379,7 @@
 				if duration == backoff.Stop {
 					// If we reach a maximum then warn and reset the backoff
 					// timer and keep attempting.
-					logger.Warnw("maximum-indication-backoff-reached--resetting-backoff-timer",
+					logger.Warnw(ctx, "maximum-indication-backoff-reached--resetting-backoff-timer",
 						log.Fields{"max-indication-backoff": indicationBackoff.MaxElapsedTime,
 							"device-id": dh.device.Id})
 					indicationBackoff.Reset()
@@ -390,7 +390,7 @@
 				backoff := time.NewTimer(indicationBackoff.NextBackOff())
 				select {
 				case <-dh.stopIndications:
-					logger.Debugw("stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
+					logger.Debugw(ctx, "stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
 					if !backoff.Stop() {
 						<-backoff.C
 					}
@@ -404,18 +404,18 @@
 				continue
 			}
 			if err != nil {
-				logger.Errorw("read-indication-error",
+				logger.Errorw(ctx, "read-indication-error",
 					log.Fields{"err": err,
 						"device-id": dh.device.Id})
 				if device.AdminState == voltha.AdminState_DELETED {
-					logger.Debug("device-deleted--stopping-the-read-indication-thread")
+					logger.Debug(ctx, "device-deleted--stopping-the-read-indication-thread")
 					break Loop
 				}
 				// Close the stream, and re-initialize it
 				if err = indications.CloseSend(); err != nil {
 					// Ok to ignore here, because we landed here due to a problem on the stream
 					// In all probability, the closeSend call may fail
-					logger.Debugw("error-closing-send stream--error-ignored",
+					logger.Debugw(ctx, "error-closing-send-stream--error-ignored",
 						log.Fields{"err": err,
 							"device-id": dh.device.Id})
 				}
@@ -429,7 +429,7 @@
 			indicationBackoff.Reset()
 			// When OLT is admin down, ignore all indications.
 			if device.AdminState == voltha.AdminState_DISABLED && !isIndicationAllowedDuringOltAdminDown(indication) {
-				logger.Debugw("olt-is-admin-down, ignore indication",
+				logger.Debugw(ctx, "olt-is-admin-down--ignore-indication",
 					log.Fields{"indication": indication,
 						"device-id": dh.device.Id})
 				continue
@@ -475,7 +475,7 @@
 		dh.transitionMap.Handle(ctx, DeviceDownInd)
 	}
 	// Send or clear Alarm
-	if err := dh.eventMgr.oltUpDownIndication(oltIndication, dh.device.Id, raisedTs); err != nil {
+	if err := dh.eventMgr.oltUpDownIndication(ctx, oltIndication, dh.device.Id, raisedTs); err != nil {
 		return olterrors.NewErrAdapter("failed-indication", log.Fields{
 			"device_id":  dh.device.Id,
 			"indication": oltIndication,
@@ -495,16 +495,16 @@
 	case *oop.Indication_IntfInd:
 		intfInd := indication.GetIntfInd()
 		go func() {
-			if err := dh.addPort(intfInd.GetIntfId(), voltha.Port_PON_OLT, intfInd.GetOperState()); err != nil {
+			if err := dh.addPort(ctx, intfInd.GetIntfId(), voltha.Port_PON_OLT, intfInd.GetOperState()); err != nil {
 				olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface", "device-id": dh.device.Id}, err).Log()
 			}
 		}()
-		logger.Infow("received-interface-indication", log.Fields{"InterfaceInd": intfInd, "device-id": dh.device.Id})
+		logger.Infow(ctx, "received-interface-indication", log.Fields{"InterfaceInd": intfInd, "device-id": dh.device.Id})
 	case *oop.Indication_IntfOperInd:
 		intfOperInd := indication.GetIntfOperInd()
 		if intfOperInd.GetType() == "nni" {
 			go func() {
-				if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_ETHERNET_NNI, intfOperInd.GetOperState()); err != nil {
+				if err := dh.addPort(ctx, intfOperInd.GetIntfId(), voltha.Port_ETHERNET_NNI, intfOperInd.GetOperState()); err != nil {
 					olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-nni", "device-id": dh.device.Id}, err).Log()
 				}
 			}()
@@ -513,18 +513,18 @@
 			// TODO: Check what needs to be handled here for When PON PORT down, ONU will be down
 			// Handle pon port update
 			go func() {
-				if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_PON_OLT, intfOperInd.GetOperState()); err != nil {
+				if err := dh.addPort(ctx, intfOperInd.GetIntfId(), voltha.Port_PON_OLT, intfOperInd.GetOperState()); err != nil {
 					olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-pon", "device-id": dh.device.Id}, err).Log()
 				}
 			}()
-			go dh.eventMgr.oltIntfOperIndication(indication.GetIntfOperInd(), dh.device.Id, raisedTs)
+			go dh.eventMgr.oltIntfOperIndication(ctx, indication.GetIntfOperInd(), dh.device.Id, raisedTs)
 		}
-		logger.Infow("received-interface-oper-indication",
+		logger.Infow(ctx, "received-interface-oper-indication",
 			log.Fields{"interfaceOperInd": intfOperInd,
 				"device-id": dh.device.Id})
 	case *oop.Indication_OnuDiscInd:
 		onuDiscInd := indication.GetOnuDiscInd()
-		logger.Infow("received-onu-discovery-indication", log.Fields{"OnuDiscInd": onuDiscInd, "device-id": dh.device.Id})
+		logger.Infow(ctx, "received-onu-discovery-indication", log.Fields{"OnuDiscInd": onuDiscInd, "device-id": dh.device.Id})
 		sn := dh.stringifySerialNumber(onuDiscInd.SerialNumber)
 		go func() {
 			if err := dh.onuDiscIndication(ctx, onuDiscInd, sn); err != nil {
@@ -533,23 +533,23 @@
 		}()
 	case *oop.Indication_OnuInd:
 		onuInd := indication.GetOnuInd()
-		logger.Infow("received-onu-indication", log.Fields{"OnuInd": onuInd, "device-id": dh.device.Id})
+		logger.Infow(ctx, "received-onu-indication", log.Fields{"OnuInd": onuInd, "device-id": dh.device.Id})
 		go func() {
-			if err := dh.onuIndication(onuInd); err != nil {
+			if err := dh.onuIndication(ctx, onuInd); err != nil {
 				olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "onu", "device-id": dh.device.Id}, err).Log()
 			}
 		}()
 	case *oop.Indication_OmciInd:
 		omciInd := indication.GetOmciInd()
-		logger.Debugw("received-omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+		logger.Debugw(ctx, "received-omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
 		go func() {
-			if err := dh.omciIndication(omciInd); err != nil {
+			if err := dh.omciIndication(ctx, omciInd); err != nil {
 				olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "omci", "device-id": dh.device.Id}, err).Log()
 			}
 		}()
 	case *oop.Indication_PktInd:
 		pktInd := indication.GetPktInd()
-		logger.Debugw("received-packet-indication", log.Fields{
+		logger.Debugw(ctx, "received-packet-indication", log.Fields{
 			"intf-type":   pktInd.IntfId,
 			"intf-id":     pktInd.IntfId,
 			"gem-port-id": pktInd.GemportId,
@@ -558,7 +558,7 @@
 		})
 
 		if logger.V(log.DebugLevel) {
-			logger.Debugw("received-packet-indication-packet", log.Fields{
+			logger.Debugw(ctx, "received-packet-indication-packet", log.Fields{
 				"intf-type":   pktInd.IntfId,
 				"intf-id":     pktInd.IntfId,
 				"gem-port-id": pktInd.GemportId,
@@ -575,21 +575,21 @@
 		}()
 	case *oop.Indication_PortStats:
 		portStats := indication.GetPortStats()
-		go dh.portStats.PortStatisticsIndication(portStats, dh.resourceMgr.DevInfo.GetPonPorts())
+		go dh.portStats.PortStatisticsIndication(ctx, portStats, dh.resourceMgr.DevInfo.GetPonPorts())
 	case *oop.Indication_FlowStats:
 		flowStats := indication.GetFlowStats()
-		logger.Infow("received-flow-stats", log.Fields{"FlowStats": flowStats, "device-id": dh.device.Id})
+		logger.Infow(ctx, "received-flow-stats", log.Fields{"FlowStats": flowStats, "device-id": dh.device.Id})
 	case *oop.Indication_AlarmInd:
 		alarmInd := indication.GetAlarmInd()
-		logger.Infow("received-alarm-indication", log.Fields{"AlarmInd": alarmInd, "device-id": dh.device.Id})
-		go dh.eventMgr.ProcessEvents(alarmInd, dh.device.Id, raisedTs)
+		logger.Infow(ctx, "received-alarm-indication", log.Fields{"AlarmInd": alarmInd, "device-id": dh.device.Id})
+		go dh.eventMgr.ProcessEvents(ctx, alarmInd, dh.device.Id, raisedTs)
 	}
 }
 
 // doStateUp handle the olt up indication and update to voltha core
 func (dh *DeviceHandler) doStateUp(ctx context.Context) error {
 	//starting the stat collector
-	go startCollector(dh)
+	go startCollector(ctx, dh)
 
 	// Synchronous call to update device state - this method is run in its own go routine
 	if err := dh.coreProxy.DeviceStateUpdate(ctx, dh.device.Id, voltha.ConnectStatus_REACHABLE,
@@ -603,7 +603,7 @@
 func (dh *DeviceHandler) doStateDown(ctx context.Context) error {
 	dh.lockDevice.Lock()
 	defer dh.lockDevice.Unlock()
-	logger.Debugw("do-state-down-start", log.Fields{"device-id": dh.device.Id})
+	logger.Debugw(ctx, "do-state-down-start", log.Fields{"device-id": dh.device.Id})
 
 	device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
 	if err != nil || device == nil {
@@ -645,7 +645,7 @@
 	/* Discovered ONUs entries need to be cleared , since after OLT
 	   is up, it starts sending discovery indications again*/
 	dh.discOnus = sync.Map{}
-	logger.Debugw("do-state-down-end", log.Fields{"device-id": device.Id})
+	logger.Debugw(ctx, "do-state-down-end", log.Fields{"device-id": device.Id})
 	return nil
 }
 
@@ -670,7 +670,7 @@
 // doStateConnected get the device info and update to voltha core
 func (dh *DeviceHandler) doStateConnected(ctx context.Context) error {
 	var err error
-	logger.Debugw("olt-device-connected", log.Fields{"device-id": dh.device.Id})
+	logger.Debugw(ctx, "olt-device-connected", log.Fields{"device-id": dh.device.Id})
 
 	// Case where OLT is disabled and then rebooted.
 	device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
@@ -679,7 +679,7 @@
 		return olterrors.NewErrAdapter("device-fetch-failed", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
 	}
 	if device.AdminState == voltha.AdminState_DISABLED {
-		logger.Debugln("do-state-connected--device-admin-state-down")
+		logger.Debugln(ctx, "do-state-connected--device-admin-state-down")
 
 		cloned := proto.Clone(device).(*voltha.Device)
 		cloned.ConnectStatus = voltha.ConnectStatus_REACHABLE
@@ -716,8 +716,8 @@
 		/*TODO: needs to handle error scenarios */
 		return olterrors.NewErrAdapter("fetch-device-failed", log.Fields{"device-id": dh.device.Id}, err)
 	}
-	dh.populateActivePorts(device)
-	if err := dh.disableAdminDownPorts(device); err != nil {
+	dh.populateActivePorts(ctx, device)
+	if err := dh.disableAdminDownPorts(ctx, device); err != nil {
 		return olterrors.NewErrAdapter("port-status-update-failed", log.Fields{"device": device}, err)
 	}
 
@@ -731,16 +731,16 @@
 			olterrors.NewErrAdapter("read-indications-failure", log.Fields{"device-id": dh.device.Id}, err).Log()
 		}
 	}()
-	go dh.updateLocalDevice()
+	go dh.updateLocalDevice(ctx)
 
 	if device.PmConfigs != nil {
-		dh.UpdatePmConfig(device.PmConfigs)
+		dh.UpdatePmConfig(ctx, device.PmConfigs)
 	}
 	return nil
 }
 
 func (dh *DeviceHandler) initializeDeviceHandlerModules(ctx context.Context) error {
-	deviceInfo, err := dh.populateDeviceInfo()
+	deviceInfo, err := dh.populateDeviceInfo(ctx)
 
 	if err != nil {
 		return olterrors.NewErrAdapter("populate-device-info-failed", log.Fields{"device-id": dh.device.Id}, err)
@@ -760,13 +760,13 @@
 	dh.eventMgr = NewEventMgr(dh.EventProxy, dh)
 
 	// Stats config for new device
-	dh.portStats = NewOpenOltStatsMgr(dh)
+	dh.portStats = NewOpenOltStatsMgr(ctx, dh)
 
 	return nil
 
 }
 
-func (dh *DeviceHandler) populateDeviceInfo() (*oop.DeviceInfo, error) {
+func (dh *DeviceHandler) populateDeviceInfo(ctx context.Context) (*oop.DeviceInfo, error) {
 	var err error
 	var deviceInfo *oop.DeviceInfo
 
@@ -779,7 +779,7 @@
 		return nil, olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil)
 	}
 
-	logger.Debugw("fetched-device-info", log.Fields{"deviceInfo": deviceInfo, "device-id": dh.device.Id})
+	logger.Debugw(ctx, "fetched-device-info", log.Fields{"deviceInfo": deviceInfo, "device-id": dh.device.Id})
 	dh.device.Root = true
 	dh.device.Vendor = deviceInfo.Vendor
 	dh.device.Model = deviceInfo.Model
@@ -788,13 +788,13 @@
 	dh.device.FirmwareVersion = deviceInfo.FirmwareVersion
 
 	if deviceInfo.DeviceId == "" {
-		logger.Warnw("no-device-id-provided-using-host", log.Fields{"hostport": dh.device.GetHostAndPort()})
+		logger.Warnw(ctx, "no-device-id-provided-using-host", log.Fields{"hostport": dh.device.GetHostAndPort()})
 		host := strings.Split(dh.device.GetHostAndPort(), ":")[0]
-		genmac, err := generateMacFromHost(host)
+		genmac, err := generateMacFromHost(ctx, host)
 		if err != nil {
 			return nil, olterrors.NewErrAdapter("failed-to-generate-mac-host", log.Fields{"host": host}, err)
 		}
-		logger.Debugw("using-host-for-mac-address", log.Fields{"host": host, "mac": genmac})
+		logger.Debugw(ctx, "using-host-for-mac-address", log.Fields{"host": host, "mac": genmac})
 		dh.device.MacAddress = genmac
 	} else {
 		dh.device.MacAddress = deviceInfo.DeviceId
@@ -808,12 +808,12 @@
 	return deviceInfo, nil
 }
 
-func startCollector(dh *DeviceHandler) {
-	logger.Debugf("starting-collector")
+func startCollector(ctx context.Context, dh *DeviceHandler) {
+	logger.Debugf(ctx, "starting-collector")
 	for {
 		select {
 		case <-dh.stopCollector:
-			logger.Debugw("stopping-collector-for-olt", log.Fields{"deviceID:": dh.device.Id})
+			logger.Debugw(ctx, "stopping-collector-for-olt", log.Fields{"deviceID:": dh.device.Id})
 			return
 		case <-time.After(time.Duration(dh.metrics.ToPmConfigs().DefaultFreq) * time.Second):
 
@@ -824,19 +824,19 @@
 				if port.Type == voltha.Port_ETHERNET_NNI {
 					intfID := PortNoToIntfID(port.PortNo, voltha.Port_ETHERNET_NNI)
 					cmnni := dh.portStats.collectNNIMetrics(intfID)
-					logger.Debugw("collect-nni-metrics", log.Fields{"metrics": cmnni})
-					go dh.portStats.publishMetrics(cmnni, port, dh.device.Id, dh.device.Type)
-					logger.Debugw("publish-nni-metrics", log.Fields{"nni-port": port.Label})
+					logger.Debugw(ctx, "collect-nni-metrics", log.Fields{"metrics": cmnni})
+					go dh.portStats.publishMetrics(ctx, cmnni, port, dh.device.Id, dh.device.Type)
+					logger.Debugw(ctx, "publish-nni-metrics", log.Fields{"nni-port": port.Label})
 				}
 				// PON Stats
 				if port.Type == voltha.Port_PON_OLT {
 					intfID := PortNoToIntfID(port.PortNo, voltha.Port_PON_OLT)
 					if val, ok := dh.activePorts.Load(intfID); ok && val == true {
 						cmpon := dh.portStats.collectPONMetrics(intfID)
-						logger.Debugw("collect-pon-metrics", log.Fields{"metrics": cmpon})
-						go dh.portStats.publishMetrics(cmpon, port, dh.device.Id, dh.device.Type)
+						logger.Debugw(ctx, "collect-pon-metrics", log.Fields{"metrics": cmpon})
+						go dh.portStats.publishMetrics(ctx, cmpon, port, dh.device.Id, dh.device.Type)
 					}
-					logger.Debugw("publish-pon-metrics", log.Fields{"pon-port": port.Label})
+					logger.Debugw(ctx, "publish-pon-metrics", log.Fields{"pon-port": port.Label})
 				}
 			}
 		}
@@ -846,7 +846,7 @@
 //AdoptDevice adopts the OLT device
 func (dh *DeviceHandler) AdoptDevice(ctx context.Context, device *voltha.Device) {
 	dh.transitionMap = NewTransitionMap(dh)
-	logger.Infow("adopt-device", log.Fields{"device-id": device.Id, "Address": device.GetHostAndPort()})
+	logger.Infow(ctx, "adopt-device", log.Fields{"device-id": device.Id, "Address": device.GetHostAndPort()})
 	dh.transitionMap.Handle(ctx, DeviceInit)
 
 	// Now, set the initial PM configuration for that device
@@ -877,15 +877,15 @@
 	}, nil
 }
 
-func (dh *DeviceHandler) omciIndication(omciInd *oop.OmciIndication) error {
-	logger.Debugw("omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+func (dh *DeviceHandler) omciIndication(ctx context.Context, omciInd *oop.OmciIndication) error {
+	logger.Debugw(ctx, "omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
 	var deviceType string
 	var deviceID string
 	var proxyDeviceID string
 
 	transid := extractOmciTransactionID(omciInd.Pkt)
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("recv-omci-msg", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id,
+		logger.Debugw(ctx, "recv-omci-msg", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id,
 			"omci-transaction-id": transid, "omci-msg": hex.EncodeToString(omciInd.Pkt)})
 	}
 
@@ -893,7 +893,7 @@
 
 	if onuInCache, ok := dh.onus.Load(onuKey); !ok {
 
-		logger.Debugw("omci-indication-for-a-device-not-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+		logger.Debugw(ctx, "omci-indication-for-a-device-not-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
 		ponPort := IntfIDToPortNo(omciInd.GetIntfId(), voltha.Port_PON_OLT)
 		kwargs := make(map[string]interface{})
 		kwargs["onu_id"] = omciInd.OnuId
@@ -912,7 +912,7 @@
 		dh.onus.Store(onuKey, NewOnuDevice(deviceID, deviceType, onuDevice.SerialNumber, omciInd.OnuId, omciInd.IntfId, proxyDeviceID, false))
 	} else {
 		//found in cache
-		logger.Debugw("omci-indication-for-a-device-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+		logger.Debugw(ctx, "omci-indication-for-a-device-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
 		deviceType = onuInCache.(*OnuDevice).deviceType
 		deviceID = onuInCache.(*OnuDevice).deviceID
 		proxyDeviceID = onuInCache.(*OnuDevice).proxyDeviceID
@@ -934,8 +934,8 @@
 //ProcessInterAdapterMessage sends the proxied messages to the target device
 // If the proxy address is not found in the unmarshalled message, it first fetches the onu device for which the message
 // is meant, and then send the unmarshalled omci message to this onu
-func (dh *DeviceHandler) ProcessInterAdapterMessage(msg *ic.InterAdapterMessage) error {
-	logger.Debugw("process-inter-adapter-message", log.Fields{"msgID": msg.Header.Id})
+func (dh *DeviceHandler) ProcessInterAdapterMessage(ctx context.Context, msg *ic.InterAdapterMessage) error {
+	logger.Debugw(ctx, "process-inter-adapter-message", log.Fields{"msgID": msg.Header.Id})
 	if msg.Header.Type == ic.InterAdapterMessageType_OMCI_REQUEST {
 		msgID := msg.Header.Id
 		fromTopic := msg.Header.FromTopic
@@ -943,7 +943,7 @@
 		toDeviceID := msg.Header.ToDeviceId
 		proxyDeviceID := msg.Header.ProxyDeviceId
 
-		logger.Debugw("omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+		logger.Debugw(ctx, "omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
 
 		msgBody := msg.GetBody()
 
@@ -959,15 +959,15 @@
 					"device-id":     dh.device.Id,
 					"onu-device-id": toDeviceID}, err)
 			}
-			logger.Debugw("device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
-			if err := dh.sendProxiedMessage(onuDevice, omciMsg); err != nil {
+			logger.Debugw(ctx, "device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+			if err := dh.sendProxiedMessage(ctx, onuDevice, omciMsg); err != nil {
 				return olterrors.NewErrCommunication("send-failed", log.Fields{
 					"device-id":     dh.device.Id,
 					"onu-device-id": toDeviceID}, err)
 			}
 		} else {
-			logger.Debugw("proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
-			if err := dh.sendProxiedMessage(nil, omciMsg); err != nil {
+			logger.Debugw(ctx, "proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+			if err := dh.sendProxiedMessage(ctx, nil, omciMsg); err != nil {
 				return olterrors.NewErrCommunication("send-failed", log.Fields{
 					"device-id":     dh.device.Id,
 					"onu-device-id": toDeviceID}, err)
@@ -980,7 +980,7 @@
 	return nil
 }
 
-func (dh *DeviceHandler) sendProxiedMessage(onuDevice *voltha.Device, omciMsg *ic.InterAdapterOmciMessage) error {
+func (dh *DeviceHandler) sendProxiedMessage(ctx context.Context, onuDevice *voltha.Device, omciMsg *ic.InterAdapterOmciMessage) error {
 	var intfID uint32
 	var onuID uint32
 	var connectStatus common.ConnectStatus_Types
@@ -994,7 +994,7 @@
 		connectStatus = omciMsg.GetConnectStatus()
 	}
 	if connectStatus != voltha.ConnectStatus_REACHABLE {
-		logger.Debugw("onu-not-reachable--cannot-send-omci", log.Fields{"intf-id": intfID, "onu-id": onuID})
+		logger.Debugw(ctx, "onu-not-reachable--cannot-send-omci", log.Fields{"intf-id": intfID, "onu-id": onuID})
 
 		return olterrors.NewErrCommunication("unreachable", log.Fields{
 			"intf-id": intfID,
@@ -1011,7 +1011,7 @@
 	// TODO: Below logging illustrates the "stringify" of the omci Pkt.
 	//  once above is fixed this log line can change to just use hex.EncodeToString(omciMessage.Pkt)
 	transid := extractOmciTransactionID(omciMsg.Message)
-	logger.Debugw("sent-omci-msg", log.Fields{"intf-id": intfID, "onu-id": onuID,
+	logger.Debugw(ctx, "sent-omci-msg", log.Fields{"intf-id": intfID, "onu-id": onuID,
 		"omciTransactionID": transid, "omciMsg": string(omciMessage.Pkt)})
 
 	_, err := dh.Client.OmciMsgOut(context.Background(), omciMessage)
@@ -1025,7 +1025,7 @@
 }
 
 func (dh *DeviceHandler) activateONU(ctx context.Context, intfID uint32, onuID int64, serialNum *oop.SerialNumber, serialNumber string) error {
-	logger.Debugw("activate-onu", log.Fields{"intf-id": intfID, "onu-id": onuID, "serialNum": serialNum, "serialNumber": serialNumber, "device-id": dh.device.Id})
+	logger.Debugw(ctx, "activate-onu", log.Fields{"intf-id": intfID, "onu-id": onuID, "serialNum": serialNum, "serialNumber": serialNumber, "device-id": dh.device.Id})
 	if err := dh.flowMgr.UpdateOnuInfo(ctx, intfID, uint32(onuID), serialNumber); err != nil {
 		return olterrors.NewErrAdapter("onu-activate-failed", log.Fields{"onu": onuID, "intf-id": intfID}, err)
 	}
@@ -1035,12 +1035,13 @@
 	if _, err := dh.Client.ActivateOnu(ctx, &Onu); err != nil {
 		st, _ := status.FromError(err)
 		if st.Code() == codes.AlreadyExists {
-			logger.Debugw("onu-activation-in-progress", log.Fields{"SerialNumber": serialNumber, "onu-id": onuID, "device-id": dh.device.Id})
+			logger.Debugw(ctx, "onu-activation-in-progress", log.Fields{"SerialNumber": serialNumber, "onu-id": onuID, "device-id": dh.device.Id})
+
 		} else {
 			return olterrors.NewErrAdapter("onu-activate-failed", log.Fields{"onu": Onu, "device-id": dh.device.Id}, err)
 		}
 	} else {
-		logger.Infow("activated-onu", log.Fields{"SerialNumber": serialNumber, "device-id": dh.device.Id})
+		logger.Infow(ctx, "activated-onu", log.Fields{"SerialNumber": serialNumber, "device-id": dh.device.Id})
 	}
 	return nil
 }
@@ -1050,7 +1051,7 @@
 	channelID := onuDiscInd.GetIntfId()
 	parentPortNo := IntfIDToPortNo(onuDiscInd.GetIntfId(), voltha.Port_PON_OLT)
 
-	logger.Infow("new-discovery-indication", log.Fields{"sn": sn})
+	logger.Infow(ctx, "new-discovery-indication", log.Fields{"sn": sn})
 
 	kwargs := make(map[string]interface{})
 	if sn != "" {
@@ -1070,7 +1071,7 @@
 		dh.onus.Range(func(Onukey interface{}, onuInCache interface{}) bool {
 			if onuInCache.(*OnuDevice).serialNumber == sn && onuInCache.(*OnuDevice).losRaised {
 				if onuDiscInd.GetIntfId() != onuInCache.(*OnuDevice).intfID {
-					logger.Warnw("onu-is-on-a-different-intf-id-now", log.Fields{
+					logger.Warnw(ctx, "onu-is-on-a-different-intf-id-now", log.Fields{
 						"previousIntfId": onuInCache.(*OnuDevice).intfID,
 						"currentIntfId":  onuDiscInd.GetIntfId()})
 					// TODO:: Should we need to ignore raising OnuLosClear event
@@ -1079,12 +1080,12 @@
 				alarmInd.IntfId = onuInCache.(*OnuDevice).intfID
 				alarmInd.OnuId = onuInCache.(*OnuDevice).onuID
 				alarmInd.LosStatus = statusCheckOff
-				go dh.eventMgr.onuAlarmIndication(&alarmInd, onuInCache.(*OnuDevice).deviceID, raisedTs)
+				go dh.eventMgr.onuAlarmIndication(ctx, &alarmInd, onuInCache.(*OnuDevice).deviceID, raisedTs)
 			}
 			return true
 		})
 
-		logger.Warnw("onu-sn-is-already-being-processed", log.Fields{"sn": sn})
+		logger.Warnw(ctx, "onu-sn-is-already-being-processed", log.Fields{"sn": sn})
 		return nil
 	}
 
@@ -1095,9 +1096,9 @@
 	onuDevice, err := dh.coreProxy.GetChildDevice(ctx, dh.device.Id, kwargs)
 
 	if err != nil {
-		logger.Debugw("core-proxy-get-child-device-failed", log.Fields{"parentDevice": dh.device.Id, "err": err, "sn": sn})
+		logger.Debugw(ctx, "core-proxy-get-child-device-failed", log.Fields{"parentDevice": dh.device.Id, "err": err, "sn": sn})
 		if e, ok := status.FromError(err); ok {
-			logger.Debugw("core-proxy-get-child-device-failed-with-code", log.Fields{"errCode": e.Code(), "sn": sn})
+			logger.Debugw(ctx, "core-proxy-get-child-device-failed-with-code", log.Fields{"errCode": e.Code(), "sn": sn})
 			switch e.Code() {
 			case codes.Internal:
 				// this probably means NOT FOUND, so just create a new device
@@ -1112,14 +1113,14 @@
 
 	if onuDevice == nil {
 		// NOTE this should happen a single time, and only if GetChildDevice returns NotFound
-		logger.Debugw("creating-new-onu", log.Fields{"sn": sn})
+		logger.Debugw(ctx, "creating-new-onu", log.Fields{"sn": sn})
 		// we need to create a new ChildDevice
 		ponintfid := onuDiscInd.GetIntfId()
 		dh.lockDevice.Lock()
 		onuID, err = dh.resourceMgr.GetONUID(ctx, ponintfid)
 		dh.lockDevice.Unlock()
 
-		logger.Infow("creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
+		logger.Infow(ctx, "creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
 
 		if err != nil {
 			// if we can't create an ID in resource manager,
@@ -1138,8 +1139,8 @@
 				"pon-intf-id":   ponintfid,
 				"serial-number": sn}, err)
 		}
-		dh.eventMgr.OnuDiscoveryIndication(onuDiscInd, dh.device.Id, onuDevice.Id, onuID, sn, time.Now().UnixNano())
-		logger.Infow("onu-child-device-added",
+		dh.eventMgr.OnuDiscoveryIndication(ctx, onuDiscInd, dh.device.Id, onuDevice.Id, onuID, sn, time.Now().UnixNano())
+		logger.Infow(ctx, "onu-child-device-added",
 			log.Fields{"onuDevice": onuDevice,
 				"sn":        sn,
 				"onu-id":    onuID,
@@ -1150,7 +1151,7 @@
 	onuID = onuDevice.ProxyAddress.OnuId
 	//Insert the ONU into cache to use in OnuIndication.
 	//TODO: Do we need to remove this from the cache on ONU change, or wait for overwritten on next discovery.
-	logger.Debugw("onu-discovery-indication-key-create",
+	logger.Debugw(ctx, "onu-discovery-indication-key-create",
 		log.Fields{"onu-id": onuID,
 			"intfId": onuDiscInd.GetIntfId(),
 			"sn":     sn})
@@ -1158,7 +1159,7 @@
 
 	onuDev := NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuID, onuDiscInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId, false)
 	dh.onus.Store(onuKey, onuDev)
-	logger.Debugw("new-onu-device-discovered",
+	logger.Debugw(ctx, "new-onu-device-discovered",
 		log.Fields{"onu": onuDev,
 			"sn": sn})
 
@@ -1167,7 +1168,7 @@
 			"device-id":     onuDevice.Id,
 			"serial-number": sn}, err)
 	}
-	logger.Infow("onu-discovered-reachable", log.Fields{"device-id": onuDevice.Id, "sn": sn})
+	logger.Infow(ctx, "onu-discovered-reachable", log.Fields{"device-id": onuDevice.Id, "sn": sn})
 	if err = dh.activateONU(ctx, onuDiscInd.IntfId, int64(onuID), onuDiscInd.SerialNumber, sn); err != nil {
 		return olterrors.NewErrAdapter("onu-activation-failed", log.Fields{
 			"device-id":     onuDevice.Id,
@@ -1176,7 +1177,7 @@
 	return nil
 }
 
-func (dh *DeviceHandler) onuIndication(onuInd *oop.OnuIndication) error {
+func (dh *DeviceHandler) onuIndication(ctx context.Context, onuInd *oop.OnuIndication) error {
 	serialNumber := dh.stringifySerialNumber(onuInd.SerialNumber)
 
 	kwargs := make(map[string]interface{})
@@ -1184,7 +1185,7 @@
 	var onuDevice *voltha.Device
 	var err error
 	foundInCache := false
-	logger.Debugw("onu-indication-key-create",
+	logger.Debugw(ctx, "onu-indication-key-create",
 		log.Fields{"onuId": onuInd.OnuId,
 			"intfId":    onuInd.GetIntfId(),
 			"device-id": dh.device.Id})
@@ -1217,13 +1218,13 @@
 	}
 
 	if onuDevice.ParentPortNo != ponPort {
-		logger.Warnw("onu-is-on-a-different-intf-id-now", log.Fields{
+		logger.Warnw(ctx, "onu-is-on-a-different-intf-id-now", log.Fields{
 			"previousIntfId": onuDevice.ParentPortNo,
 			"currentIntfId":  ponPort})
 	}
 
 	if onuDevice.ProxyAddress.OnuId != onuInd.OnuId {
-		logger.Warnw("onu-id-mismatch-possible-if-voltha-and-olt-rebooted", log.Fields{
+		logger.Warnw(ctx, "onu-id-mismatch-possible-if-voltha-and-olt-rebooted", log.Fields{
 			"expected-onu-id": onuDevice.ProxyAddress.OnuId,
 			"received-onu-id": onuInd.OnuId,
 			"device-id":       dh.device.Id})
@@ -1234,15 +1235,14 @@
 		dh.onus.Store(onuKey, NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuInd.GetOnuId(), onuInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId, false))
 
 	}
-	if err := dh.updateOnuStates(onuDevice, onuInd); err != nil {
+	if err := dh.updateOnuStates(ctx, onuDevice, onuInd); err != nil {
 		return olterrors.NewErrCommunication("state-update-failed", errFields, err)
 	}
 	return nil
 }
 
-func (dh *DeviceHandler) updateOnuStates(onuDevice *voltha.Device, onuInd *oop.OnuIndication) error {
-	ctx := context.TODO()
-	logger.Debugw("onu-indication-for-state", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+func (dh *DeviceHandler) updateOnuStates(ctx context.Context, onuDevice *voltha.Device, onuInd *oop.OnuIndication) error {
+	logger.Debugw(ctx, "onu-indication-for-state", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
 	if onuInd.AdminState == "down" || onuInd.OperState == "down" {
 		// The ONU has gone admin_state "down" or oper_state "down" - we expect the ONU to send discovery again
 		// The ONU admin_state is "up" while "oper_state" is down in cases where ONU activation fails. In this case
@@ -1250,14 +1250,14 @@
 		dh.discOnus.Delete(onuDevice.SerialNumber)
 		// Tests have shown that we sometimes get OperState as NOT down even if AdminState is down, forcing it
 		if onuInd.OperState != "down" {
-			logger.Warnw("onu-admin-state-down", log.Fields{"operState": onuInd.OperState})
+			logger.Warnw(ctx, "onu-admin-state-down", log.Fields{"operState": onuInd.OperState})
 			onuInd.OperState = "down"
 		}
 	}
 
 	switch onuInd.OperState {
 	case "down":
-		logger.Debugw("sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+		logger.Debugw(ctx, "sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
 		// TODO NEW CORE do not hardcode adapter name. Handler needs Adapter reference
 		err := dh.AdapterProxy.SendInterAdapterMessage(ctx, onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
 			"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
@@ -1269,7 +1270,7 @@
 				"device-id":     onuDevice.Id}, err)
 		}
 	case "up":
-		logger.Debugw("sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+		logger.Debugw(ctx, "sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
 		// TODO NEW CORE do not hardcode adapter name. Handler needs Adapter reference
 		err := dh.AdapterProxy.SendInterAdapterMessage(ctx, onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
 			"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
@@ -1321,8 +1322,8 @@
 }
 
 //GetChildDevice returns the child device for given parent port and onu id
-func (dh *DeviceHandler) GetChildDevice(parentPort, onuID uint32) (*voltha.Device, error) {
-	logger.Debugw("getchilddevice",
+func (dh *DeviceHandler) GetChildDevice(ctx context.Context, parentPort, onuID uint32) (*voltha.Device, error) {
+	logger.Debugw(ctx, "getchilddevice",
 		log.Fields{"pon-port": parentPort,
 			"onu-id":    onuID,
 			"device-id": dh.device.Id})
@@ -1335,16 +1336,16 @@
 			"intf-id": parentPort,
 			"onu-id":  onuID}, err)
 	}
-	logger.Debugw("successfully-received-child-device-from-core", log.Fields{"child-device-id": onuDevice.Id, "child-device-sn": onuDevice.SerialNumber})
+	logger.Debugw(ctx, "successfully-received-child-device-from-core", log.Fields{"child-device-id": onuDevice.Id, "child-device-sn": onuDevice.SerialNumber})
 	return onuDevice, nil
 }
 
 // SendPacketInToCore sends packet-in to core
 // For this, it calls SendPacketIn of the core-proxy which uses a device specific topic to send the request.
 // The adapter handling the device creates a device specific topic
-func (dh *DeviceHandler) SendPacketInToCore(logicalPort uint32, packetPayload []byte) error {
+func (dh *DeviceHandler) SendPacketInToCore(ctx context.Context, logicalPort uint32, packetPayload []byte) error {
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("send-packet-in-to-core", log.Fields{
+		logger.Debugw(ctx, "send-packet-in-to-core", log.Fields{
 			"port":      logicalPort,
 			"packet":    hex.EncodeToString(packetPayload),
 			"device-id": dh.device.Id,
@@ -1359,7 +1360,7 @@
 			"packet":       hex.EncodeToString(packetPayload)}, err)
 	}
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("sent-packet-in-to-core-successfully", log.Fields{
+		logger.Debugw(ctx, "sent-packet-in-to-core-successfully", log.Fields{
 			"packet":    hex.EncodeToString(packetPayload),
 			"device-id": dh.device.Id,
 		})
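The logger.V(log.DebugLevel) guard around these calls exists because the packet dump is expensive to build. A dependency-free sketch of the idiom (debugEnabled stands in for the level check):

package main

import (
	"encoding/hex"
	"fmt"
)

// hex.EncodeToString allocates a string twice the payload size, so it is
// only built when debug output is actually wanted.
func logPacket(debugEnabled bool, payload []byte) {
	if debugEnabled {
		fmt.Println("send-packet-in-to-core", "packet:", hex.EncodeToString(payload))
	}
}

func main() {
	logPacket(true, []byte{0xde, 0xad, 0xbe, 0xef})  // prints deadbeef
	logPacket(false, []byte{0xde, 0xad, 0xbe, 0xef}) // encodes nothing
}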
@@ -1368,26 +1369,26 @@
 }
 
 // AddUniPortToOnu adds the uni port to the onu device
-func (dh *DeviceHandler) AddUniPortToOnu(intfID, onuID, uniPort uint32) {
+func (dh *DeviceHandler) AddUniPortToOnu(ctx context.Context, intfID, onuID, uniPort uint32) {
 	onuKey := dh.formOnuKey(intfID, onuID)
 
 	if onuDevice, ok := dh.onus.Load(onuKey); ok {
 		// add it to the uniPort map for the onu device
 		if _, ok = onuDevice.(*OnuDevice).uniPorts[uniPort]; !ok {
 			onuDevice.(*OnuDevice).uniPorts[uniPort] = struct{}{}
-			logger.Debugw("adding-uni-port", log.Fields{"port": uniPort, "intf-id": intfID, "onuId": onuID})
+			logger.Debugw(ctx, "adding-uni-port", log.Fields{"port": uniPort, "intf-id": intfID, "onuId": onuID})
 		}
 	}
 }
 
 // UpdatePmConfig updates the pm metrics.
-func (dh *DeviceHandler) UpdatePmConfig(pmConfigs *voltha.PmConfigs) {
+func (dh *DeviceHandler) UpdatePmConfig(ctx context.Context, pmConfigs *voltha.PmConfigs) {
 
-	logger.Infow("update-pm-configs", log.Fields{"device-id": dh.device.Id, "pm-configs": pmConfigs})
+	logger.Infow(ctx, "update-pm-configs", log.Fields{"device-id": dh.device.Id, "pm-configs": pmConfigs})
 
 	if pmConfigs.DefaultFreq != dh.metrics.ToPmConfigs().DefaultFreq {
 		dh.metrics.UpdateFrequency(pmConfigs.DefaultFreq)
-		logger.Debugf("frequency-updated")
+		logger.Debugf(ctx, "frequency-updated")
 	}
 
 	if pmConfigs.Grouped == false {
@@ -1401,15 +1402,15 @@
 
 //UpdateFlowsIncrementally updates the device flow
 func (dh *DeviceHandler) UpdateFlowsIncrementally(ctx context.Context, device *voltha.Device, flows *of.FlowChanges, groups *of.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
-	logger.Debugw("received-incremental-flowupdate-in-device-handler", log.Fields{"device-id": device.Id, "flows": flows, "groups": groups, "flowMetadata": flowMetadata})
+	logger.Debugw(ctx, "received-incremental-flowupdate-in-device-handler", log.Fields{"device-id": device.Id, "flows": flows, "groups": groups, "flowMetadata": flowMetadata})
 
 	var errorsList []error
 
 	if flows != nil {
 		for _, flow := range flows.ToRemove.Items {
-			dh.incrementActiveFlowRemoveCount(flow)
+			dh.incrementActiveFlowRemoveCount(ctx, flow)
 
-			logger.Debugw("removing-flow",
+			logger.Debugw(ctx, "removing-flow",
 				log.Fields{"device-id": device.Id,
 					"flowToRemove": flow})
 			err := dh.flowMgr.RemoveFlow(ctx, flow)
@@ -1417,15 +1418,15 @@
 				errorsList = append(errorsList, err)
 			}
 
-			dh.decrementActiveFlowRemoveCount(flow)
+			dh.decrementActiveFlowRemoveCount(ctx, flow)
 		}
 
 		for _, flow := range flows.ToAdd.Items {
-			logger.Debugw("adding-flow",
+			logger.Debugw(ctx, "adding-flow",
 				log.Fields{"device-id": device.Id,
 					"flowToAdd": flow})
 			// If there are active Flow Remove in progress for a given subscriber, wait until it completes
-			dh.waitForFlowRemoveToFinish(flow)
+			dh.waitForFlowRemoveToFinish(ctx, flow)
 			err := dh.flowMgr.AddFlow(ctx, flow, flowMetadata)
 			if err != nil {
 				errorsList = append(errorsList, err)
@@ -1448,13 +1449,13 @@
 			}
 		}
 		if len(groups.ToRemove.Items) != 0 {
-			logger.Debugw("group-delete-operation-not-supported", log.Fields{"device-id": dh.device.Id})
+			logger.Debugw(ctx, "group-delete-operation-not-supported", log.Fields{"device-id": dh.device.Id})
 		}
 	}
 	if len(errorsList) > 0 {
 		return fmt.Errorf("errors-installing-flows-groups, errors:%v", errorsList)
 	}
-	logger.Debugw("updated-flows-incrementally-successfully", log.Fields{"device-id": dh.device.Id})
+	logger.Debugw(ctx, "updated-flows-incrementally-successfully", log.Fields{"device-id": dh.device.Id})
 	return nil
 }
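UpdateFlowsIncrementally collects per-flow failures into errorsList and reports them once at the end, so one bad flow does not abort the rest of the batch. A stripped-down sketch of that accumulation idiom:

package main

import (
	"errors"
	"fmt"
)

// applyAll attempts every operation, collects failures, and returns one
// combined error, mirroring the errorsList handling above.
func applyAll(ops []func() error) error {
	var errorsList []error
	for _, op := range ops {
		if err := op(); err != nil {
			errorsList = append(errorsList, err)
		}
	}
	if len(errorsList) > 0 {
		return fmt.Errorf("errors-installing-flows-groups, errors:%v", errorsList)
	}
	return nil
}

func main() {
	err := applyAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("adding-flow-failed") },
	})
	fmt.Println(err)
}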
 
@@ -1463,7 +1464,7 @@
 //Device-Handler Admin-State : down
 //Device Port-State: UNKNOWN
 //Device Oper-State: UNKNOWN
-func (dh *DeviceHandler) DisableDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) DisableDevice(ctx context.Context, device *voltha.Device) error {
 	/* On device disable ,admin state update has to be done prior sending request to agent since
 	   the indication thread may processes invalid  indications of ONU and OLT*/
 	if dh.Client != nil {
@@ -1473,7 +1474,7 @@
 			}
 		}
 	}
-	logger.Debugw("olt-disabled", log.Fields{"device-id": device.Id})
+	logger.Debugw(ctx, "olt-disabled", log.Fields{"device-id": device.Id})
 	/* Discovered ONUs entries need to be cleared , since on device disable the child devices goes to
 	UNREACHABLE state which needs to be configured again*/
 
@@ -1483,7 +1484,7 @@
 	//stopping the stats collector
 	dh.stopCollector <- true
 
-	go dh.notifyChildDevices("unreachable")
+	go dh.notifyChildDevices(ctx, "unreachable")
 	cloned := proto.Clone(device).(*voltha.Device)
 	//Update device Admin state
 	dh.device = cloned
@@ -1497,11 +1498,11 @@
 		}
 	}
 
-	logger.Debugw("disable-device-end", log.Fields{"device-id": device.Id})
+	logger.Debugw(ctx, "disable-device-end", log.Fields{"device-id": device.Id})
 	return nil
 }
 
-func (dh *DeviceHandler) notifyChildDevices(state string) {
+func (dh *DeviceHandler) notifyChildDevices(ctx context.Context, state string) {
 
 	// Update onu state as unreachable in onu adapter
 	onuInd := oop.OnuIndication{}
@@ -1509,14 +1510,14 @@
 	//get the child device for the parent device
 	onuDevices, err := dh.coreProxy.GetChildDevices(context.TODO(), dh.device.Id)
 	if err != nil {
-		logger.Errorw("failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "error": err})
+		logger.Errorw(ctx, "failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "error": err})
 	}
 	if onuDevices != nil {
 		for _, onuDevice := range onuDevices.Items {
 			err := dh.AdapterProxy.SendInterAdapterMessage(context.TODO(), &onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
 				"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
 			if err != nil {
-				logger.Errorw("failed-to-send-inter-adapter-message", log.Fields{"OnuInd": onuInd,
+				logger.Errorw(ctx, "failed-to-send-inter-adapter-message", log.Fields{"OnuInd": onuInd,
 					"From Adapter": "openolt", "DeviceType": onuDevice.Type, "device-id": onuDevice.Id})
 			}
 
@@ -1530,19 +1531,19 @@
 //Device-Handler Admin-State : up
 //Device Port-State: ACTIVE
 //Device Oper-State: ACTIVE
-func (dh *DeviceHandler) ReenableDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) ReenableDevice(ctx context.Context, device *voltha.Device) error {
 
 	if _, err := dh.Client.ReenableOlt(context.Background(), new(oop.Empty)); err != nil {
 		if e, ok := status.FromError(err); ok && e.Code() == codes.Internal {
 			return olterrors.NewErrAdapter("olt-reenable-failed", log.Fields{"device-id": dh.device.Id}, err)
 		}
 	}
-	logger.Debug("olt-reenabled")
+	logger.Debug(ctx, "olt-reenabled")
 
 	cloned := proto.Clone(device).(*voltha.Device)
 	// Update the all ports state on that device to enable
 
-	if err := dh.disableAdminDownPorts(device); err != nil {
+	if err := dh.disableAdminDownPorts(ctx, device); err != nil {
 		return olterrors.NewErrAdapter("port-status-update-failed-after-olt-reenable", log.Fields{"device": device}, err)
 	}
 	//Update the device oper status as ACTIVE
@@ -1556,7 +1557,7 @@
 			"oper-status":    cloned.OperStatus}, err)
 	}
 
-	logger.Debugw("reenabledevice-end", log.Fields{"device-id": device.Id})
+	logger.Debugw(ctx, "reenabledevice-end", log.Fields{"device-id": device.Id})
 
 	return nil
 }
@@ -1566,12 +1567,12 @@
 	var err error
 	for _, port := range onu.UniPorts {
 		uniID = UniIDFromPortNum(uint32(port))
-		logger.Debugw("clearing-resource-data-for-uni-port", log.Fields{"port": port, "uni-id": uniID})
+		logger.Debugw(ctx, "clearing-resource-data-for-uni-port", log.Fields{"port": port, "uni-id": uniID})
 		/* Delete tech-profile instance from the KV store */
 		if err = dh.flowMgr.DeleteTechProfileInstances(ctx, onu.IntfID, onu.OnuID, uniID, onu.SerialNumber); err != nil {
-			logger.Debugw("failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+			logger.Debugw(ctx, "failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
 		}
-		logger.Debugw("deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+		logger.Debugw(ctx, "deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
 		flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID))
 		for _, flowID := range flowIDs {
 			dh.resourceMgr.FreeFlowID(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID), flowID)
@@ -1579,21 +1580,21 @@
 		tpIDList := dh.resourceMgr.GetTechProfileIDForOnu(ctx, onu.IntfID, onu.OnuID, uniID)
 		for _, tpID := range tpIDList {
 			if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "upstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
-				logger.Debugw("failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+				logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
 			}
-			logger.Debugw("removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+			logger.Debugw(ctx, "removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
 			if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "downstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
-				logger.Debugw("failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+				logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
 			}
-			logger.Debugw("removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+			logger.Debugw(ctx, "removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
 		}
 		dh.resourceMgr.FreePONResourcesForONU(ctx, onu.IntfID, onu.OnuID, uniID)
 		if err = dh.resourceMgr.RemoveTechProfileIDsForOnu(ctx, onu.IntfID, onu.OnuID, uniID); err != nil {
-			logger.Debugw("failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+			logger.Debugw(ctx, "failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
 		}
-		logger.Debugw("removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+		logger.Debugw(ctx, "removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
 		if err = dh.resourceMgr.DelGemPortPktIn(ctx, onu.IntfID, onu.OnuID, uint32(port)); err != nil {
-			logger.Debugw("failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
+			logger.Debugw(ctx, "failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
 		}
 	}
 	return nil
@@ -1611,10 +1612,10 @@
 	if err != nil {
 		return olterrors.NewErrPersistence("get", "nni", 0, nil, err)
 	}
-	logger.Debugw("nni-", log.Fields{"nni": nni})
+	logger.Debugw(ctx, "nni-", log.Fields{"nni": nni})
 	for _, nniIntfID := range nni {
 		flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, uint32(nniIntfID), int32(nniOnuID), int32(nniUniID))
-		logger.Debugw("current-flow-ids-for-nni", log.Fields{"flow-ids": flowIDs})
+		logger.Debugw(ctx, "current-flow-ids-for-nni", log.Fields{"flow-ids": flowIDs})
 		for _, flowID := range flowIDs {
 			dh.resourceMgr.FreeFlowID(ctx, uint32(nniIntfID), -1, -1, uint32(flowID))
 		}
@@ -1628,13 +1629,13 @@
 
 // DeleteDevice deletes the device instance from openolt handler array.  Also clears allocated resource manager resources.  Also reboots the OLT hardware!
 func (dh *DeviceHandler) DeleteDevice(ctx context.Context, device *voltha.Device) error {
-	logger.Debug("function-entry-delete-device")
+	logger.Debug(ctx, "function-entry-delete-device")
 	/* Clear the KV store data associated with the all the UNI ports
 	   This clears up flow data and also resource map data for various
 	   other pon resources like alloc_id and gemport_id
 	*/
 	go dh.cleanupDeviceResources(ctx)
-	logger.Debug("removed-device-from-Resource-manager-KV-store")
+	logger.Debug(ctx, "removed-device-from-Resource-manager-KV-store")
 	// Stop the Stats collector
 	dh.stopCollector <- true
 	// stop the heartbeat check routine
@@ -1670,9 +1671,9 @@
 			}
 			for _, onu := range onuGemData {
 				onuID := make([]uint32, 1)
-				logger.Debugw("onu-data", log.Fields{"onu": onu})
+				logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
 				if err = dh.clearUNIData(ctx, &onu); err != nil {
-					logger.Errorw("failed-to-clear-data-for-onu", log.Fields{"onu-device": onu})
+					logger.Errorw(ctx, "failed-to-clear-data-for-onu", log.Fields{"onu-device": onu})
 				}
 				// Clear flowids for gem cache.
 				for _, gem := range onu.GemPorts {
@@ -1685,14 +1686,14 @@
 			onuGemData = nil
 			err = dh.resourceMgr.DelOnuGemInfoForIntf(ctx, ponPort)
 			if err != nil {
-				logger.Errorw("failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
+				logger.Errorw(ctx, "failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
 			}
 		}
 		/* Clear the flows from KV store associated with NNI port.
 		   There are mostly trap rules from NNI port (like LLDP)
 		*/
 		if err := dh.clearNNIData(ctx); err != nil {
-			logger.Errorw("failed-to-clear-data-for-NNI-port", log.Fields{"device-id": dh.device.Id})
+			logger.Errorw(ctx, "failed-to-clear-data-for-NNI-port", log.Fields{"device-id": dh.device.Id})
 		}
 
 		/* Clear the resource pool for each PON port in the background */
@@ -1715,17 +1716,17 @@
 }
 
 //RebootDevice reboots the given device
-func (dh *DeviceHandler) RebootDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) RebootDevice(ctx context.Context, device *voltha.Device) error {
 	if _, err := dh.Client.Reboot(context.Background(), new(oop.Empty)); err != nil {
 		return olterrors.NewErrAdapter("olt-reboot-failed", log.Fields{"device-id": dh.device.Id}, err)
 	}
-	logger.Debugw("rebooted-device-successfully", log.Fields{"device-id": device.Id})
+	logger.Debugw(ctx, "rebooted-device-successfully", log.Fields{"device-id": device.Id})
 	return nil
 }
 
 func (dh *DeviceHandler) handlePacketIndication(ctx context.Context, packetIn *oop.PacketIndication) error {
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("received-packet-in", log.Fields{
+		logger.Debugw(ctx, "received-packet-in", log.Fields{
 			"packet-indication": *packetIn,
 			"device-id":         dh.device.Id,
 			"packet":            hex.EncodeToString(packetIn.Pkt),
@@ -1736,12 +1737,13 @@
 		return olterrors.NewErrNotFound("logical-port", log.Fields{"packet": hex.EncodeToString(packetIn.Pkt)}, err)
 	}
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("sending-packet-in-to-core", log.Fields{
+		logger.Debugw(ctx, "sending-packet-in-to-core", log.Fields{
 			"logical-port-num": logicalPortNum,
 			"device-id":        dh.device.Id,
 			"packet":           hex.EncodeToString(packetIn.Pkt),
 		})
 	}
+
 	if err := dh.coreProxy.SendPacketIn(context.TODO(), dh.device.Id, logicalPortNum, packetIn.Pkt); err != nil {
 		return olterrors.NewErrCommunication("send-packet-in", log.Fields{
 			"destination": "core",
@@ -1750,8 +1752,9 @@
 			"packet":      hex.EncodeToString(packetIn.Pkt),
 		}, err)
 	}
+
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("success-sending-packet-in-to-core!", log.Fields{
+		logger.Debugw(ctx, "success-sending-packet-in-to-core!", log.Fields{
 			"packet":    hex.EncodeToString(packetIn.Pkt),
 			"device-id": dh.device.Id,
 		})
@@ -1762,7 +1765,7 @@
 // PacketOut sends packet-out from VOLTHA to OLT on the egress port provided
 func (dh *DeviceHandler) PacketOut(ctx context.Context, egressPortNo int, packet *of.OfpPacketOut) error {
 	if logger.V(log.DebugLevel) {
-		logger.Debugw("incoming-packet-out", log.Fields{
+		logger.Debugw(ctx, "incoming-packet-out", log.Fields{
 			"device-id":      dh.device.Id,
 			"egress-port-no": egressPortNo,
 			"pkt-length":     len(packet.Data),
@@ -1779,7 +1782,7 @@
 			// ONOS has no clue about uni/nni ports, it just packets out on all
 			// available ports on the Logical Switch. It should not be interested
 			// in the UNI links.
-			logger.Debugw("dropping-lldp-packet-out-on-uni", log.Fields{
+			logger.Debugw(ctx, "dropping-lldp-packet-out-on-uni", log.Fields{
 				"device-id": dh.device.Id,
 			})
 			return nil
@@ -1790,7 +1793,7 @@
 				// slice out the outer tag.
 				packet.Data = append(packet.Data[:12], packet.Data[16:]...)
 				if logger.V(log.DebugLevel) {
-					logger.Debugw("packet-now-single-tagged", log.Fields{
+					logger.Debugw(ctx, "packet-now-single-tagged", log.Fields{
 						"packet-data": hex.EncodeToString(packet.Data),
 						"device-id":   dh.device.Id,
 					})
@@ -1806,7 +1809,7 @@
 			// In this case the openolt agent will receive the gemPortID as 0.
 			// The agent tries to retrieve the gemPortID in this case.
 			// This may not always succeed at the agent and packetOut may fail.
-			logger.Errorw("failed-to-retrieve-gemport-id-for-packet-out", log.Fields{
+			logger.Errorw(ctx, "failed-to-retrieve-gemport-id-for-packet-out", log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
 				"uni-id":    uniID,
@@ -1818,7 +1821,7 @@
 		onuPkt := oop.OnuPacket{IntfId: intfID, OnuId: onuID, PortNo: uint32(egressPortNo), GemportId: gemPortID, Pkt: packet.Data}
 
 		if logger.V(log.DebugLevel) {
-			logger.Debugw("sending-packet-to-onu", log.Fields{
+			logger.Debugw(ctx, "sending-packet-to-onu", log.Fields{
 				"egress-port-no": egressPortNo,
 				"intf-id":        intfID,
 				"onu-id":         onuID,
@@ -1843,7 +1846,7 @@
 			}, err)
 		}
 	} else if egressPortType == voltha.Port_ETHERNET_NNI {
-		nniIntfID, err := IntfIDFromNniPortNum(uint32(egressPortNo))
+		nniIntfID, err := IntfIDFromNniPortNum(ctx, uint32(egressPortNo))
 		if err != nil {
 			return olterrors.NewErrInvalidValue(log.Fields{
 				"egress-nni-port": egressPortNo,
@@ -1853,7 +1856,7 @@
 		uplinkPkt := oop.UplinkPacket{IntfId: nniIntfID, Pkt: packet.Data}
 
 		if logger.V(log.DebugLevel) {
-			logger.Debugw("sending-packet-to-nni", log.Fields{
+			logger.Debugw(ctx, "sending-packet-to-nni", log.Fields{
 				"uplink-pkt": uplinkPkt,
 				"packet":     hex.EncodeToString(packet.Data),
 				"device-id":  dh.device.Id,
@@ -1867,7 +1870,7 @@
 			}, err)
 		}
 	} else {
-		logger.Warnw("packet-out-to-this-interface-type-not-implemented", log.Fields{
+		logger.Warnw(ctx, "packet-out-to-this-interface-type-not-implemented", log.Fields{
 			"egress-port-no": egressPortNo,
 			"egressPortType": egressPortType,
 			"packet":         hex.EncodeToString(packet.Data),
@@ -1891,7 +1894,7 @@
 		case <-heartbeatTimer.C:
 			ctxWithTimeout, cancel := context.WithTimeout(context.Background(), dh.openOLT.GrpcTimeoutInterval)
 			if heartBeat, err := dh.Client.HeartbeatCheck(ctxWithTimeout, new(oop.Empty)); err != nil {
-				logger.Warnw("hearbeat-failed", log.Fields{"device-id": dh.device.Id})
+				logger.Warnw(ctx, "heartbeat-failed", log.Fields{"device-id": dh.device.Id})
 				if timerCheck == nil {
 					// start a after func, when expired will update the state to the core
 					timerCheck = time.AfterFunc(dh.openOLT.HeartbeatFailReportInterval, func() { dh.updateStateUnreachable(ctx) })
@@ -1899,17 +1902,17 @@
 			} else {
 				if timerCheck != nil {
 					if timerCheck.Stop() {
-						logger.Debugw("got-hearbeat-within-timeout", log.Fields{"device-id": dh.device.Id})
+						logger.Debugw(ctx, "got-heartbeat-within-timeout", log.Fields{"device-id": dh.device.Id})
 					}
 					timerCheck = nil
 				}
-				logger.Debugw("hearbeat",
+				logger.Debugw(ctx, "heartbeat",
 					log.Fields{"signature": heartBeat,
 						"device-id": dh.device.Id})
 			}
 			cancel()
 		case <-dh.stopHeartbeatCheck:
-			logger.Debugw("stopping-heart-beat-check", log.Fields{"device-id": dh.device.Id})
+			logger.Debugw(ctx, "stopping-heart-beat-check", log.Fields{"device-id": dh.device.Id})
 			return
 		}
 	}
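The heartbeat loop arms a one-shot timer on the first failed check and disarms it on the next success, so the device is reported unreachable only after a sustained outage. A self-contained sketch of that pattern (durations and the check function are hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"
)

// superviseHeartbeat polls check on every tick. The fail timer is armed once
// on the first failure and fires only if no heartbeat succeeds within
// failReportAfter; any success disarms it.
func superviseHeartbeat(check func() error, interval, failReportAfter time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	var failTimer *time.Timer
	for {
		select {
		case <-ticker.C:
			if err := check(); err != nil {
				if failTimer == nil {
					failTimer = time.AfterFunc(failReportAfter, func() {
						fmt.Println("device-unreachable") // the handler would update core state here
					})
				}
			} else if failTimer != nil {
				if failTimer.Stop() {
					fmt.Println("got-heartbeat-within-timeout")
				}
				failTimer = nil
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	calls := 0
	go superviseHeartbeat(func() error {
		calls++
		if calls == 1 {
			return errors.New("no-heartbeat") // first check fails, then recovers
		}
		return nil
	}, 10*time.Millisecond, 100*time.Millisecond, stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
}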
@@ -1946,24 +1949,23 @@
 }
 
 // EnablePort to enable Pon interface
-func (dh *DeviceHandler) EnablePort(port *voltha.Port) error {
-	logger.Debugw("enable-port", log.Fields{"Device": dh.device, "port": port})
-	return dh.modifyPhyPort(port, true)
+func (dh *DeviceHandler) EnablePort(ctx context.Context, port *voltha.Port) error {
+	logger.Debugw(ctx, "enable-port", log.Fields{"Device": dh.device, "port": port})
+	return dh.modifyPhyPort(ctx, port, true)
 }
 
 // DisablePort to disable pon interface
-func (dh *DeviceHandler) DisablePort(port *voltha.Port) error {
-	logger.Debugw("disable-port", log.Fields{"Device": dh.device, "port": port})
-	return dh.modifyPhyPort(port, false)
+func (dh *DeviceHandler) DisablePort(ctx context.Context, port *voltha.Port) error {
+	logger.Debugw(ctx, "disable-port", log.Fields{"Device": dh.device, "port": port})
+	return dh.modifyPhyPort(ctx, port, false)
 }
 
 //modifyPhyPort is common function to enable and disable the port. parm :enablePort, true to enablePort and false to disablePort.
-func (dh *DeviceHandler) modifyPhyPort(port *voltha.Port, enablePort bool) error {
-	ctx := context.Background()
-	logger.Infow("modifyPhyPort", log.Fields{"port": port, "Enable": enablePort, "device-id": dh.device.Id})
+func (dh *DeviceHandler) modifyPhyPort(ctx context.Context, port *voltha.Port, enablePort bool) error {
+	logger.Infow(ctx, "modifyPhyPort", log.Fields{"port": port, "Enable": enablePort, "device-id": dh.device.Id})
 	if port.GetType() == voltha.Port_ETHERNET_NNI {
 		// Bug is opened for VOL-2505 to support NNI disable feature.
-		logger.Infow("voltha-supports-single-nni-hence-disable-of-nni-not-allowed",
+		logger.Infow(ctx, "voltha-supports-single-nni-hence-disable-of-nni-not-allowed",
 			log.Fields{"device": dh.device, "port": port})
 		return olterrors.NewErrAdapter("illegal-port-request", log.Fields{
 			"port-type":    port.GetType,
@@ -1984,7 +1986,7 @@
 		}
 		// updating interface local cache for collecting stats
 		dh.activePorts.Store(ponID, true)
-		logger.Infow("enabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
+		logger.Infow(ctx, "enabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
 	} else {
 		operStatus = voltha.OperStatus_UNKNOWN
 		out, err := dh.Client.DisablePonIf(ctx, ponIntf)
@@ -1995,7 +1997,7 @@
 		}
 		// updating interface local cache for collecting stats
 		dh.activePorts.Store(ponID, false)
-		logger.Infow("disabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
+		logger.Infow(ctx, "disabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
 	}
 	if err := dh.coreProxy.PortStateUpdate(ctx, dh.device.Id, voltha.Port_PON_OLT, port.PortNo, operStatus); err != nil {
 		return olterrors.NewErrAdapter("port-state-update-failed", log.Fields{
@@ -2006,13 +2008,13 @@
 }
 
 //disableAdminDownPorts disables the ports, if the corresponding port Adminstate is disabled on reboot and Renable device.
-func (dh *DeviceHandler) disableAdminDownPorts(device *voltha.Device) error {
+func (dh *DeviceHandler) disableAdminDownPorts(ctx context.Context, device *voltha.Device) error {
 	cloned := proto.Clone(device).(*voltha.Device)
 	// Disable the port and update the oper_port_status to core
 	// if the Admin state of the port is disabled on reboot and re-enable device.
 	for _, port := range cloned.Ports {
 		if port.AdminState == common.AdminState_DISABLED {
-			if err := dh.DisablePort(port); err != nil {
+			if err := dh.DisablePort(ctx, port); err != nil {
 				return olterrors.NewErrAdapter("port-disable-failed", log.Fields{
 					"device-id": dh.device.Id,
 					"port":      port}, err)
@@ -2023,8 +2025,8 @@
 }
 
 //populateActivePorts to populate activePorts map
-func (dh *DeviceHandler) populateActivePorts(device *voltha.Device) {
-	logger.Infow("populateActiveports", log.Fields{"Device": device})
+func (dh *DeviceHandler) populateActivePorts(ctx context.Context, device *voltha.Device) {
+	logger.Infow(ctx, "populateActiveports", log.Fields{"Device": device})
 	for _, port := range device.Ports {
 		if port.Type == voltha.Port_ETHERNET_NNI {
 			if port.OperStatus == voltha.OperStatus_ACTIVE {
@@ -2045,7 +2047,7 @@
 
 // ChildDeviceLost deletes ONU and clears pon resources related to it.
 func (dh *DeviceHandler) ChildDeviceLost(ctx context.Context, pPortNo uint32, onuID uint32) error {
-	logger.Debugw("child-device-lost", log.Fields{"pdeviceID": dh.device.Id})
+	logger.Debugw(ctx, "child-device-lost", log.Fields{"pdeviceID": dh.device.Id})
 	intfID := PortNoToIntfID(pPortNo, voltha.Port_PON_OLT)
 	onuKey := dh.formOnuKey(intfID, onuID)
 	onuDevice, ok := dh.onus.Load(onuKey)
@@ -2092,21 +2094,21 @@
 	//clear PON resources associated with ONU
 	var onuGemData []rsrcMgr.OnuGemInfo
 	if onuMgr, ok := dh.resourceMgr.ResourceMgrs[intfID]; !ok {
-		logger.Warnw("failed-to-get-resource-manager-for-interface-Id", log.Fields{
+		logger.Warnw(ctx, "failed-to-get-resource-manager-for-interface-Id", log.Fields{
 			"device-id": dh.device.Id,
 			"intf-id":   intfID})
 	} else {
 		if err := onuMgr.GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
-			logger.Warnw("failed-to-get-onu-info-for-pon-port", log.Fields{
+			logger.Warnw(ctx, "failed-to-get-onu-info-for-pon-port", log.Fields{
 				"device-id": dh.device.Id,
 				"intf-id":   intfID,
 				"error":     err})
 		} else {
 			for i, onu := range onuGemData {
 				if onu.OnuID == onuID && onu.SerialNumber == onuDevice.(*OnuDevice).serialNumber {
-					logger.Debugw("onu-data", log.Fields{"onu": onu})
+					logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
 					if err := dh.clearUNIData(ctx, &onu); err != nil {
-						logger.Warnw("failed-to-clear-uni-data-for-onu", log.Fields{
+						logger.Warnw(ctx, "failed-to-clear-uni-data-for-onu", log.Fields{
 							"device-id":  dh.device.Id,
 							"onu-device": onu,
 							"error":      err})
@@ -2118,14 +2120,14 @@
 					onuGemData = append(onuGemData[:i], onuGemData[i+1:]...)
 					err := onuMgr.AddOnuGemInfo(ctx, intfID, onuGemData)
 					if err != nil {
-						logger.Warnw("persistence-update-onu-gem-info-failed", log.Fields{
+						logger.Warnw(ctx, "persistence-update-onu-gem-info-failed", log.Fields{
 							"intf-id":    intfID,
 							"onu-device": onu,
 							"onu-gem":    onuGemData,
 							"error":      err})
 						//Not returning error on cleanup.
 					}
-					logger.Debugw("removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGemData})
+					logger.Debugw(ctx, "removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGemData})
 					dh.resourceMgr.FreeonuID(ctx, intfID, []uint32{onu.OnuID})
 					break
 				}
@@ -2157,13 +2159,13 @@
 	return InvalidPort
 }
 
-func (dh *DeviceHandler) incrementActiveFlowRemoveCount(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) incrementActiveFlowRemoveCount(ctx context.Context, flow *of.OfpFlowStats) {
 	inPort, outPort := getPorts(flow)
-	logger.Debugw("increment-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+	logger.Debugw(ctx, "increment-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
 	if inPort != InvalidPort && outPort != InvalidPort {
 		_, intfID, onuID, uniID := ExtractAccessFromFlow(inPort, outPort)
 		key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
-		logger.Debugw("increment-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+		logger.Debugw(ctx, "increment-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 
 		dh.lockDevice.Lock()
 		defer dh.lockDevice.Unlock()
@@ -2177,29 +2179,29 @@
 		flowRemoveData.pendingFlowRemoveCount++
 		dh.pendingFlowRemoveDataPerSubscriber[key] = flowRemoveData
 
-		logger.Debugw("current-flow-remove-count–increment",
+		logger.Debugw(ctx, "current-flow-remove-count–increment",
 			log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID,
 				"currCnt": dh.pendingFlowRemoveDataPerSubscriber[key].pendingFlowRemoveCount})
 	}
 }
 
-func (dh *DeviceHandler) decrementActiveFlowRemoveCount(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) decrementActiveFlowRemoveCount(ctx context.Context, flow *of.OfpFlowStats) {
 	inPort, outPort := getPorts(flow)
-	logger.Debugw("decrement-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+	logger.Debugw(ctx, "decrement-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
 	if inPort != InvalidPort && outPort != InvalidPort {
 		_, intfID, onuID, uniID := ExtractAccessFromFlow(uint32(inPort), uint32(outPort))
 		key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
-		logger.Debugw("decrement-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+		logger.Debugw(ctx, "decrement-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 
 		dh.lockDevice.Lock()
 		defer dh.lockDevice.Unlock()
 		if val, ok := dh.pendingFlowRemoveDataPerSubscriber[key]; !ok {
-			logger.Fatalf("flow-remove-key-not-found", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+			logger.Fatalw(ctx, "flow-remove-key-not-found", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 		} else {
 			if val.pendingFlowRemoveCount > 0 {
 				val.pendingFlowRemoveCount--
 			}
-			logger.Debugw("current-flow-remove-count-after-decrement",
+			logger.Debugw(ctx, "current-flow-remove-count-after-decrement",
 				log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID,
 					"currCnt": dh.pendingFlowRemoveDataPerSubscriber[key].pendingFlowRemoveCount})
 			// If all flow removes have finished, then close the channel to signal the receiver
@@ -2214,19 +2216,19 @@
 	}
 }
 
-func (dh *DeviceHandler) waitForFlowRemoveToFinish(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) waitForFlowRemoveToFinish(ctx context.Context, flow *of.OfpFlowStats) {
 	var flowRemoveData pendingFlowRemoveData
 	var ok bool
 	inPort, outPort := getPorts(flow)
-	logger.Debugw("wait-for-flow-remove-to-finish-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+	logger.Debugw(ctx, "wait-for-flow-remove-to-finish-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
 	if inPort != InvalidPort && outPort != InvalidPort {
 		_, intfID, onuID, uniID := ExtractAccessFromFlow(inPort, outPort)
 		key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
-		logger.Debugw("wait-for-flow-remove-to-finish-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+		logger.Debugw(ctx, "wait-for-flow-remove-to-finish-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 
 		dh.lockDevice.RLock()
 		if flowRemoveData, ok = dh.pendingFlowRemoveDataPerSubscriber[key]; !ok {
-			logger.Debugw("no-pending-flow-to-remove", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+			logger.Debugw(ctx, "no-pending-flow-to-remove", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 			dh.lockDevice.RUnlock()
 			return
 		}
@@ -2235,7 +2237,7 @@
 		// Wait for all flow removes to finish first
 		<-flowRemoveData.allFlowsRemoved
 
-		logger.Debugw("all-flows-cleared--handling-flow-add-now", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+		logger.Debugw(ctx, "all-flows-cleared--handling-flow-add-now", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
 	}
 }
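The three functions above implement a per-subscriber gate: flow removes increment a count, and a pending flow add blocks on a channel that is closed when the count drains back to zero. A single-key sketch of the same mechanism (the real handler keys by {intfID, onuID, uniID} under dh.lockDevice):

package main

import (
	"fmt"
	"sync"
	"time"
)

type flowGate struct {
	mu      sync.Mutex
	pending int
	allDone chan struct{}
}

func newFlowGate() *flowGate {
	g := &flowGate{allDone: make(chan struct{})}
	close(g.allDone) // nothing pending yet, so waiters pass straight through
	return g
}

// removeStarted re-arms the gate when the first remove goes in flight.
func (g *flowGate) removeStarted() {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.pending == 0 {
		g.allDone = make(chan struct{})
	}
	g.pending++
}

// removeFinished closes the channel when the last remove completes, waking
// every goroutine blocked in waitForRemoves.
func (g *flowGate) removeFinished() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.pending--
	if g.pending == 0 {
		close(g.allDone)
	}
}

func (g *flowGate) waitForRemoves() {
	g.mu.Lock()
	done := g.allDone
	g.mu.Unlock()
	<-done
}

func main() {
	g := newFlowGate()
	g.removeStarted()
	go func() {
		time.Sleep(10 * time.Millisecond) // simulate the remove completing
		g.removeFinished()
	}()
	g.waitForRemoves()
	fmt.Println("all-flows-cleared--handling-flow-add-now")
}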
 
@@ -2287,11 +2289,11 @@
 }
 
 // setOnuITUPonAlarmConfig sets the parameters in the openolt agent for raising the ONU ITU PON alarms.
-func (dh *DeviceHandler) setOnuITUPonAlarmConfig(config *oop.OnuItuPonAlarm) error {
+func (dh *DeviceHandler) setOnuITUPonAlarmConfig(ctx context.Context, config *oop.OnuItuPonAlarm) error {
 	if _, err := dh.Client.OnuItuPonAlarmSet(context.Background(), config); err != nil {
 		return err
 	}
-	logger.Debugw("onu-itu-pon-alarm-config-set-successful", log.Fields{"config": config})
+	logger.Debugw(ctx, "onu-itu-pon-alarm-config-set-successful", log.Fields{"config": config})
 	return nil
 }
 
diff --git a/internal/pkg/core/device_handler_test.go b/internal/pkg/core/device_handler_test.go
index 531c27b..38affa0 100644
--- a/internal/pkg/core/device_handler_test.go
+++ b/internal/pkg/core/device_handler_test.go
@@ -222,6 +222,7 @@
 	return dh
 }
 func Test_generateMacFromHost(t *testing.T) {
+	ctx := context.Background()
 	type args struct {
 		host string
 	}
@@ -238,7 +239,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := generateMacFromHost(tt.args.host)
+			got, err := generateMacFromHost(ctx, tt.args.host)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("generateMacFromHost() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -313,6 +314,7 @@
 }
 
 func TestDeviceHandler_GetChildDevice(t *testing.T) {
+	ctx := context.Background()
 	dh1 := newMockDeviceHandler()
 	dh2 := negativeDeviceHandler()
 	type args struct {
@@ -351,7 +353,7 @@
 	*/
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := tt.devicehandler.GetChildDevice(tt.args.parentPort, tt.args.onuID)
+			got, err := tt.devicehandler.GetChildDevice(ctx, tt.args.parentPort, tt.args.onuID)
 			if reflect.TypeOf(err) != tt.errType || !sparseCompare([]string{"Id", "ParentId", "ParentPortNo"}, tt.want, got) {
 				t.Errorf("GetportLabel() => want=(%v, %v) got=(%v, %v)",
 					tt.want, tt.errType, got, reflect.TypeOf(err))
@@ -395,6 +397,7 @@
 }
 
 func TestDeviceHandler_ProcessInterAdapterMessage(t *testing.T) {
+	ctx := context.Background()
 	dh := newMockDeviceHandler()
 	proxyAddr := dh.device.ProxyAddress
 	body := &ic.InterAdapterOmciMessage{
@@ -410,18 +413,18 @@
 	var err error
 
 	if marshalledData, err = ptypes.MarshalAny(body); err != nil {
-		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
 	}
 
 	var marshalledData1 *any.Any
 
 	if marshalledData1, err = ptypes.MarshalAny(body2); err != nil {
-		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
 	}
 	var marshalledData2 *any.Any
 
 	if marshalledData2, err = ptypes.MarshalAny(body3); err != nil {
-		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
 	}
 	type args struct {
 		msg *ic.InterAdapterMessage
@@ -505,7 +508,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			if err := dh.ProcessInterAdapterMessage(tt.args.msg); reflect.TypeOf(err) != tt.wantErr {
+			if err := dh.ProcessInterAdapterMessage(ctx, tt.args.msg); reflect.TypeOf(err) != tt.wantErr {
 				t.Errorf("DeviceHandler.ProcessInterAdapterMessage() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -513,6 +516,7 @@
 }
 
 func TestDeviceHandler_sendProxiedMessage(t *testing.T) {
+	ctx := context.Background()
 	dh1 := newMockDeviceHandler()
 	dh2 := negativeDeviceHandler()
 	device1 := &voltha.Device{
@@ -566,7 +570,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			tt.devicehandler.sendProxiedMessage(tt.args.onuDevice, tt.args.omciMsg)
+			tt.devicehandler.sendProxiedMessage(ctx, tt.args.onuDevice, tt.args.omciMsg)
 		})
 	}
 }
@@ -590,7 +594,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			tt.devicehandler.SendPacketInToCore(tt.args.logicalPort, tt.args.packetPayload)
+			tt.devicehandler.SendPacketInToCore(context.Background(), tt.args.logicalPort, tt.args.packetPayload)
 		})
 	}
 }
@@ -613,7 +617,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			if err := tt.devicehandler.DisableDevice(tt.args.device); (err != nil) != tt.wantErr {
+			if err := tt.devicehandler.DisableDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
 				t.Errorf("DeviceHandler.DisableDevice() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -639,7 +643,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			dh := tt.devicehandler
-			if err := dh.ReenableDevice(tt.args.device); (err != nil) != tt.wantErr {
+			if err := dh.ReenableDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
 				t.Errorf("DeviceHandler.ReenableDevice() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -666,7 +670,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			if err := tt.devicehandler.RebootDevice(tt.args.device); (err != nil) != tt.wantErr {
+			if err := tt.devicehandler.RebootDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
 				t.Errorf("DeviceHandler.RebootDevice() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -783,7 +787,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			tt.devicehandler.addPort(tt.args.intfID, tt.args.portType, tt.args.state)
+			tt.devicehandler.addPort(context.Background(), tt.args.intfID, tt.args.portType, tt.args.state)
 		})
 	}
 }
@@ -1082,7 +1086,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			_, err := tt.devicehandler.populateDeviceInfo()
+			_, err := tt.devicehandler.populateDeviceInfo(context.Background())
 			if (err != nil) != tt.wantErr {
 				t.Errorf("DeviceHandler.populateDeviceInfo() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -1156,7 +1160,7 @@
 				time.Sleep(1 * time.Second) // simulated wait time to stop startCollector
 				tt.args.dh.stopCollector <- true
 			}()
-			startCollector(tt.args.dh)
+			startCollector(context.Background(), tt.args.dh)
 		})
 	}
 }
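On the test side the migration is mechanical: each table-driven test acquires one context.Background() and passes it through the new ctx-first signatures. A minimal sketch (generateMac is a hypothetical stand-in for generateMacFromHost):

package main

import (
	"context"
	"fmt"
	"testing"
)

// generateMac only illustrates the ctx-first shape the tests now exercise;
// the returned MAC is a documentation address, not real derivation logic.
func generateMac(ctx context.Context, host string) (string, error) {
	if host == "" {
		return "", fmt.Errorf("empty-host")
	}
	_ = ctx // the real function would log with ctx while resolving host
	return "00:00:5e:00:53:01", nil
}

func TestGenerateMac(t *testing.T) {
	ctx := context.Background() // one context per test, as in the patch
	tests := []struct {
		name    string
		host    string
		wantErr bool
	}{
		{"ok", "local-olt", false},
		{"empty", "", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if _, err := generateMac(ctx, tt.host); (err != nil) != tt.wantErr {
				t.Errorf("generateMac() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}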
diff --git a/internal/pkg/core/olt_platform.go b/internal/pkg/core/olt_platform.go
index b5e8f36..4b36102 100644
--- a/internal/pkg/core/olt_platform.go
+++ b/internal/pkg/core/olt_platform.go
@@ -18,6 +18,7 @@
 package core
 
 import (
+	"context"
 	"github.com/opencord/voltha-lib-go/v3/pkg/flows"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
@@ -123,10 +124,10 @@
 var controllerPorts = []uint32{0xfffd, 0x7ffffffd, 0xfffffffd}
 
 //MkUniPortNum returns new UNIportNum based on intfID, inuID and uniID
-func MkUniPortNum(intfID, onuID, uniID uint32) uint32 {
+func MkUniPortNum(ctx context.Context, intfID, onuID, uniID uint32) uint32 {
 	var limit = int(onuID)
 	if limit > MaxOnusPerPon {
-		logger.Warn("exceeded-the-max-onus-per-pon")
+		logger.Warn(ctx, "exceeded-the-max-onus-per-pon")
 	}
 	return (intfID << (bitsForUniID + bitsForONUID)) | (onuID << bitsForUniID) | uniID
 }
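MkUniPortNum packs three identifiers into one port number by bit-shifting, and the *FromPortNum helpers invert it with masks. A worked sketch (the 8/4 bit widths are illustrative assumptions, not necessarily the adapter's constants):

package main

import "fmt"

const (
	bitsForUniID = 4
	bitsForONUID = 8
)

// mkUniPortNum lays out the fields as [intfID | onuID | uniID], low bits last.
func mkUniPortNum(intfID, onuID, uniID uint32) uint32 {
	return (intfID << (bitsForUniID + bitsForONUID)) | (onuID << bitsForUniID) | uniID
}

// The inverses shift the field down and mask off everything above it.
func onuIDFromUniPortNum(portNum uint32) uint32 {
	return (portNum >> bitsForUniID) & ((1 << bitsForONUID) - 1)
}

func uniIDFromPortNum(portNum uint32) uint32 {
	return portNum & ((1 << bitsForUniID) - 1)
}

func main() {
	p := mkUniPortNum(1, 2, 3) // intf 1, onu 2, uni 3
	// port = (1<<12) | (2<<4) | 3 = 4096 + 32 + 3 = 4131
	fmt.Printf("port=%d onu=%d uni=%d\n", p, onuIDFromUniPortNum(p), uniIDFromPortNum(p))
}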
@@ -169,9 +170,9 @@
 }
 
 //IntfIDFromNniPortNum returns Intf ID derived from portNum
-func IntfIDFromNniPortNum(portNum uint32) (uint32, error) {
+func IntfIDFromNniPortNum(ctx context.Context, portNum uint32) (uint32, error) {
 	if portNum < minNniIntPortNum || portNum > maxNniPortNum {
-		logger.Errorw("nniportnumber-is-not-in-valid-range", log.Fields{"portnum": portNum})
+		logger.Errorw(ctx, "nniportnumber-is-not-in-valid-range", log.Fields{"portnum": portNum})
 		return uint32(0), olterrors.ErrInvalidPortRange
 	}
 	return (portNum & 0xFFFF), nil
@@ -222,7 +223,7 @@
 }
 
 //FlowExtractInfo fetches uniport from the flow, based on which it gets and returns ponInf, onuID, uniID, inPort and ethType
-func FlowExtractInfo(flow *ofp.OfpFlowStats, flowDirection string) (uint32, uint32, uint32, uint32, uint32, uint32, error) {
+func FlowExtractInfo(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) (uint32, uint32, uint32, uint32, uint32, uint32, error) {
 	var uniPortNo uint32
 	var ponIntf uint32
 	var onuID uint32
@@ -268,7 +269,7 @@
 	onuID = OnuIDFromUniPortNum(uniPortNo)
 	uniID = UniIDFromPortNum(uniPortNo)
 
-	logger.Debugw("flow-extract-info-result",
+	logger.Debugw(ctx, "flow-extract-info-result",
 		log.Fields{
 			"uniportno": uniPortNo,
 			"pon-intf":  ponIntf,
diff --git a/internal/pkg/core/olt_platform_test.go b/internal/pkg/core/olt_platform_test.go
index 211459f..71414c1 100644
--- a/internal/pkg/core/olt_platform_test.go
+++ b/internal/pkg/core/olt_platform_test.go
@@ -18,14 +18,14 @@
 package core
 
 import (
-	"math"
-	"reflect"
-	"testing"
-
+	"context"
 	fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
 	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"math"
+	"reflect"
+	"testing"
 )
 
 func TestMkUniPortNum(t *testing.T) {
@@ -47,7 +47,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := MkUniPortNum(tt.args.intfID, tt.args.onuID, tt.args.uniID); got != tt.want {
+			if got := MkUniPortNum(context.Background(), tt.args.intfID, tt.args.onuID, tt.args.uniID); got != tt.want {
 				t.Errorf("MkUniPortNum() = %v, want %v", got, tt.want)
 			} else {
 				t.Logf("Expected %v , Actual %v \n", tt.want, got)
@@ -187,7 +187,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := IntfIDFromNniPortNum(tt.args.portNum)
+			got, err := IntfIDFromNniPortNum(context.Background(), tt.args.portNum)
 			if got != tt.want || err != tt.wantErr {
 				t.Errorf("IntfIDFromNniPortNum(): FOR[%v] WANT[%v and %v] GOT[%v and %v]",
 					tt.args.portNum, tt.want, tt.wantErr, got, err)
@@ -350,7 +350,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, got1, got2, got3, got4, got5, err := FlowExtractInfo(tt.args.flow, tt.args.flowDirection)
+			got, got1, got2, got3, got4, got5, err := FlowExtractInfo(context.Background(), tt.args.flow, tt.args.flowDirection)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("FlowExtractInfo() error = %v, wantErr %v", err, tt.wantErr)
 				return
diff --git a/internal/pkg/core/olt_state_transitions.go b/internal/pkg/core/olt_state_transitions.go
index cd6df1d..58e2251 100644
--- a/internal/pkg/core/olt_state_transitions.go
+++ b/internal/pkg/core/olt_state_transitions.go
@@ -149,7 +149,7 @@
 
 	// Check whether the transtion is valid from current state
 	if !tMap.isValidTransition(trigger) {
-		logger.Errorw("invalid-transition-triggered",
+		logger.Errorw(ctx, "invalid-transition-triggered",
 			log.Fields{
 				"current-state": tMap.currentDeviceState,
 				"trigger":       trigger})
@@ -159,31 +159,31 @@
 	// Invoke the before handlers
 	beforeHandlers := tMap.transitions[trigger].before
 	if beforeHandlers == nil {
-		logger.Debugw("no-handlers-for-before", log.Fields{"trigger": trigger})
+		logger.Debugw(ctx, "no-handlers-for-before", log.Fields{"trigger": trigger})
 	}
 	for _, handler := range beforeHandlers {
-		logger.Debugw("running-before-handler", log.Fields{"handler": funcName(handler)})
+		logger.Debugw(ctx, "running-before-handler", log.Fields{"handler": funcName(handler)})
 		if err := handler(ctx); err != nil {
 			// TODO handle error
-			logger.Error(err)
+			logger.Error(ctx, err)
 			return
 		}
 	}
 
 	// Update the state
 	tMap.currentDeviceState = tMap.transitions[trigger].currentState
-	logger.Debugw("updated-device-state ", log.Fields{"current-device-state": tMap.currentDeviceState})
+	logger.Debugw(ctx, "updated-device-state ", log.Fields{"current-device-state": tMap.currentDeviceState})
 
 	// Invoke the after handlers
 	afterHandlers := tMap.transitions[trigger].after
 	if afterHandlers == nil {
-		logger.Debugw("no-handlers-for-after", log.Fields{"trigger": trigger})
+		logger.Debugw(ctx, "no-handlers-for-after", log.Fields{"trigger": trigger})
 	}
 	for _, handler := range afterHandlers {
-		logger.Debugw("running-after-handler", log.Fields{"handler": funcName(handler)})
+		logger.Debugw(ctx, "running-after-handler", log.Fields{"handler": funcName(handler)})
 		if err := handler(ctx); err != nil {
 			// TODO handle error
-			logger.Error(err)
+			logger.Error(ctx, err)
 			return
 		}
 	}
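Each state transition runs its before-handlers, commits the new state, then runs its after-handlers, and every handler now receives ctx so its logs carry the caller's correlation data. A reduced sketch of that loop (types and states are stand-ins):

package main

import (
	"context"
	"fmt"
)

type handler func(ctx context.Context) error

type transition struct {
	before, after []handler
	nextState     string
}

// run mirrors the before -> state update -> after sequence; any handler
// error aborts the remainder of the sequence.
func run(ctx context.Context, t transition, state *string) {
	for _, h := range t.before {
		if err := h(ctx); err != nil {
			fmt.Println("before-handler-failed:", err)
			return
		}
	}
	*state = t.nextState
	for _, h := range t.after {
		if err := h(ctx); err != nil {
			fmt.Println("after-handler-failed:", err)
			return
		}
	}
}

func main() {
	state := "init"
	tr := transition{
		before:    []handler{func(ctx context.Context) error { fmt.Println("running-before-handler"); return nil }},
		after:     []handler{func(ctx context.Context) error { fmt.Println("running-after-handler"); return nil }},
		nextState: "up",
	}
	run(context.Background(), tr, &state)
	fmt.Println("updated-device-state:", state)
}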
diff --git a/internal/pkg/core/openolt.go b/internal/pkg/core/openolt.go
index 4a25dba..fe33876 100644
--- a/internal/pkg/core/openolt.go
+++ b/internal/pkg/core/openolt.go
@@ -74,16 +74,16 @@
 
 //Start starts (logs) the device manager
 func (oo *OpenOLT) Start(ctx context.Context) error {
-	logger.Info("starting-device-manager")
-	logger.Info("device-manager-started")
+	logger.Info(ctx, "starting-device-manager")
+	logger.Info(ctx, "device-manager-started")
 	return nil
 }
 
 //Stop terminates the session
 func (oo *OpenOLT) Stop(ctx context.Context) error {
-	logger.Info("stopping-device-manager")
+	logger.Info(ctx, "stopping-device-manager")
 	oo.exitChannel <- 1
-	logger.Info("device-manager-stopped")
+	logger.Info(ctx, "device-manager-stopped")
 	return nil
 }
 
@@ -92,10 +92,10 @@
 		// Returned response only of the ctx has not been canceled/timeout/etc
 		// Channel is automatically closed when a context is Done
 		ch <- result
-		logger.Debugw("sendResponse", log.Fields{"result": result})
+		logger.Debugw(ctx, "sendResponse", log.Fields{"result": result})
 	} else {
 		// Should the transaction be reverted back?
-		logger.Debugw("sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
+		logger.Debugw(ctx, "sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
 	}
 }
 
@@ -123,24 +123,23 @@
 }
 
 //createDeviceTopic returns
-func (oo *OpenOLT) createDeviceTopic(device *voltha.Device) error {
-	logger.Infow("create-device-topic", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) createDeviceTopic(ctx context.Context, device *voltha.Device) error {
+	logger.Infow(ctx, "create-device-topic", log.Fields{"deviceId": device.Id})
 	defaultTopic := oo.kafkaICProxy.GetDefaultTopic()
 	deviceTopic := kafka.Topic{Name: defaultTopic.Name + "_" + device.Id}
 	// TODO for the offset
-	if err := oo.kafkaICProxy.SubscribeWithDefaultRequestHandler(deviceTopic, 0); err != nil {
+	if err := oo.kafkaICProxy.SubscribeWithDefaultRequestHandler(ctx, deviceTopic, 0); err != nil {
 		return olterrors.NewErrAdapter("subscribe-for-device-topic-failed", log.Fields{"device-topic": deviceTopic}, err)
 	}
 	return nil
 }
 
 // Adopt_device creates a new device handler if not present already and then adopts the device
-func (oo *OpenOLT) Adopt_device(device *voltha.Device) error {
-	ctx := context.Background()
+func (oo *OpenOLT) Adopt_device(ctx context.Context, device *voltha.Device) error {
 	if device == nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil).Log()
 	}
-	logger.Infow("adopt-device", log.Fields{"deviceId": device.Id})
+	logger.Infow(ctx, "adopt-device", log.Fields{"deviceId": device.Id})
 	var handler *DeviceHandler
 	if handler = oo.getDeviceHandler(device.Id); handler == nil {
 		handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
@@ -153,8 +152,8 @@
 }
 
 //Get_ofp_device_info returns OFP information for the given device
-func (oo *OpenOLT) Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error) {
-	logger.Infow("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error) {
+	logger.Infow(ctx, "Get_ofp_device_info", log.Fields{"deviceId": device.Id})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
 		return handler.GetOfpDeviceInfo(device)
 	}
@@ -162,41 +161,40 @@
 }
 
 //Process_inter_adapter_message sends messages to a target device (between adapters)
-func (oo *OpenOLT) Process_inter_adapter_message(msg *ic.InterAdapterMessage) error {
-	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
+func (oo *OpenOLT) Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error {
+	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
 	targetDevice := msg.Header.ProxyDeviceId // Request?
 	if targetDevice == "" && msg.Header.ToDeviceId != "" {
 		// Typical response
 		targetDevice = msg.Header.ToDeviceId
 	}
 	if handler := oo.getDeviceHandler(targetDevice); handler != nil {
-		return handler.ProcessInterAdapterMessage(msg)
+		return handler.ProcessInterAdapterMessage(ctx, msg)
 	}
 	return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": targetDevice}, nil)
 }
 
 //Adapter_descriptor not implemented
-func (oo *OpenOLT) Adapter_descriptor() error {
+func (oo *OpenOLT) Adapter_descriptor(ctx context.Context) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Device_types unimplemented
-func (oo *OpenOLT) Device_types() (*voltha.DeviceTypes, error) {
+func (oo *OpenOLT) Device_types(ctx context.Context) (*voltha.DeviceTypes, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Health  returns unimplemented
-func (oo *OpenOLT) Health() (*voltha.HealthStatus, error) {
+func (oo *OpenOLT) Health(ctx context.Context) (*voltha.HealthStatus, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Reconcile_device unimplemented
-func (oo *OpenOLT) Reconcile_device(device *voltha.Device) error {
-	ctx := context.Background()
+func (oo *OpenOLT) Reconcile_device(ctx context.Context, device *voltha.Device) error {
 	if device == nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil)
 	}
-	logger.Infow("reconcile-device", log.Fields{"deviceId": device.Id})
+	logger.Infow(ctx, "reconcile-device", log.Fields{"deviceId": device.Id})
 	var handler *DeviceHandler
 	if handler = oo.getDeviceHandler(device.Id); handler == nil {
 		handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
@@ -208,49 +206,48 @@
 }
 
 //Abandon_device unimplemented
-func (oo *OpenOLT) Abandon_device(device *voltha.Device) error {
+func (oo *OpenOLT) Abandon_device(ctx context.Context, device *voltha.Device) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Disable_device disables the given device
-func (oo *OpenOLT) Disable_device(device *voltha.Device) error {
-	logger.Infow("disable-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Disable_device(ctx context.Context, device *voltha.Device) error {
+	logger.Infow(ctx, "disable-device", log.Fields{"deviceId": device.Id})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
-		return handler.DisableDevice(device)
+		return handler.DisableDevice(ctx, device)
 	}
 	return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
 }
 
 //Reenable_device enables the olt device after disable
-func (oo *OpenOLT) Reenable_device(device *voltha.Device) error {
-	logger.Infow("reenable-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Reenable_device(ctx context.Context, device *voltha.Device) error {
+	logger.Infow(ctx, "reenable-device", log.Fields{"deviceId": device.Id})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
-		return handler.ReenableDevice(device)
+		return handler.ReenableDevice(ctx, device)
 	}
 	return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
 }
 
 //Reboot_device reboots the given device
-func (oo *OpenOLT) Reboot_device(device *voltha.Device) error {
-	logger.Infow("reboot-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Reboot_device(ctx context.Context, device *voltha.Device) error {
+	logger.Infow(ctx, "reboot-device", log.Fields{"deviceId": device.Id})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
-		return handler.RebootDevice(device)
+		return handler.RebootDevice(ctx, device)
 	}
 	return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
 }
 
 //Self_test_device unimplented
-func (oo *OpenOLT) Self_test_device(device *voltha.Device) error {
+func (oo *OpenOLT) Self_test_device(ctx context.Context, device *voltha.Device) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Delete_device unimplemented
-func (oo *OpenOLT) Delete_device(device *voltha.Device) error {
-	logger.Infow("delete-device", log.Fields{"deviceId": device.Id})
-	ctx := context.Background()
+func (oo *OpenOLT) Delete_device(ctx context.Context, device *voltha.Device) error {
+	logger.Infow(ctx, "delete-device", log.Fields{"deviceId": device.Id})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
 		if err := handler.DeleteDevice(ctx, device); err != nil {
-			logger.Errorw("failed-to-handle-delete-device", log.Fields{"device-id": device.Id})
+			logger.Errorw(ctx, "failed-to-handle-delete-device", log.Fields{"device-id": device.Id})
 		}
 		oo.deleteDeviceHandlerToMap(handler)
 		return nil
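The openolt.go entry points all follow one shape after this change: take ctx first, look up the per-device handler by id, and forward ctx into it. A simplified sketch of that delegation (types are stand-ins):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

type deviceHandler struct{ id string }

func (dh *deviceHandler) DisableDevice(ctx context.Context) error {
	fmt.Println("disable-device", dh.id) // real handler logs via logger.X(ctx, ...)
	return nil
}

type openOLT struct {
	mu       sync.RWMutex
	handlers map[string]*deviceHandler
}

func (oo *openOLT) getDeviceHandler(id string) *deviceHandler {
	oo.mu.RLock()
	defer oo.mu.RUnlock()
	return oo.handlers[id]
}

// Disable_device mirrors the ctx-first adapter method shape used above.
func (oo *openOLT) Disable_device(ctx context.Context, id string) error {
	if handler := oo.getDeviceHandler(id); handler != nil {
		return handler.DisableDevice(ctx)
	}
	return errors.New("device-handler-not-found: " + id)
}

func main() {
	oo := &openOLT{handlers: map[string]*deviceHandler{"olt-1": {id: "olt-1"}}}
	_ = oo.Disable_device(context.Background(), "olt-1")
}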
@@ -259,19 +256,18 @@
 }
 
 //Get_device_details unimplemented
-func (oo *OpenOLT) Get_device_details(device *voltha.Device) error {
+func (oo *OpenOLT) Get_device_details(ctx context.Context, device *voltha.Device) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Update_flows_bulk returns
-func (oo *OpenOLT) Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
+func (oo *OpenOLT) Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Update_flows_incrementally updates (add/remove) the flows on a given device
-func (oo *OpenOLT) Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
-	logger.Debugw("Update_flows_incrementally", log.Fields{"deviceId": device.Id, "flows": flows, "flowMetadata": flowMetadata})
-	ctx := context.Background()
+func (oo *OpenOLT) Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
+	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"deviceId": device.Id, "flows": flows, "flowMetadata": flowMetadata})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
 		return handler.UpdateFlowsIncrementally(ctx, device, flows, groups, flowMetadata)
 	}
@@ -279,19 +275,18 @@
 }
 
 //Update_pm_config updates the PM configuration on the given device
-func (oo *OpenOLT) Update_pm_config(device *voltha.Device, pmConfigs *voltha.PmConfigs) error {
-	logger.Debugw("Update_pm_config", log.Fields{"device-id": device.Id, "pm-configs": pmConfigs})
+func (oo *OpenOLT) Update_pm_config(ctx context.Context, device *voltha.Device, pmConfigs *voltha.PmConfigs) error {
+	logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pm-configs": pmConfigs})
 	if handler := oo.getDeviceHandler(device.Id); handler != nil {
-		handler.UpdatePmConfig(pmConfigs)
+		handler.UpdatePmConfig(ctx, pmConfigs)
 		return nil
 	}
 	return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
 }
 
 //Receive_packet_out sends packet out to the device
-func (oo *OpenOLT) Receive_packet_out(deviceID string, egressPortNo int, packet *openflow_13.OfpPacketOut) error {
-	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceID, "egress_port_no": egressPortNo, "pkt": packet})
-	ctx := context.Background()
+func (oo *OpenOLT) Receive_packet_out(ctx context.Context, deviceID string, egressPortNo int, packet *openflow_13.OfpPacketOut) error {
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"deviceId": deviceID, "egress_port_no": egressPortNo, "pkt": packet})
 	if handler := oo.getDeviceHandler(deviceID); handler != nil {
 		return handler.PacketOut(ctx, egressPortNo, packet)
 	}
@@ -299,55 +294,55 @@
 }
 
 //Suppress_event unimplemented
-func (oo *OpenOLT) Suppress_event(filter *voltha.EventFilter) error {
+func (oo *OpenOLT) Suppress_event(ctx context.Context, filter *voltha.EventFilter) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Unsuppress_event unimplemented
-func (oo *OpenOLT) Unsuppress_event(filter *voltha.EventFilter) error {
+func (oo *OpenOLT) Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error {
 	return olterrors.ErrNotImplemented
 }
 
 //Download_image unimplemented
-func (oo *OpenOLT) Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Get_image_download_status unimplemented
-func (oo *OpenOLT) Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Cancel_image_download unimplemented
-func (oo *OpenOLT) Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Activate_image_update unimplemented
-func (oo *OpenOLT) Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Revert_image_update unimplemented
-func (oo *OpenOLT) Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 // Enable_port enables the given PON/NNI interface
-func (oo *OpenOLT) Enable_port(deviceID string, port *voltha.Port) error {
-	logger.Infow("Enable_port", log.Fields{"deviceId": deviceID, "port": port})
-	return oo.enableDisablePort(deviceID, port, true)
+func (oo *OpenOLT) Enable_port(ctx context.Context, deviceID string, port *voltha.Port) error {
+	logger.Infow(ctx, "Enable_port", log.Fields{"deviceId": deviceID, "port": port})
+	return oo.enableDisablePort(ctx, deviceID, port, true)
 }
 
 // Disable_port disables the given PON/NNI interface
-func (oo *OpenOLT) Disable_port(deviceID string, port *voltha.Port) error {
-	logger.Infow("Disable_port", log.Fields{"deviceId": deviceID, "port": port})
-	return oo.enableDisablePort(deviceID, port, false)
+func (oo *OpenOLT) Disable_port(ctx context.Context, deviceID string, port *voltha.Port) error {
+	logger.Infow(ctx, "Disable_port", log.Fields{"deviceId": deviceID, "port": port})
+	return oo.enableDisablePort(ctx, deviceID, port, false)
 }
 
 // enableDisablePort enables or disables the given PON/NNI interface
-func (oo *OpenOLT) enableDisablePort(deviceID string, port *voltha.Port, enablePort bool) error {
-	logger.Infow("enableDisablePort", log.Fields{"deviceId": deviceID, "port": port})
+func (oo *OpenOLT) enableDisablePort(ctx context.Context, deviceID string, port *voltha.Port, enablePort bool) error {
+	logger.Infow(ctx, "enableDisablePort", log.Fields{"deviceId": deviceID, "port": port})
 	if port == nil {
 		return olterrors.NewErrInvalidValue(log.Fields{
 			"reason":    "port cannot be nil",
@@ -355,13 +350,13 @@
 			"port":      nil}, nil)
 	}
 	if handler := oo.getDeviceHandler(deviceID); handler != nil {
-		logger.Debugw("Enable_Disable_Port", log.Fields{"deviceId": deviceID, "port": port})
+		logger.Debugw(ctx, "Enable_Disable_Port", log.Fields{"deviceId": deviceID, "port": port})
 		if enablePort {
-			if err := handler.EnablePort(port); err != nil {
+			if err := handler.EnablePort(ctx, port); err != nil {
 				return olterrors.NewErrAdapter("error-occurred-during-enable-port", log.Fields{"deviceID": deviceID, "port": port}, err)
 			}
 		} else {
-			if err := handler.DisablePort(port); err != nil {
+			if err := handler.DisablePort(ctx, port); err != nil {
 				return olterrors.NewErrAdapter("error-occurred-during-disable-port", log.Fields{"deviceID": deviceID, "port": port}, err)
 			}
 		}
@@ -370,9 +365,8 @@
 }
 
 //Child_device_lost deletes the ONU and its references from PONResources
-func (oo *OpenOLT) Child_device_lost(deviceID string, pPortNo uint32, onuID uint32) error {
-	logger.Infow("Child-device-lost", log.Fields{"parentId": deviceID})
-	ctx := context.Background()
+func (oo *OpenOLT) Child_device_lost(ctx context.Context, deviceID string, pPortNo uint32, onuID uint32) error {
+	logger.Infow(ctx, "Child-device-lost", log.Fields{"parentId": deviceID})
 	if handler := oo.getDeviceHandler(deviceID); handler != nil {
 		return handler.ChildDeviceLost(ctx, pPortNo, onuID)
 	}
@@ -380,12 +374,12 @@
 }
 
 //Start_omci_test not implemented
-func (oo *OpenOLT) Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) {
+func (oo *OpenOLT) Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) {
 	return nil, olterrors.ErrNotImplemented
 }
 
 //Get_ext_value retrieves a value on a particular ONU
-func (oo *OpenOLT) Get_ext_value(deviceID string, device *voltha.Device, valueparam voltha.ValueType_Type) (*voltha.ReturnValues, error) {
+func (oo *OpenOLT) Get_ext_value(ctx context.Context, deviceID string, device *voltha.Device, valueparam voltha.ValueType_Type) (*voltha.ReturnValues, error) {
 	var err error
 	resp := new(voltha.ReturnValues)
 	log.Infow("Get_ext_value", log.Fields{"device-id": deviceID, "onu-id": device.Id})
diff --git a/internal/pkg/core/openolt_eventmgr.go b/internal/pkg/core/openolt_eventmgr.go
index a11d3f7..76d1ca7 100644
--- a/internal/pkg/core/openolt_eventmgr.go
+++ b/internal/pkg/core/openolt_eventmgr.go
@@ -18,7 +18,7 @@
 package core
 
 import (
-	ctx "context"
+	"context"
 	"errors"
 	"fmt"
 	"strconv"
@@ -103,60 +103,60 @@
 
 // ProcessEvents processes and publishes OpenOLT events
 // nolint: gocyclo
-func (em *OpenOltEventMgr) ProcessEvents(alarmInd *oop.AlarmIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) ProcessEvents(ctx context.Context, alarmInd *oop.AlarmIndication, deviceID string, raisedTs int64) error {
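+	// ctx is threaded into every per-indication handler below so that alarm
+	// logs and the resulting Kafka device events carry the caller's fields.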
 	var err error
 	switch alarmInd.Data.(type) {
 	case *oop.AlarmIndication_LosInd:
-		logger.Debugw("received-los-indication", log.Fields{"alarm-ind": alarmInd})
-		err = em.oltLosIndication(alarmInd.GetLosInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-los-indication", log.Fields{"alarm-ind": alarmInd})
+		err = em.oltLosIndication(ctx, alarmInd.GetLosInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuAlarmInd:
-		logger.Debugw("received-onu-alarm-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuAlarmIndication(alarmInd.GetOnuAlarmInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-alarm-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuAlarmIndication(ctx, alarmInd.GetOnuAlarmInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_DyingGaspInd:
-		logger.Debugw("received-dying-gasp-indication", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuDyingGaspIndication(alarmInd.GetDyingGaspInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-dying-gasp-indication", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuDyingGaspIndication(ctx, alarmInd.GetDyingGaspInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuActivationFailInd:
-		logger.Debugw("received-onu-activation-fail-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuActivationFailIndication(alarmInd.GetOnuActivationFailInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-activation-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuActivationFailIndication(ctx, alarmInd.GetOnuActivationFailInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuLossOmciInd:
-		logger.Debugw("received-onu-loss-omci-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuLossOmciIndication(alarmInd.GetOnuLossOmciInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-loss-omci-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuLossOmciIndication(ctx, alarmInd.GetOnuLossOmciInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuDriftOfWindowInd:
-		logger.Debugw("received-onu-drift-of-window-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuDriftOfWindowIndication(alarmInd.GetOnuDriftOfWindowInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-drift-of-window-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuDriftOfWindowIndication(ctx, alarmInd.GetOnuDriftOfWindowInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuSignalDegradeInd:
-		logger.Debugw("received-onu-signal-degrade-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuSignalDegradeIndication(alarmInd.GetOnuSignalDegradeInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-signal-degrade-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuSignalDegradeIndication(ctx, alarmInd.GetOnuSignalDegradeInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuSignalsFailInd:
-		logger.Debugw("received-onu-signal-fail-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuSignalsFailIndication(alarmInd.GetOnuSignalsFailInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-signal-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuSignalsFailIndication(ctx, alarmInd.GetOnuSignalsFailInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuStartupFailInd:
-		logger.Debugw("received-onu-startup-fail-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuStartupFailedIndication(alarmInd.GetOnuStartupFailInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-startup-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuStartupFailedIndication(ctx, alarmInd.GetOnuStartupFailInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuTiwiInd:
-		logger.Debugw("received-onu-transmission-warning-indication ", log.Fields{"alarm-ind": alarmInd})
-		logger.Warnw("not-implemented-yet", log.Fields{"alarm-ind": "Onu-Transmission-indication"})
+		logger.Debugw(ctx, "received-onu-transmission-warning-indication ", log.Fields{"alarm-ind": alarmInd})
+		logger.Warnw(ctx, "not-implemented-yet", log.Fields{"alarm-ind": "Onu-Transmission-indication"})
 	case *oop.AlarmIndication_OnuLossOfSyncFailInd:
-		logger.Debugw("received-onu-loss-of-sync-fail-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuLossOfSyncIndication(alarmInd.GetOnuLossOfSyncFailInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-loss-of-sync-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuLossOfSyncIndication(ctx, alarmInd.GetOnuLossOfSyncFailInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuItuPonStatsInd:
-		logger.Debugw("received-onu-itu-pon-stats-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuItuPonStatsIndication(alarmInd.GetOnuItuPonStatsInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-itu-pon-stats-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuItuPonStatsIndication(ctx, alarmInd.GetOnuItuPonStatsInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuDeactivationFailureInd:
-		logger.Debugw("received-onu-deactivation-failure-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuDeactivationFailureIndication(alarmInd.GetOnuDeactivationFailureInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-deactivation-failure-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuDeactivationFailureIndication(ctx, alarmInd.GetOnuDeactivationFailureInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuLossGemDelineationInd:
-		logger.Debugw("received-onu-loss-of-gem-channel-delineation-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuLossOfGEMChannelDelineationIndication(alarmInd.GetOnuLossGemDelineationInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-loss-of-gem-channel-delineation-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuLossOfGEMChannelDelineationIndication(ctx, alarmInd.GetOnuLossGemDelineationInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuPhysicalEquipmentErrorInd:
-		logger.Debugw("received-onu-physical-equipment-error-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuPhysicalEquipmentErrorIndication(alarmInd.GetOnuPhysicalEquipmentErrorInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-physical-equipment-error-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuPhysicalEquipmentErrorIndication(ctx, alarmInd.GetOnuPhysicalEquipmentErrorInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuLossOfAckInd:
-		logger.Debugw("received-onu-loss-of-acknowledgement-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuLossOfAcknowledgementIndication(alarmInd.GetOnuLossOfAckInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-loss-of-acknowledgement-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuLossOfAcknowledgementIndication(ctx, alarmInd.GetOnuLossOfAckInd(), deviceID, raisedTs)
 	case *oop.AlarmIndication_OnuDiffReachExceededInd:
-		logger.Debugw("received-onu-differential-reach-exceeded-indication ", log.Fields{"alarm-ind": alarmInd})
-		err = em.onuDifferentialReachExceededIndication(alarmInd.GetOnuDiffReachExceededInd(), deviceID, raisedTs)
+		logger.Debugw(ctx, "received-onu-differential-reach-exceeded-indication ", log.Fields{"alarm-ind": alarmInd})
+		err = em.onuDifferentialReachExceededIndication(ctx, alarmInd.GetOnuDiffReachExceededInd(), deviceID, raisedTs)
 	default:
 		err = olterrors.NewErrInvalidValue(log.Fields{"indication-type": alarmInd}, nil)
 	}
@@ -167,7 +167,7 @@
 }
 
 // oltUpDownIndication handles Up and Down state of an OLT
-func (em *OpenOltEventMgr) oltUpDownIndication(oltIndication *oop.OltIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltUpDownIndication(ctx context.Context, oltIndication *oop.OltIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -181,15 +181,15 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", oltIndicationDown, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
 		return olterrors.NewErrCommunication("send-olt-event", log.Fields{"device-id": deviceID}, err)
 	}
-	logger.Debugw("olt-updown-event-sent-to-kafka", log.Fields{})
+	logger.Debugw(ctx, "olt-updown-event-sent-to-kafka", log.Fields{})
 	return nil
 }
 
 // OnuDiscoveryIndication is an exported method to handle ONU discovery event
-func (em *OpenOltEventMgr) OnuDiscoveryIndication(onuDisc *oop.OnuDiscIndication, oltDeviceID string, onuDeviceID string, OnuID uint32, serialNumber string, raisedTs int64) error {
+func (em *OpenOltEventMgr) OnuDiscoveryIndication(ctx context.Context, onuDisc *oop.OnuDiscIndication, oltDeviceID string, onuDeviceID string, OnuID uint32, serialNumber string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -202,20 +202,20 @@
 	de.ResourceId = oltDeviceID
 	de.DeviceEventName = fmt.Sprintf("%s_%s", onuDiscoveryEvent, "RAISE_EVENT")
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, pon, raisedTs); err != nil {
 		return olterrors.NewErrCommunication("send-onu-discovery-event",
 			log.Fields{
 				"serial-number": serialNumber,
 				"intf-id":       onuDisc.IntfId}, err)
 	}
-	logger.Debugw("onu-discovery-event-sent-to-kafka",
+	logger.Debugw(ctx, "onu-discovery-event-sent-to-kafka",
 		log.Fields{
 			"serial-number": serialNumber,
 			"intf-id":       onuDisc.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) oltLosIndication(oltLos *oop.LosIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltLosIndication(ctx context.Context, oltLos *oop.LosIndication, deviceID string, raisedTs int64) error {
 	var err error = nil
 	var de voltha.DeviceEvent
 	var alarmInd oop.OnuAlarmIndication
@@ -239,7 +239,7 @@
 				alarmInd.IntfId = ponIntdID
 				alarmInd.OnuId = onuInCache.(*OnuDevice).onuID
 				alarmInd.LosStatus = statusCheckOn
-				err = em.onuAlarmIndication(&alarmInd, deviceID, raisedTs)
+				err = em.onuAlarmIndication(ctx, &alarmInd, deviceID, raisedTs)
 			}
 			return true
 		})
@@ -251,14 +251,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", oltLosEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("olt-los-event-sent-to-kafka", log.Fields{"intf-id": oltLos.IntfId})
+	logger.Debugw(ctx, "olt-los-event-sent-to-kafka", log.Fields{"intf-id": oltLos.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuDyingGaspIndication(dgi *oop.DyingGaspIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDyingGaspIndication(ctx context.Context, dgi *oop.DyingGaspIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	var serialNumber string
 	context := make(map[string]string)
@@ -276,22 +276,22 @@
 	de.ResourceId = deviceID
 	de.DeviceEventName = fmt.Sprintf("%s_%s", onuDyingGaspEvent, "EVENT")
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-dying-gasp-event-sent-to-kafka", log.Fields{"intf-id": dgi.IntfId})
+	logger.Debugw(ctx, "onu-dying-gasp-event-sent-to-kafka", log.Fields{"intf-id": dgi.IntfId})
 	return nil
 }
 
 //wasLosRaised checks whether an LoS alarm was already raised; it returns true if so, false otherwise
-func (em *OpenOltEventMgr) wasLosRaised(onuAlarm *oop.OnuAlarmIndication) bool {
+func (em *OpenOltEventMgr) wasLosRaised(ctx context.Context, onuAlarm *oop.OnuAlarmIndication) bool {
 	onuKey := em.handler.formOnuKey(onuAlarm.IntfId, onuAlarm.OnuId)
 	if onuInCache, ok := em.handler.onus.Load(onuKey); ok {
-		logger.Debugw("onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
+		logger.Debugw(ctx, "onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
 
 		if onuAlarm.LosStatus == statusCheckOn {
 			if onuInCache.(*OnuDevice).losRaised {
-				logger.Warnw("onu-los-raised-already", log.Fields{"onu_id": onuAlarm.OnuId,
+				logger.Warnw(ctx, "onu-los-raised-already", log.Fields{"onu_id": onuAlarm.OnuId,
 					"intf_id": onuAlarm.IntfId, "LosStatus": onuAlarm.LosStatus})
 				return true
 			}
@@ -302,14 +302,14 @@
 }
 
 //wasLosCleared checks whether an LoS alarm was already cleared; it returns true if so, false otherwise
-func (em *OpenOltEventMgr) wasLosCleared(onuAlarm *oop.OnuAlarmIndication) bool {
+func (em *OpenOltEventMgr) wasLosCleared(ctx context.Context, onuAlarm *oop.OnuAlarmIndication) bool {
 	onuKey := em.handler.formOnuKey(onuAlarm.IntfId, onuAlarm.OnuId)
 	if onuInCache, ok := em.handler.onus.Load(onuKey); ok {
-		logger.Debugw("onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
+		logger.Debugw(ctx, "onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
 
 		if onuAlarm.LosStatus == statusCheckOff {
 			if !onuInCache.(*OnuDevice).losRaised {
-				logger.Warnw("onu-los-cleared-already", log.Fields{"onu_id": onuAlarm.OnuId,
+				logger.Warnw(ctx, "onu-los-cleared-already", log.Fields{"onu_id": onuAlarm.OnuId,
 					"intf_id": onuAlarm.IntfId, "LosStatus": onuAlarm.LosStatus})
 				return true
 			}
@@ -349,7 +349,7 @@
 	return deviceEventName
 }
 
-func (em *OpenOltEventMgr) onuAlarmIndication(onuAlarm *oop.OnuAlarmIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuAlarmIndication(ctx context.Context, onuAlarm *oop.OnuAlarmIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	var serialNumber string
 	context := make(map[string]string)
@@ -370,7 +370,7 @@
 
 	switch onuAlarm.LosStatus {
 	case statusCheckOn:
-		if em.wasLosRaised(onuAlarm) {
+		if em.wasLosRaised(ctx, onuAlarm) {
 			/* No need to raise the ONU LoS event as it might have already been
 			   raised, or the ONU might have been deleted */
 			return nil
@@ -383,7 +383,7 @@
 				onuInCache.(*OnuDevice).proxyDeviceID, true))
 		}
 	case statusCheckOff:
-		if em.wasLosCleared(onuAlarm) {
+		if em.wasLosCleared(ctx, onuAlarm) {
 			/* No need to clear the ONU LoS event as it might have already been
 			   cleared, or the ONU might have been deleted */
 			return nil
@@ -398,14 +398,14 @@
 	}
 
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-los-event-sent-to-kafka", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
+	logger.Debugw(ctx, "onu-los-event-sent-to-kafka", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuActivationFailIndication(oaf *oop.OnuActivationFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuActivationFailIndication(ctx context.Context, oaf *oop.OnuActivationFailureIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -417,14 +417,14 @@
 	de.ResourceId = deviceID
 	de.DeviceEventName = fmt.Sprintf("%s_%s", onuActivationFailEvent, "RAISE_EVENT")
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-activation-failure-event-sent-to-kafka", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
+	logger.Debugw(ctx, "onu-activation-failure-event-sent-to-kafka", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuLossOmciIndication(onuLossOmci *oop.OnuLossOfOmciChannelIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOmciIndication(ctx context.Context, onuLossOmci *oop.OnuLossOfOmciChannelIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -439,14 +439,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOmciEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-loss-of-omci-channel-event-sent-to-kafka", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
+	logger.Debugw(ctx, "onu-loss-of-omci-channel-event-sent-to-kafka", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuDriftOfWindowIndication(onuDriftWindow *oop.OnuDriftOfWindowIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDriftOfWindowIndication(ctx context.Context, onuDriftWindow *oop.OnuDriftOfWindowIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -463,14 +463,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuDriftOfWindowEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-drift-of-window-event-sent-to-kafka", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
+	logger.Debugw(ctx, "onu-drift-of-window-event-sent-to-kafka", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuSignalDegradeIndication(onuSignalDegrade *oop.OnuSignalDegradeIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuSignalDegradeIndication(ctx context.Context, onuSignalDegrade *oop.OnuSignalDegradeIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -486,14 +486,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalDegradeEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-signal-degrade-event-sent-to-kafka", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
+	logger.Debugw(ctx, "onu-signal-degrade-event-sent-to-kafka", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuSignalsFailIndication(onuSignalsFail *oop.OnuSignalsFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuSignalsFailIndication(ctx context.Context, onuSignalsFail *oop.OnuSignalsFailureIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -509,14 +509,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalsFailEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-signals-fail-event-sent-to-kafka", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
+	logger.Debugw(ctx, "onu-signals-fail-event-sent-to-kafka", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuStartupFailedIndication(onuStartupFail *oop.OnuStartupFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuStartupFailedIndication(ctx context.Context, onuStartupFail *oop.OnuStartupFailureIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -532,14 +532,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuStartupFailEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-startup-fail-event-sent-to-kafka", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
+	logger.Debugw(ctx, "onu-startup-fail-event-sent-to-kafka", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuLossOfSyncIndication(onuLOKI *oop.OnuLossOfKeySyncFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfSyncIndication(ctx context.Context, onuLOKI *oop.OnuLossOfKeySyncFailureIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -555,27 +555,27 @@
 	}
 
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, security, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, security, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-loss-of-key-sync-event-sent-to-kafka", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
+	logger.Debugw(ctx, "onu-loss-of-key-sync-event-sent-to-kafka", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
 	return nil
 }
 
 // oltIntfOperIndication handles the Up and Down state of OLT PON ports
-func (em *OpenOltEventMgr) oltIntfOperIndication(ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltIntfOperIndication(ctx context.Context, ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
-	context := make(map[string]string)
 	portID := IntfIDToPortNo(ifindication.IntfId, voltha.Port_PON_OLT)
-	device, err := em.handler.coreProxy.GetDevice(ctx.Background(), deviceID, deviceID)
+	device, err := em.handler.coreProxy.GetDevice(ctx, deviceID, deviceID)
 	if err != nil {
 		return olterrors.NewErrAdapter("error-while-fetching-device-object", log.Fields{"DeviceId": deviceID}, err)
 	}
+	context := make(map[string]string)
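+	// The event-context map is built only after the device lookup: from this
+	// point on the identifier "context" shadows the context package.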
 	for _, port := range device.Ports {
 		if port.PortNo == portID {
 			// Events are suppressed if the port AdminState is not enabled.
 			if port.AdminState != common.AdminState_ENABLED {
-				logger.Debugw("port-disable/enable-event-not-generated--the-port-is-not-enabled-by-operator", log.Fields{"deviceId": deviceID, "port": port})
+				logger.Debugw(ctx, "port-disable/enable-event-not-generated--the-port-is-not-enabled-by-operator", log.Fields{"deviceId": deviceID, "port": port})
 				return nil
 			}
 			break
@@ -593,14 +593,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", ponIntfDownIndiction, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
 		return olterrors.NewErrCommunication("send-olt-intf-oper-status-event", log.Fields{"device-id": deviceID, "intf-id": ifindication.IntfId, "oper-state": ifindication.OperState}, err).Log()
 	}
-	logger.Debug("sent-olt-intf-oper-status-event-to-kafka")
+	logger.Debug(ctx, "sent-olt-intf-oper-status-event-to-kafka")
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuDeactivationFailureIndication(onuDFI *oop.OnuDeactivationFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDeactivationFailureIndication(ctx context.Context, onuDFI *oop.OnuDeactivationFailureIndication, deviceID string, raisedTs int64) error {
 	var de voltha.DeviceEvent
 	context := make(map[string]string)
 	/* Populating event context */
@@ -615,13 +615,13 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuDeactivationFailureEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(&de, equipment, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-deactivation-failure-event-sent-to-kafka", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
+	logger.Debugw(ctx, "onu-deactivation-failure-event-sent-to-kafka", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
 	return nil
 }
-func (em *OpenOltEventMgr) onuRemoteDefectIndication(onuID uint32, intfID uint32, rdiCount uint64, status string, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuRemoteDefectIndication(ctx context.Context, onuID uint32, intfID uint32, rdiCount uint64, status string, deviceID string, raisedTs int64) error {
 	/* Populating event context */
 	context := map[string]string{
 		"onu-id":    strconv.FormatUint(uint64(onuID), base10),
@@ -639,29 +639,29 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuRemoteDefectIndication, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-remote-defect-event-sent-to-kafka", log.Fields{"onu-id": onuID, "intf-id": intfID})
+	logger.Debugw(ctx, "onu-remote-defect-event-sent-to-kafka", log.Fields{"onu-id": onuID, "intf-id": intfID})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuItuPonStatsIndication(onuIPS *oop.OnuItuPonStatsIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuItuPonStatsIndication(ctx context.Context, onuIPS *oop.OnuItuPonStatsIndication, deviceID string, raisedTs int64) error {
 	onuDevice, found := em.handler.onus.Load(em.handler.formOnuKey(onuIPS.IntfId, onuIPS.OnuId))
 	if !found {
 		return errors.New("unknown-onu-device")
 	}
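+	// rdiRaised acts as a latch so that repeated ITU PON stats indications
+	// with RDI status on do not re-raise the same remote-defect event.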
 	if onuIPS.GetRdiErrorInd().Status == statusCheckOn {
 		if !onuDevice.(*OnuDevice).rdiRaised {
-			if err := em.onuRemoteDefectIndication(onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOn, deviceID, raisedTs); err != nil {
+			if err := em.onuRemoteDefectIndication(ctx, onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOn, deviceID, raisedTs); err != nil {
 				return err
 			}
 			onuDevice.(*OnuDevice).rdiRaised = true
 			return nil
 		}
-		logger.Debugw("onu-remote-defect-already-raised", log.Fields{"onu-id": onuIPS.OnuId, "intf-id": onuIPS.IntfId})
+		logger.Debugw(ctx, "onu-remote-defect-already-raised", log.Fields{"onu-id": onuIPS.OnuId, "intf-id": onuIPS.IntfId})
 	} else {
-		if err := em.onuRemoteDefectIndication(onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOff, deviceID, raisedTs); err != nil {
+		if err := em.onuRemoteDefectIndication(ctx, onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOff, deviceID, raisedTs); err != nil {
 			return err
 		}
 		onuDevice.(*OnuDevice).rdiRaised = false
@@ -669,7 +669,7 @@
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuLossOfGEMChannelDelineationIndication(onuGCD *oop.OnuLossOfGEMChannelDelineationIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfGEMChannelDelineationIndication(ctx context.Context, onuGCD *oop.OnuLossOfGEMChannelDelineationIndication, deviceID string, raisedTs int64) error {
 	/* Populating event context */
 	context := map[string]string{
 		"onu-id":             strconv.FormatUint(uint64(onuGCD.OnuId), base10),
@@ -687,14 +687,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfGEMChannelDelineationEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(de, communication, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, de, communication, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-loss-of-gem-channel-delineation-event-sent-to-kafka", log.Fields{"onu-id": onuGCD.OnuId, "intf-id": onuGCD.IntfId})
+	logger.Debugw(ctx, "onu-loss-of-gem-channel-delineation-event-sent-to-kafka", log.Fields{"onu-id": onuGCD.OnuId, "intf-id": onuGCD.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuPhysicalEquipmentErrorIndication(onuErr *oop.OnuPhysicalEquipmentErrorIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuPhysicalEquipmentErrorIndication(ctx context.Context, onuErr *oop.OnuPhysicalEquipmentErrorIndication, deviceID string, raisedTs int64) error {
 	/* Populating event context */
 	context := map[string]string{
 		"onu-id":  strconv.FormatUint(uint64(onuErr.OnuId), base10),
@@ -711,14 +711,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuPhysicalEquipmentErrorEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuErr.OnuId, "intf-id": onuErr.IntfId})
+	logger.Debugw(ctx, "onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuErr.OnuId, "intf-id": onuErr.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuLossOfAcknowledgementIndication(onuLOA *oop.OnuLossOfAcknowledgementIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfAcknowledgementIndication(ctx context.Context, onuLOA *oop.OnuLossOfAcknowledgementIndication, deviceID string, raisedTs int64) error {
 	/* Populating event context */
 	context := map[string]string{
 		"onu-id":  strconv.FormatUint(uint64(onuLOA.OnuId), base10),
@@ -735,14 +735,14 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfAcknowledgementEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
 		return err
 	}
-	logger.Debugw("onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuLOA.OnuId, "intf-id": onuLOA.IntfId})
+	logger.Debugw(ctx, "onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuLOA.OnuId, "intf-id": onuLOA.IntfId})
 	return nil
 }
 
-func (em *OpenOltEventMgr) onuDifferentialReachExceededIndication(onuDRE *oop.OnuDifferentialReachExceededIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDifferentialReachExceededIndication(ctx context.Context, onuDRE *oop.OnuDifferentialReachExceededIndication, deviceID string, raisedTs int64) error {
 	/* Populating event context */
 	context := map[string]string{
 		"onu-id":                strconv.FormatUint(uint64(onuDRE.OnuId), base10),
@@ -760,7 +760,7 @@
 		de.DeviceEventName = fmt.Sprintf("%s_%s", onuDifferentialReachExceededEvent, "CLEAR_EVENT")
 	}
 	/* Send event to KAFKA */
-	if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+	if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
 		return err
 	}
 	log.Debugw("onu-differential-reach-exceeded–event-sent-to-kafka", log.Fields{"onu-id": onuDRE.OnuId, "intf-id": onuDRE.IntfId})
diff --git a/internal/pkg/core/openolt_eventmgr_test.go b/internal/pkg/core/openolt_eventmgr_test.go
index ebbf976..fbc3539 100644
--- a/internal/pkg/core/openolt_eventmgr_test.go
+++ b/internal/pkg/core/openolt_eventmgr_test.go
@@ -18,12 +18,12 @@
 package core
 
 import (
+	"context"
+	"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+	oop "github.com/opencord/voltha-protos/v3/go/openolt"
 	"sync"
 	"testing"
 	"time"
-
-	"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
-	oop "github.com/opencord/voltha-protos/v3/go/openolt"
 )
 
 func mockEventMgr() *OpenOltEventMgr {
@@ -132,7 +132,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			em.ProcessEvents(tt.args.alarmInd, tt.args.deviceID, tt.args.raisedTs)
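+			// context.Background() is sufficient in tests; production callers
+			// are expected to pass a request-scoped context instead.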
+			em.ProcessEvents(context.Background(), tt.args.alarmInd, tt.args.deviceID, tt.args.raisedTs)
 		})
 	}
 }
@@ -158,7 +158,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			em.OnuDiscoveryIndication(tt.args.onuDisc, tt.args.oltDeviceID, tt.args.onuDeviceID, tt.args.OnuID, tt.args.serialNumber, tt.args.raisedTs)
+			em.OnuDiscoveryIndication(context.Background(), tt.args.onuDisc, tt.args.oltDeviceID, tt.args.onuDeviceID, tt.args.OnuID, tt.args.serialNumber, tt.args.raisedTs)
 		})
 	}
 }
diff --git a/internal/pkg/core/openolt_flowmgr.go b/internal/pkg/core/openolt_flowmgr.go
index d14194e..b31c312 100644
--- a/internal/pkg/core/openolt_flowmgr.go
+++ b/internal/pkg/core/openolt_flowmgr.go
@@ -228,7 +228,7 @@
 
 //NewFlowManager creates an OpenOltFlowMgr object and initializes its parameters
 func NewFlowManager(ctx context.Context, dh *DeviceHandler, rMgr *rsrcMgr.OpenOltResourceMgr) *OpenOltFlowMgr {
-	logger.Infow("initializing-flow-manager", log.Fields{"device-id": dh.device.Id})
+	logger.Infow(ctx, "initializing-flow-manager", log.Fields{"device-id": dh.device.Id})
 	var flowMgr OpenOltFlowMgr
 	var err error
 	var idx uint32
@@ -236,8 +236,8 @@
 	flowMgr.deviceHandler = dh
 	flowMgr.resourceMgr = rMgr
 	flowMgr.techprofile = make(map[uint32]tp.TechProfileIf)
-	if err = flowMgr.populateTechProfilePerPonPort(); err != nil {
-		logger.Errorw("error-while-populating-tech-profile-mgr", log.Fields{"error": err})
+	if err = flowMgr.populateTechProfilePerPonPort(ctx); err != nil {
+		logger.Errorw(ctx, "error-while-populating-tech-profile-mgr", log.Fields{"error": err})
 		return nil
 	}
 	flowMgr.onuIdsLock = sync.RWMutex{}
@@ -248,7 +248,7 @@
 	//Load the onugem info cache from kv store on flowmanager start
 	for idx = 0; idx < ponPorts; idx++ {
 		if flowMgr.onuGemInfo[idx], err = rMgr.GetOnuGemInfo(ctx, idx); err != nil {
-			logger.Error("failed-to-load-onu-gem-info-cache")
+			logger.Error(ctx, "failed-to-load-onu-gem-info-cache")
 		}
 		//Load flowID list per gem map per interface from the kvstore.
 		flowMgr.loadFlowIDlistForGem(ctx, idx)
@@ -259,19 +259,19 @@
 	flowMgr.interfaceToMcastQueueMap = make(map[uint32]*queueInfoBrief)
 	//load interface to multicast queue map from kv store
 	flowMgr.loadInterfaceToMulticastQueueMap(ctx)
-	logger.Info("initialization-of-flow-manager-success")
+	logger.Info(ctx, "initialization-of-flow-manager-success")
 	return &flowMgr
 }
 
-func (f *OpenOltFlowMgr) generateStoredFlowID(flowID uint32, direction string) (uint64, error) {
+func (f *OpenOltFlowMgr) generateStoredFlowID(ctx context.Context, flowID uint32, direction string) (uint64, error) {
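+	// The stored flow id tags the direction in the high bits: upstream sets
+	// bit 15 (0x1<<15), multicast sets bit 16 (0x2<<15), downstream is untagged.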
 	if direction == Upstream {
-		logger.Debugw("upstream-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Debugw(ctx, "upstream-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
 		return 0x1<<15 | uint64(flowID), nil
 	} else if direction == Downstream {
-		logger.Debugw("downstream-flow-not-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Debugw(ctx, "downstream-flow-not-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
 		return uint64(flowID), nil
 	} else if direction == Multicast {
-		logger.Debugw("multicast-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Debugw(ctx, "multicast-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
 		return 0x2<<15 | uint64(flowID), nil
 	} else {
 		return 0, olterrors.NewErrInvalidValue(log.Fields{"direction": direction}, nil).Log()
@@ -279,7 +279,7 @@
 }
 
 func (f *OpenOltFlowMgr) registerFlow(ctx context.Context, flowFromCore *ofp.OfpFlowStats, deviceFlow *openoltpb2.Flow) {
-	logger.Debugw("registering-flow-for-device ",
+	logger.Debugw(ctx, "registering-flow-for-device ",
 		log.Fields{
 			"flow":      flowFromCore,
 			"device-id": f.deviceHandler.device.Id})
@@ -301,7 +301,7 @@
 	var gemPorts []uint32
 	var TpInst interface{}
 
-	logger.Infow("dividing-flow", log.Fields{
+	logger.Infow(ctx, "dividing-flow", log.Fields{
 		"device-id":  f.deviceHandler.device.Id,
 		"intf-id":    intfID,
 		"onu-id":     onuID,
@@ -316,7 +316,7 @@
 	// is because the flow is an NNI flow and there would be no onu resources associated with it
 	// TODO: properly deal with NNI flows
 	if onuID <= 0 {
-		logger.Errorw("no-onu-id-for-flow",
+		logger.Errorw(ctx, "no-onu-id-for-flow",
 			log.Fields{
 				"port-no":   portNo,
 				"classifer": classifierInfo,
@@ -326,13 +326,13 @@
 	}
 
 	uni := getUniPortPath(f.deviceHandler.device.Id, intfID, int32(onuID), int32(uniID))
-	logger.Debugw("uni-port-path", log.Fields{
+	logger.Debugw(ctx, "uni-port-path", log.Fields{
 		"uni":       uni,
 		"device-id": f.deviceHandler.device.Id})
 
 	tpLockMapKey := tpLockKey{intfID, onuID, uniID}
 	if f.perUserFlowHandleLock.TryLock(tpLockMapKey) {
-		logger.Debugw("dividing-flow-create-tcont-gem-ports", log.Fields{
+		logger.Debugw(ctx, "dividing-flow-create-tcont-gem-ports", log.Fields{
 			"device-id":  f.deviceHandler.device.Id,
 			"intf-id":    intfID,
 			"onu-id":     onuID,
@@ -345,7 +345,7 @@
 			"tp-id":      TpID})
 		allocID, gemPorts, TpInst = f.createTcontGemports(ctx, intfID, onuID, uniID, uni, portNo, TpID, UsMeterID, DsMeterID, flowMetadata)
 		if allocID == 0 || gemPorts == nil || TpInst == nil {
-			logger.Error("alloc-id-gem-ports-tp-unavailable")
+			logger.Error(ctx, "alloc-id-gem-ports-tp-unavailable")
 			f.perUserFlowHandleLock.Unlock(tpLockMapKey)
 			return
 		}
@@ -362,7 +362,7 @@
 		f.checkAndAddFlow(ctx, args, classifierInfo, actionInfo, flow, TpInst, gemPorts, TpID, uni)
 		f.perUserFlowHandleLock.Unlock(tpLockMapKey)
 	} else {
-		logger.Errorw("failed-to-acquire-per-user-flow-handle-lock",
+		logger.Errorw(ctx, "failed-to-acquire-per-user-flow-handle-lock",
 			log.Fields{
 				"intf-id":     intfID,
 				"onu-id":      onuID,
@@ -377,7 +377,7 @@
 // CreateSchedulerQueues creates traffic schedulers on the device with the given scheduler configuration and traffic shaping info
 func (f *OpenOltFlowMgr) CreateSchedulerQueues(ctx context.Context, sq schedQueue) error {
 
-	logger.Debugw("CreateSchedulerQueues",
+	logger.Debugw(ctx, "CreateSchedulerQueues",
 		log.Fields{"dir": sq.direction,
 			"intf-id":      sq.intfID,
 			"onu-id":       sq.onuID,
@@ -410,7 +410,7 @@
 
 	if KvStoreMeter != nil {
 		if KvStoreMeter.MeterId == sq.meterID {
-			logger.Debugw("scheduler-already-created-for-upstream", log.Fields{"device-id": f.deviceHandler.device.Id})
+			logger.Debugw(ctx, "scheduler-already-created-for-upstream", log.Fields{"device-id": f.deviceHandler.device.Id})
 			return nil
 		}
 		return olterrors.NewErrInvalidValue(log.Fields{
@@ -420,16 +420,16 @@
 			"device-id":         f.deviceHandler.device.Id}, nil)
 	}
 
-	logger.Debugw("meter-does-not-exist-creating-new",
+	logger.Debugw(ctx, "meter-does-not-exist-creating-new",
 		log.Fields{
 			"meter-id":  sq.meterID,
 			"direction": Direction,
 			"device-id": f.deviceHandler.device.Id})
 
 	if sq.direction == tp_pb.Direction_UPSTREAM {
-		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst.(*tp.TechProfile))
+		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
 	} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
-		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst.(*tp.TechProfile))
+		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
 	}
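+	// The tech-profile getters now take ctx as well, so their internal logging
+	// (and any KV access they perform) shares the same request context.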
 
 	if err != nil {
@@ -446,14 +446,14 @@
 		for _, meter := range sq.flowMetadata.Meters {
 			if sq.meterID == meter.MeterId {
 				meterConfig = meter
-				logger.Debugw("found-meter-config-from-flowmetadata",
+				logger.Debugw(ctx, "found-meter-config-from-flowmetadata",
 					log.Fields{"meterConfig": meterConfig,
 						"device-id": f.deviceHandler.device.Id})
 				break
 			}
 		}
 	} else {
-		logger.Errorw("flow-metadata-not-present-in-flow", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Errorw(ctx, "flow-metadata-not-present-in-flow", log.Fields{"device-id": f.deviceHandler.device.Id})
 	}
 	if meterConfig == nil {
 		return olterrors.NewErrNotFound("meterbands", log.Fields{
@@ -462,7 +462,7 @@
 			"meter-id":      sq.meterID,
 			"device-id":     f.deviceHandler.device.Id}, nil)
 	} else if len(meterConfig.Bands) < MaxMeterBand {
-		logger.Errorw("invalid-number-of-bands-in-meter",
+		logger.Errorw(ctx, "invalid-number-of-bands-in-meter",
 			log.Fields{"Bands": meterConfig.Bands,
 				"meter-id":  sq.meterID,
 				"device-id": f.deviceHandler.device.Id})
@@ -500,7 +500,7 @@
 				"meter-id":  sq.meterID,
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
-	logger.Infow("updated-meter-info-into-kv-store-successfully",
+	logger.Infow(ctx, "updated-meter-info-into-kv-store-successfully",
 		log.Fields{"direction": Direction,
 			"Meter":     meterConfig,
 			"device-id": f.deviceHandler.device.Id})
@@ -508,8 +508,7 @@
 }
 
 func (f *OpenOltFlowMgr) pushSchedulerQueuesToDevice(ctx context.Context, sq schedQueue, TrafficShaping *tp_pb.TrafficShapingInfo, TrafficSched []*tp_pb.TrafficScheduler) error {
-
-	trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst.(*tp.TechProfile), sq.direction)
+	trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
 
 	if err != nil {
 		return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
@@ -518,7 +517,7 @@
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
 
-	logger.Debugw("sending-traffic-scheduler-create-to-device",
+	logger.Debugw(ctx, "sending-traffic-scheduler-create-to-device",
 		log.Fields{
 			"direction":     sq.direction,
 			"TrafficScheds": TrafficSched,
@@ -529,14 +528,14 @@
 		TrafficScheds: TrafficSched}); err != nil {
 		return olterrors.NewErrAdapter("failed-to-create-traffic-schedulers-in-device", log.Fields{"TrafficScheds": TrafficSched}, err)
 	}
-	logger.Infow("successfully-created-traffic-schedulers", log.Fields{
+	logger.Infow(ctx, "successfully-created-traffic-schedulers", log.Fields{
 		"direction":      sq.direction,
 		"traffic-queues": trafficQueues,
 		"device-id":      f.deviceHandler.device.Id})
 
 	// On receiving the CreateTrafficQueues request, the driver should create corresponding
 	// downstream queues.
-	logger.Debugw("sending-traffic-queues-create-to-device",
+	logger.Debugw(ctx, "sending-traffic-queues-create-to-device",
 		log.Fields{"direction": sq.direction,
 			"traffic-queues": trafficQueues,
 			"device-id":      f.deviceHandler.device.Id})
@@ -547,19 +546,19 @@
 			TechProfileId: TrafficSched[0].TechProfileId}); err != nil {
 		return olterrors.NewErrAdapter("failed-to-create-traffic-queues-in-device", log.Fields{"traffic-queues": trafficQueues}, err)
 	}
-	logger.Infow("successfully-created-traffic-schedulers", log.Fields{
+	logger.Infow(ctx, "successfully-created-traffic-schedulers", log.Fields{
 		"direction":      sq.direction,
 		"traffic-queues": trafficQueues,
 		"device-id":      f.deviceHandler.device.Id})
 
 	if sq.direction == tp_pb.Direction_DOWNSTREAM {
-		multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(sq.tpInst.(*tp.TechProfile))
+		multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile))
 		if len(multicastTrafficQueues) > 0 {
 			if _, present := f.interfaceToMcastQueueMap[sq.intfID]; !present {
 				//assumed that there is only one queue per PON for the multicast service
 				//the default queue with multicastQueuePerPonPort.Priority per PON interface is used for the multicast service
 				//just put it in interfaceToMcastQueueMap to use for building group members
-				logger.Debugw("multicast-traffic-queues", log.Fields{"device-id": f.deviceHandler.device.Id})
+				logger.Debugw(ctx, "multicast-traffic-queues", log.Fields{"device-id": f.deviceHandler.device.Id})
 				multicastQueuePerPonPort := multicastTrafficQueues[0]
 				f.interfaceToMcastQueueMap[sq.intfID] = &queueInfoBrief{
 					gemPortID:       multicastQueuePerPonPort.GemportId,
@@ -570,7 +569,7 @@
 					multicastQueuePerPonPort.GemportId,
 					multicastQueuePerPonPort.Priority)
 
-				logger.Infow("multicast-queues-successfully-updated", log.Fields{"device-id": f.deviceHandler.device.Id})
+				logger.Infow(ctx, "multicast-queues-successfully-updated", log.Fields{"device-id": f.deviceHandler.device.Id})
 			}
 		}
 	}
@@ -583,7 +582,7 @@
 	var Direction string
 	var SchedCfg *tp_pb.SchedulerConfig
 	var err error
-	logger.Infow("removing-schedulers-and-queues-in-olt",
+	logger.Infow(ctx, "removing-schedulers-and-queues-in-olt",
 		log.Fields{
 			"direction": sq.direction,
 			"intf-id":   sq.intfID,
@@ -592,10 +591,10 @@
 			"uni-port":  sq.uniPort,
 			"device-id": f.deviceHandler.device.Id})
 	if sq.direction == tp_pb.Direction_UPSTREAM {
-		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst.(*tp.TechProfile))
+		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
 		Direction = "upstream"
 	} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
-		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst.(*tp.TechProfile))
+		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
 		Direction = "downstream"
 	}
 
@@ -615,7 +614,7 @@
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
 	if KVStoreMeter == nil {
-		logger.Warnw("no-meter-installed-yet",
+		logger.Warnw(ctx, "no-meter-installed-yet",
 			log.Fields{
 				"direction": Direction,
 				"intf-id":   sq.intfID,
@@ -636,7 +635,7 @@
 	TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst.(*tp.TechProfile), SchedCfg, TrafficShaping)}
 	TrafficSched[0].TechProfileId = sq.tpID
 
-	TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst.(*tp.TechProfile), sq.direction)
+	TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
 	if err != nil {
 		return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
 			log.Fields{
@@ -656,7 +655,7 @@
 				"traffic-queues": TrafficQueues,
 				"device-id":      f.deviceHandler.device.Id}, err)
 	}
-	logger.Infow("removed-traffic-queues-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+	logger.Infow(ctx, "removed-traffic-queues-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
 	if _, err = f.deviceHandler.Client.RemoveTrafficSchedulers(ctx, &tp_pb.TrafficSchedulers{
 		IntfId: sq.intfID, OnuId: sq.onuID,
 		UniId: sq.uniID, PortNo: sq.uniPort,
@@ -667,7 +666,7 @@
 				"traffic-schedulers": TrafficSched}, err)
 	}
 
-	logger.Infow("removed-traffic-schedulers-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+	logger.Infow(ctx, "removed-traffic-schedulers-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
 
 	/* After we successfully remove the scheduler configuration on the OLT device,
 	 * delete the meter id on the KV store.
@@ -680,7 +679,7 @@
 				"meter":     KVStoreMeter.MeterId,
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
-	logger.Infow("removed-meter-from-KV-store-successfully",
+	logger.Infow(ctx, "removed-meter-from-KV-store-successfully",
 		log.Fields{
 			"meter-id":  KVStoreMeter.MeterId,
 			"dir":       Direction,
@@ -699,9 +698,9 @@
 	allocIDs = f.resourceMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
 	allgemPortIDs = f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
 
-	tpPath := f.getTPpath(intfID, uni, TpID)
+	tpPath := f.getTPpath(ctx, intfID, uni, TpID)
 
-	logger.Debugw("creating-new-tcont-and-gem", log.Fields{
+	logger.Debugw(ctx, "creating-new-tcont-and-gem", log.Fields{
 		"intf-id":   intfID,
 		"onu-id":    onuID,
 		"uni-id":    uniID,
@@ -711,14 +710,14 @@
 	// Check whether a tech profile instance already exists for the derived port name
 	techProfileInstance, _ := f.techprofile[intfID].GetTPInstanceFromKVStore(ctx, TpID, tpPath)
 	if techProfileInstance == nil {
-		logger.Infow("tp-instance-not-found--creating-new",
+		logger.Infow(ctx, "tp-instance-not-found--creating-new",
 			log.Fields{
 				"path":      tpPath,
 				"device-id": f.deviceHandler.device.Id})
 		techProfileInstance, err = f.techprofile[intfID].CreateTechProfInstance(ctx, TpID, uni, intfID)
 		if err != nil {
 			// This should not happen, something wrong in KV backend transaction
-			logger.Errorw("tp-instance-create-failed",
+			logger.Errorw(ctx, "tp-instance-create-failed",
 				log.Fields{
 					"error":     err,
 					"tp-id":     TpID,
@@ -727,7 +726,7 @@
 		}
 		f.resourceMgr.UpdateTechProfileIDForOnu(ctx, intfID, onuID, uniID, TpID)
 	} else {
-		logger.Debugw("tech-profile-instance-already-exist-for-given port-name",
+		logger.Debugw(ctx, "tech-profile-instance-already-exist-for-given port-name",
 			log.Fields{
 				"uni":       uni,
 				"device-id": f.deviceHandler.device.Id})
@@ -740,7 +739,7 @@
 			sq := schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
 				uniPort: uniPort, tpInst: techProfileInstance, meterID: UsMeterID, flowMetadata: flowMetadata}
 			if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
-				logger.Errorw("CreateSchedulerQueues-failed-upstream",
+				logger.Errorw(ctx, "CreateSchedulerQueues-failed-upstream",
 					log.Fields{
 						"error":     err,
 						"meter-id":  UsMeterID,
@@ -752,7 +751,7 @@
 			sq := schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
 				uniPort: uniPort, tpInst: techProfileInstance, meterID: DsMeterID, flowMetadata: flowMetadata}
 			if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
-				logger.Errorw("CreateSchedulerQueues-failed-downstream",
+				logger.Errorw(ctx, "CreateSchedulerQueues-failed-downstream",
 					log.Fields{
 						"error":     err,
 						"meter-id":  DsMeterID,
@@ -773,7 +772,7 @@
 		for _, gemPortID := range gemPortIDs {
 			allgemPortIDs = appendUnique(allgemPortIDs, gemPortID)
 		}
-		logger.Infow("allocated-tcont-and-gem-ports",
+		logger.Infow(ctx, "allocated-tcont-and-gem-ports",
 			log.Fields{
 				"alloc-ids": allocIDs,
 				"gemports":  allgemPortIDs,
@@ -797,7 +796,7 @@
 		for _, gemPortID := range gemPortIDs {
 			allgemPortIDs = appendUnique(allgemPortIDs, gemPortID)
 		}
-		logger.Infow("allocated-tcont-and-gem-ports",
+		logger.Infow(ctx, "allocated-tcont-and-gem-ports",
 			log.Fields{
 				"alloc-ids": allocIDs,
 				"gemports":  allgemPortIDs,
@@ -806,17 +805,16 @@
 		f.storeTcontsGEMPortsIntoKVStore(ctx, intfID, onuID, uniID, allocIDs, allgemPortIDs)
 		return allocID, gemPortIDs, techProfileInstance
 	default:
-		logger.Errorw("unknown-tech",
+		logger.Errorw(ctx, "unknown-tech",
 			log.Fields{
 				"tpInst": tpInst})
 		return 0, nil, nil
 	}
-
 }
 
 func (f *OpenOltFlowMgr) storeTcontsGEMPortsIntoKVStore(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID []uint32, gemPortIDs []uint32) {
 
-	logger.Debugw("storing-allocated-tconts-and-gem-ports-into-KV-store",
+	logger.Debugw(ctx, "storing-allocated-tconts-and-gem-ports-into-KV-store",
 		log.Fields{
 			"intf-id":     intfID,
 			"onu-id":      onuID,
@@ -826,27 +824,27 @@
 			"device-id":   f.deviceHandler.device.Id})
 	/* Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store  */
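+	// note: KV-store update failures below are logged but not returned; this helper has no error return and is best-effort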
 	if err := f.resourceMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocID); err != nil {
-		logger.Errorw("error-while-uploading-allocid-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Errorw(ctx, "error-while-uploading-allocid-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
 	}
 	if err := f.resourceMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs); err != nil {
-		logger.Errorw("error-while-uploading-gemports-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Errorw(ctx, "error-while-uploading-gemports-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
 	}
 	if err := f.resourceMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, gemPortIDs, intfID, onuID, uniID); err != nil {
-		logger.Error("error-while-uploading-gemtopon-map-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Error(ctx, "error-while-uploading-gemtopon-map-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
 	}
-	logger.Infow("stored-tconts-and-gem-into-kv-store-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+	logger.Infow(ctx, "stored-tconts-and-gem-into-kv-store-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
 	for _, gemPort := range gemPortIDs {
 		f.addGemPortToOnuInfoMap(ctx, intfID, onuID, gemPort)
 	}
 }
 
-func (f *OpenOltFlowMgr) populateTechProfilePerPonPort() error {
+func (f *OpenOltFlowMgr) populateTechProfilePerPonPort(ctx context.Context) error {
 	var tpCount int
 	for _, techRange := range f.resourceMgr.DevInfo.Ranges {
 		for _, intfID := range techRange.IntfIds {
 			f.techprofile[intfID] = f.resourceMgr.ResourceMgrs[uint32(intfID)].TechProfileMgr
 			tpCount++
-			logger.Debugw("init-tech-profile-done",
+			logger.Debugw(ctx, "init-tech-profile-done",
 				log.Fields{
 					"intf-id":   intfID,
 					"device-id": f.deviceHandler.device.Id})
@@ -860,7 +858,7 @@
 			"pon-port-count":     f.resourceMgr.DevInfo.GetPonPorts(),
 			"device-id":          f.deviceHandler.device.Id}, nil)
 	}
-	logger.Infow("populated-techprofile-for-ponports-successfully",
+	logger.Infow(ctx, "populated-techprofile-for-ponports-successfully",
 		log.Fields{
 			"numofTech":   tpCount,
 			"numPonPorts": f.resourceMgr.DevInfo.GetPonPorts(),
@@ -873,7 +871,7 @@
 	uplinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
 	allocID uint32, gemportID uint32, tpID uint32) error {
 	uplinkClassifier[PacketTagType] = SingleTag
-	logger.Debugw("adding-upstream-data-flow",
+	logger.Debugw(ctx, "adding-upstream-data-flow",
 		log.Fields{
 			"uplinkClassifier": uplinkClassifier,
 			"uplinkAction":     uplinkAction})
@@ -887,7 +885,7 @@
 	downlinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
 	allocID uint32, gemportID uint32, tpID uint32) error {
 	downlinkClassifier[PacketTagType] = DoubleTag
-	logger.Debugw("adding-downstream-data-flow",
+	logger.Debugw(ctx, "adding-downstream-data-flow",
 		log.Fields{
 			"downlinkClassifier": downlinkClassifier,
 			"downlinkAction":     downlinkAction})
@@ -895,8 +893,8 @@
 	if vlan, exists := downlinkClassifier[VlanVid]; exists {
 		if vlan.(uint32) == (uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4000) { //private VLAN given by core
 			if metadata, exists := downlinkClassifier[Metadata]; exists { // inport is filled in metadata by core
-				if uint32(metadata.(uint64)) == MkUniPortNum(intfID, onuID, uniID) {
-					logger.Infow("ignoring-dl-trap-device-flow-from-core",
+				if uint32(metadata.(uint64)) == MkUniPortNum(ctx, intfID, onuID, uniID) {
+					logger.Infow(ctx, "ignoring-dl-trap-device-flow-from-core",
 						log.Fields{
 							"flow":      logicalFlow,
 							"device-id": f.deviceHandler.device.Id,
@@ -935,7 +933,7 @@
 	   takes priority over flow_cookie to find any available HSIA_FLOW
 	   id for the ONU.
 	*/
-	logger.Infow("adding-hsia-flow",
+	logger.Infow(ctx, "adding-hsia-flow",
 		log.Fields{
 			"intf-id":     intfID,
 			"onu-id":      onuID,
@@ -951,14 +949,14 @@
 	var vlanVid uint32
 	if _, ok := classifier[VlanPcp]; ok {
 		vlanPbit = classifier[VlanPcp].(uint32)
-		logger.Debugw("found-pbit-in-flow",
+		logger.Debugw(ctx, "found-pbit-in-flow",
 			log.Fields{
 				"vlan-pbit": vlanPbit,
 				"intf-id":   intfID,
 				"onu-id":    onuID,
 				"device-id": f.deviceHandler.device.Id})
 	} else {
-		logger.Debugw("pbit-not-found-in-flow",
+		logger.Debugw(ctx, "pbit-not-found-in-flow",
 			log.Fields{
 				"vlan-pcp":  VlanPcp,
 				"intf-id":   intfID,
@@ -974,9 +972,9 @@
 				"onu-id":    onuID,
 				"device-id": f.deviceHandler.device.Id})
 	}
-	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("flow-already-exists",
+		logger.Infow(ctx, "flow-already-exists",
 			log.Fields{
 				"device-id": f.deviceHandler.device.Id,
 				"intf-id":   intfID,
@@ -997,7 +995,7 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier, "device-id": f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Debugw("created-classifier-proto",
+	logger.Debugw(ctx, "created-classifier-proto",
 		log.Fields{
 			"classifier": *classifierProto,
 			"device-id":  f.deviceHandler.device.Id})
@@ -1005,11 +1003,11 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"action": action, "device-id": f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Debugw("created-action-proto",
+	logger.Debugw(ctx, "created-action-proto",
 		log.Fields{
 			"action":    *actionProto,
 			"device-id": f.deviceHandler.device.Id})
-	networkIntfID, err := getNniIntfID(classifier, action)
+	networkIntfID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-interface-id",
 			log.Fields{
@@ -1036,7 +1034,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, nil, err).Log()
 	}
-	logger.Infow("hsia-flow-added-to-device-successfully",
+	logger.Infow(ctx, "hsia-flow-added-to-device-successfully",
 		log.Fields{"direction": direction,
 			"device-id": f.deviceHandler.device.Id,
 			"flow":      flow,
@@ -1061,7 +1059,7 @@
 	classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32,
 	gemPortID uint32, tpID uint32) error {
 
-	networkIntfID, err := getNniIntfID(classifier, action)
+	networkIntfID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
 			"classifier": classifier,
@@ -1081,9 +1079,9 @@
 	classifier[PacketTagType] = SingleTag
 	delete(classifier, VlanVid)
 
-	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("flow-exists--not-re-adding",
+		logger.Infow(ctx, "flow-exists--not-re-adding",
 			log.Fields{
 				"device-id": f.deviceHandler.device.Id,
 				"intf-id":   intfID,
@@ -1103,7 +1101,7 @@
 			err).Log()
 	}
 
-	logger.Debugw("creating-ul-dhcp-flow",
+	logger.Debugw(ctx, "creating-ul-dhcp-flow",
 		log.Fields{
 			"ul_classifier": classifier,
 			"ul_action":     action,
@@ -1116,7 +1114,7 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
 	}
-	logger.Debugw("created-classifier-proto", log.Fields{"classifier": *classifierProto})
+	logger.Debugw(ctx, "created-classifier-proto", log.Fields{"classifier": *classifierProto})
 	actionProto, err := makeOpenOltActionField(action, classifier)
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err).Log()
@@ -1140,7 +1138,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &dhcpFlow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, log.Fields{"dhcp-flow": dhcpFlow}, err).Log()
 	}
-	logger.Infow("dhcp-ul-flow-added-to-device-successfully",
+	logger.Infow(ctx, "dhcp-ul-flow-added-to-device-successfully",
 		log.Fields{
 			"device-id": f.deviceHandler.device.Id,
 			"flow-id":   flowID,
@@ -1170,7 +1168,7 @@
 func (f *OpenOltFlowMgr) addUpstreamTrapFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{},
 	action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32, flowType string, tpID uint32) error {
 
-	networkIntfID, err := getNniIntfID(classifier, action)
+	networkIntfID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-interface-id",
 			log.Fields{
@@ -1189,9 +1187,9 @@
 	classifier[PacketTagType] = SingleTag
 	delete(classifier, VlanVid)
 
-	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkIntfID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("flow-exists-not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Infow(ctx, "flow-exists-not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
 		return nil
 	}
 
@@ -1209,7 +1207,7 @@
 			err).Log()
 	}
 
-	logger.Debugw("creating-upstream-trap-flow",
+	logger.Debugw(ctx, "creating-upstream-trap-flow",
 		log.Fields{
 			"ul_classifier": classifier,
 			"ul_action":     action,
@@ -1223,7 +1221,7 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier, "device-id": f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Debugw("created-classifier-proto",
+	logger.Debugw(ctx, "created-classifier-proto",
 		log.Fields{
 			"classifier": *classifierProto,
 			"device-id":  f.deviceHandler.device.Id})
@@ -1251,7 +1249,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": flow, "device-id": f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Infof("%s ul-flow-added-to-device-successfully", flowType)
+	logger.Infof(ctx, "%s ul-flow-added-to-device-successfully", flowType)
 
 	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &flow, flowStoreCookie, flowType, flowID, logicalFlow.Id)
 	if err := f.updateFlowInfoToKVStore(ctx, flow.AccessIntfId,
@@ -1268,7 +1266,7 @@
 func (f *OpenOltFlowMgr) addEAPOLFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32,
 	classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32,
 	gemPortID uint32, vlanID uint32, tpID uint32) error {
-	logger.Infow("adding-eapol-to-device",
+	logger.Infow(ctx, "adding-eapol-to-device",
 		log.Fields{
 			"intf-id":    intfID,
 			"onu-id":     onuID,
@@ -1288,9 +1286,9 @@
 	uplinkClassifier[VlanPcp] = classifier[VlanPcp]
 	// Fill action
 	uplinkAction[TrapToHost] = true
-	flowStoreCookie := getFlowStoreCookie(uplinkClassifier, gemPortID)
+	flowStoreCookie := getFlowStoreCookie(ctx, uplinkClassifier, gemPortID)
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("flow-exists-not-re-adding", log.Fields{
+		logger.Infow(ctx, "flow-exists-not-re-adding", log.Fields{
 			"device-id": f.deviceHandler.device.Id,
 			"onu-id":    onuID,
 			"intf-id":   intfID})
@@ -1307,7 +1305,7 @@
 				"device-id": f.deviceHandler.device.Id},
 			err).Log()
 	}
-	logger.Debugw("creating-ul-eapol-flow",
+	logger.Debugw(ctx, "creating-ul-eapol-flow",
 		log.Fields{
 			"ul_classifier": uplinkClassifier,
 			"ul_action":     uplinkAction,
@@ -1322,7 +1320,7 @@
 			"classifier": uplinkClassifier,
 			"device-id":  f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Debugw("created-classifier-proto",
+	logger.Debugw(ctx, "created-classifier-proto",
 		log.Fields{
 			"classifier": *classifierProto,
 			"device-id":  f.deviceHandler.device.Id})
@@ -1330,11 +1328,11 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"action": uplinkAction, "device-id": f.deviceHandler.device.Id}, err).Log()
 	}
-	logger.Debugw("created-action-proto",
+	logger.Debugw(ctx, "created-action-proto",
 		log.Fields{
 			"action":    *actionProto,
 			"device-id": f.deviceHandler.device.Id})
-	networkIntfID, err := getNniIntfID(classifier, action)
+	networkIntfID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
 			"classifier": classifier,
@@ -1361,7 +1359,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &upstreamFlow); err != nil {
 		return olterrors.NewErrFlowOp("add", uplinkFlowID, log.Fields{"flow": upstreamFlow}, err).Log()
 	}
-	logger.Infow("eapol-ul-flow-added-to-device-successfully",
+	logger.Infow(ctx, "eapol-ul-flow-added-to-device-successfully",
 		log.Fields{
 			"device-id": f.deviceHandler.device.Id,
 			"onu-id":    onuID,
@@ -1459,8 +1457,8 @@
 }
 
 // getTPpath returns the ETCD path for a given UNI port
-func (f *OpenOltFlowMgr) getTPpath(intfID uint32, uniPath string, TpID uint32) string {
-	return f.techprofile[intfID].GetTechProfileInstanceKVPath(TpID, uniPath)
+func (f *OpenOltFlowMgr) getTPpath(ctx context.Context, intfID uint32, uniPath string, TpID uint32) string {
+	return f.techprofile[intfID].GetTechProfileInstanceKVPath(ctx, TpID, uniPath)
 }
 
 // DeleteTechProfileInstances removes the tech profile instances from persistent storage
@@ -1494,12 +1492,12 @@
 	return nil
 }
 
-func getFlowStoreCookie(classifier map[string]interface{}, gemPortID uint32) uint64 {
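+// getFlowStoreCookie derives a deterministic 64-bit cookie for a flow from its classifier and gem-port id;
+// callers use it to check whether the flow already exists in the KV store before re-adding it.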
+func getFlowStoreCookie(ctx context.Context, classifier map[string]interface{}, gemPortID uint32) uint64 {
 	if len(classifier) == 0 { // should never happen
-		logger.Error("invalid-classfier-object")
+		logger.Error(ctx, "invalid-classfier-object")
 		return 0
 	}
-	logger.Debugw("generating-flow-store-cookie",
+	logger.Debugw(ctx, "generating-flow-store-cookie",
 		log.Fields{
 			"classifier": classifier,
 			"gemport-id": gemPortID})
@@ -1508,7 +1506,7 @@
 	var err error
 	// TODO: Do we need to marshall ??
 	if jsonData, err = json.Marshal(classifier); err != nil {
-		logger.Error("failed-to-encode-classifier")
+		logger.Error(ctx, "failed-to-encode-classifier")
 		return 0
 	}
 	flowString = string(jsonData)
@@ -1520,7 +1518,7 @@
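+	// fold the hash digest into a uint64; this value is returned as the flow-store cookie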
 	hash := big.NewInt(0)
 	hash.SetBytes(h.Sum(nil))
 	generatedHash := hash.Uint64()
-	logger.Debugw("hash-generated", log.Fields{"hash": generatedHash})
+	logger.Debugw(ctx, "hash-generated", log.Fields{"hash": generatedHash})
 	return generatedHash
 }
 
@@ -1538,7 +1536,7 @@
 	// Get existing flows matching flowid for given subscriber from KV store
 	existingFlows := f.resourceMgr.GetFlowIDInfo(ctx, intfID, flow.OnuId, flow.UniId, flow.FlowId)
 	if existingFlows != nil {
-		logger.Debugw("flow-exists-for-given-flowID--appending-it-to-current-flow",
+		logger.Debugw(ctx, "flow-exists-for-given-flowID--appending-it-to-current-flow",
 			log.Fields{
 				"flow-id":   flow.FlowId,
 				"device-id": f.deviceHandler.device.Id,
@@ -1549,7 +1547,7 @@
 		//}
 		flows = append(flows, *existingFlows...)
 	}
-	logger.Debugw("updated-flows-for-given-flowID-and-onuid",
+	logger.Debugw(ctx, "updated-flows-for-given-flowID-and-onuid",
 		log.Fields{
 			"updatedflow": flows,
 			"flow-id":     flow.FlowId,
@@ -1572,30 +1570,30 @@
 //	// Get existing flows matching flowid for given subscriber from KV store
 //	existingFlows := f.resourceMgr.GetFlowIDInfo(intfId, uint32(flow.OnuId), uint32(flow.UniId), flow.FlowId)
 //	if existingFlows != nil {
-//		logger.Debugw("Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
+//		logger.Debugw(ctx, "Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
 //		for _, f := range *existingFlows {
 //			flows = append(flows, f)
 //		}
 //	}
-//	logger.Debugw("Updated flows for given flowID and onuid", log.Fields{"updatedflow": flows, "flowid": flow.FlowId, "onu": flow.OnuId})
+//	logger.Debugw(ctx, "Updated flows for given flowID and onuid", log.Fields{"updatedflow": flows, "flowid": flow.FlowId, "onu": flow.OnuId})
 //	return &flows
 //}
 
 func (f *OpenOltFlowMgr) updateFlowInfoToKVStore(ctx context.Context, intfID int32, onuID int32, uniID int32, flowID uint32, flows *[]rsrcMgr.FlowInfo) error {
-	logger.Debugw("storing-flow(s)-into-kv-store", log.Fields{
+	logger.Debugw(ctx, "storing-flow(s)-into-kv-store", log.Fields{
 		"flow-id":   flowID,
 		"device-id": f.deviceHandler.device.Id,
 		"intf-id":   intfID,
 		"onu-id":    onuID})
 	if err := f.resourceMgr.UpdateFlowIDInfo(ctx, intfID, onuID, uniID, flowID, flows); err != nil {
-		logger.Warnw("error-while-storing-flow-into-kv-store", log.Fields{
+		logger.Warnw(ctx, "error-while-storing-flow-into-kv-store", log.Fields{
 			"device-id": f.deviceHandler.device.Id,
 			"onu-id":    onuID,
 			"intf-id":   intfID,
 			"flow-id":   flowID})
 		return err
 	}
-	logger.Infow("stored-flow(s)-into-kv-store-successfully!", log.Fields{
+	logger.Infow(ctx, "stored-flow(s)-into-kv-store-successfully!", log.Fields{
 		"device-id": f.deviceHandler.device.Id,
 		"onu-id":    onuID,
 		"intf-id":   intfID,
@@ -1616,7 +1614,7 @@
 		intfID = uint32(deviceFlow.NetworkIntfId)
 	}
 
-	logger.Debugw("sending-flow-to-device-via-grpc", log.Fields{
+	logger.Debugw(ctx, "sending-flow-to-device-via-grpc", log.Fields{
 		"flow":      *deviceFlow,
 		"device-id": f.deviceHandler.device.Id,
 		"intf-id":   intfID})
@@ -1624,7 +1622,7 @@
 
 	st, _ := status.FromError(err)
 	if st.Code() == codes.AlreadyExists {
-		logger.Debug("flow-already-exists", log.Fields{
+		logger.Debug(ctx, "flow-already-exists", log.Fields{
 			"err":        err,
 			"deviceFlow": deviceFlow,
 			"device-id":  f.deviceHandler.device.Id,
@@ -1633,7 +1631,7 @@
 	}
 
 	if err != nil {
-		logger.Errorw("failed-to-add-flow-to-device",
+		logger.Errorw(ctx, "failed-to-add-flow-to-device",
 			log.Fields{"err": err,
 				"device-flow": deviceFlow,
 				"device-id":   f.deviceHandler.device.Id,
@@ -1645,7 +1643,7 @@
 		// No need to register the flow if it is a trap on nni flow.
 		f.registerFlow(ctx, logicalFlow, deviceFlow)
 	}
-	logger.Infow("flow-added-to-device-successfully ",
+	logger.Infow(ctx, "flow-added-to-device-successfully ",
 		log.Fields{
 			"flow":      *deviceFlow,
 			"device-id": f.deviceHandler.device.Id,
@@ -1653,15 +1651,15 @@
 	return nil
 }
 
-func (f *OpenOltFlowMgr) removeFlowFromDevice(deviceFlow *openoltpb2.Flow, ofFlowID uint64) error {
-	logger.Debugw("sending-flow-to-device-via-grpc",
+func (f *OpenOltFlowMgr) removeFlowFromDevice(ctx context.Context, deviceFlow *openoltpb2.Flow, ofFlowID uint64) error {
+	logger.Debugw(ctx, "sending-flow-to-device-via-grpc",
 		log.Fields{
 			"flow":      *deviceFlow,
 			"device-id": f.deviceHandler.device.Id})
 	_, err := f.deviceHandler.Client.FlowRemove(context.Background(), deviceFlow)
 	if err != nil {
 		if f.deviceHandler.device.ConnectStatus == common.ConnectStatus_UNREACHABLE {
-			logger.Warnw("can-not-remove-flow-from-device--unreachable",
+			logger.Warnw(ctx, "can-not-remove-flow-from-device--unreachable",
 				log.Fields{
 					"err":        err,
 					"deviceFlow": deviceFlow,
@@ -1672,7 +1670,7 @@
 		return olterrors.NewErrFlowOp("remove", deviceFlow.FlowId, log.Fields{"deviceFlow": deviceFlow}, err)
 
 	}
-	logger.Infow("flow-removed-from-device-successfully", log.Fields{
+	logger.Infow(ctx, "flow-removed-from-device-successfully", log.Fields{
 		"of-flow-id": ofFlowID,
 		"flow":       *deviceFlow,
 		"device-id":  f.deviceHandler.device.Id,
@@ -1687,13 +1685,13 @@
 //func generateStoredId(flowId uint32, direction string)uint32{
 //
 //	if direction == Upstream{
-//		logger.Debug("Upstream flow shifting flowid")
+//		logger.Debug(ctx, "Upstream flow shifting flowid")
 //		return ((0x1 << 15) | flowId)
 //	}else if direction == Downstream{
-//		logger.Debug("Downstream flow not shifting flowid")
+//		logger.Debug(ctx, "Downstream flow not shifting flowid")
 //		return flowId
 //	}else{
-//		logger.Errorw("Unrecognized direction",log.Fields{"direction": direction})
+//		logger.Errorw(ctx, "Unrecognized direction",log.Fields{"direction": direction})
 //		return flowId
 //	}
 //}
@@ -1725,13 +1723,13 @@
 	var uniID = -1
 	var gemPortID = -1
 
-	networkInterfaceID, err := IntfIDFromNniPortNum(portNo)
+	networkInterfaceID, err := IntfIDFromNniPortNum(ctx, portNo)
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"nni-port-number": portNo}, err).Log()
 	}
-	var flowStoreCookie = getFlowStoreCookie(classifierInfo, uint32(0))
+	var flowStoreCookie = getFlowStoreCookie(ctx, classifierInfo, uint32(0))
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("flow-exists--not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
+		logger.Infow(ctx, "flow-exists--not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
 		return nil
 	}
 	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0)
@@ -1754,7 +1752,7 @@
 				"classifier": classifierInfo,
 				"device-id":  f.deviceHandler.device.Id}, err)
 	}
-	logger.Debugw("created-classifier-proto",
+	logger.Debugw(ctx, "created-classifier-proto",
 		log.Fields{
 			"classifier": *classifierProto,
 			"device-id":  f.deviceHandler.device.Id})
@@ -1765,7 +1763,7 @@
 				"action":    actionInfo,
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
-	logger.Debugw("created-action-proto",
+	logger.Debugw(ctx, "created-action-proto",
 		log.Fields{
 			"action":    *actionProto,
 			"device-id": f.deviceHandler.device.Id})
@@ -1788,7 +1786,7 @@
 				"flow":      downstreamflow,
 				"device-id": f.deviceHandler.device.Id}, err)
 	}
-	logger.Infow("lldp-trap-on-nni-flow-added-to-device-successfully",
+	logger.Infow(ctx, "lldp-trap-on-nni-flow-added-to-device-successfully",
 		log.Fields{
 			"device-id": f.deviceHandler.device.Id,
 			"onu-id":    onuID,
@@ -1811,16 +1809,16 @@
 }
 
 //getOnuDevice to fetch onu from cache or core.
-func (f *OpenOltFlowMgr) getOnuDevice(intfID uint32, onuID uint32) (*OnuDevice, error) {
+func (f *OpenOltFlowMgr) getOnuDevice(ctx context.Context, intfID uint32, onuID uint32) (*OnuDevice, error) {
 	onuKey := f.deviceHandler.formOnuKey(intfID, onuID)
 	onuDev, ok := f.deviceHandler.onus.Load(onuKey)
 	if !ok {
-		logger.Debugw("couldnt-find-onu-in-cache",
+		logger.Debugw(ctx, "couldnt-find-onu-in-cache",
 			log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
 				"device-id": f.deviceHandler.device.Id})
-		onuDevice, err := f.getChildDevice(intfID, onuID)
+		onuDevice, err := f.getChildDevice(ctx, intfID, onuID)
 		if err != nil {
 			return nil, olterrors.NewErrNotFound("onu-child-device",
 				log.Fields{
@@ -1832,7 +1830,7 @@
 		//better to add the device to the cache here.
 		f.deviceHandler.StoreOnuDevice(onuDev.(*OnuDevice))
 	} else {
-		logger.Debugw("found-onu-in-cache",
+		logger.Debugw(ctx, "found-onu-in-cache",
 			log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
@@ -1843,14 +1841,14 @@
 }
 
 //getChildDevice to fetch onu
-func (f *OpenOltFlowMgr) getChildDevice(intfID uint32, onuID uint32) (*voltha.Device, error) {
-	logger.Infow("GetChildDevice",
+func (f *OpenOltFlowMgr) getChildDevice(ctx context.Context, intfID uint32, onuID uint32) (*voltha.Device, error) {
+	logger.Infow(ctx, "GetChildDevice",
 		log.Fields{
 			"pon-port":  intfID,
 			"onu-id":    onuID,
 			"device-id": f.deviceHandler.device.Id})
 	parentPortNo := IntfIDToPortNo(intfID, voltha.Port_PON_OLT)
-	onuDevice, err := f.deviceHandler.GetChildDevice(parentPortNo, onuID)
+	onuDevice, err := f.deviceHandler.GetChildDevice(ctx, parentPortNo, onuID)
 	if err != nil {
 		return nil, olterrors.NewErrNotFound("onu",
 			log.Fields{
@@ -1859,7 +1857,7 @@
 				"device-id":    f.deviceHandler.device.Id},
 			err)
 	}
-	logger.Infow("successfully-received-child-device-from-core",
+	logger.Infow(ctx, "successfully-received-child-device-from-core",
 		log.Fields{
 			"device-id":       f.deviceHandler.device.Id,
 			"child_device_id": onuDevice.Id,
@@ -1867,13 +1865,13 @@
 	return onuDevice, nil
 }
 
-func findNextFlow(flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
-	logger.Info("unimplemented-flow %v", flow)
+func findNextFlow(ctx context.Context, flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
+	logger.Info(ctx, "unimplemented-flow %v", flow)
 	return nil
 }
 
-func (f *OpenOltFlowMgr) clearFlowsAndSchedulerForLogicalPort(childDevice *voltha.Device, logicalPort *voltha.LogicalPort) {
-	logger.Info("unimplemented-device %v, logicalport %v", childDevice, logicalPort)
+func (f *OpenOltFlowMgr) clearFlowsAndSchedulerForLogicalPort(ctx context.Context, childDevice *voltha.Device, logicalPort *voltha.LogicalPort) {
+	logger.Info(ctx, "unimplemented-device %v, logicalport %v", childDevice, logicalPort)
 }
 
 func (f *OpenOltFlowMgr) decodeStoredID(id uint64) (uint64, string) {
@@ -1883,10 +1881,10 @@
 	return id, Downstream
 }
 
-func (f *OpenOltFlowMgr) sendDeleteGemPortToChild(intfID uint32, onuID uint32, uniID uint32, gemPortID uint32, tpPath string) error {
-	onuDev, err := f.getOnuDevice(intfID, onuID)
+func (f *OpenOltFlowMgr) sendDeleteGemPortToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, gemPortID uint32, tpPath string) error {
+	onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
 	if err != nil {
-		logger.Debugw("couldnt-find-onu-child-device",
+		logger.Debugw(ctx, "couldnt-find-onu-child-device",
 			log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
@@ -1896,7 +1894,7 @@
 	}
 
 	delGemPortMsg := &ic.InterAdapterDeleteGemPortMessage{UniId: uniID, TpPath: tpPath, GemPortId: gemPortID}
-	logger.Debugw("sending-gem-port-delete-to-openonu-adapter",
+	logger.Debugw(ctx, "sending-gem-port-delete-to-openonu-adapter",
 		log.Fields{
 			"msg":       *delGemPortMsg,
 			"device-id": f.deviceHandler.device.Id})
@@ -1915,7 +1913,7 @@
 				"proxyDeviceID": onuDev.proxyDeviceID,
 				"device-id":     f.deviceHandler.device.Id}, sendErr)
 	}
-	logger.Infow("success-sending-del-gem-port-to-onu-adapter",
+	logger.Infow(ctx, "success-sending-del-gem-port-to-onu-adapter",
 		log.Fields{
 			"msg":          delGemPortMsg,
 			"from-adapter": f.deviceHandler.device.Type,
@@ -1924,10 +1922,10 @@
 	return nil
 }
 
-func (f *OpenOltFlowMgr) sendDeleteTcontToChild(intfID uint32, onuID uint32, uniID uint32, allocID uint32, tpPath string) error {
-	onuDev, err := f.getOnuDevice(intfID, onuID)
+func (f *OpenOltFlowMgr) sendDeleteTcontToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID uint32, tpPath string) error {
+	onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
 	if err != nil {
-		logger.Warnw("couldnt-find-onu-child-device",
+		logger.Warnw(ctx, "couldnt-find-onu-child-device",
 			log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
@@ -1937,7 +1935,7 @@
 	}
 
 	delTcontMsg := &ic.InterAdapterDeleteTcontMessage{UniId: uniID, TpPath: tpPath, AllocId: allocID}
-	logger.Debugw("sending-tcont-delete-to-openonu-adapter",
+	logger.Debugw(ctx, "sending-tcont-delete-to-openonu-adapter",
 		log.Fields{
 			"msg":       *delTcontMsg,
 			"device-id": f.deviceHandler.device.Id})
@@ -1955,20 +1953,20 @@
 				"proxyDeviceID": onuDev.proxyDeviceID,
 				"device-id":     f.deviceHandler.device.Id}, sendErr)
 	}
-	logger.Infow("success-sending-del-tcont-to-onu-adapter",
+	logger.Infow(ctx, "success-sending-del-tcont-to-onu-adapter",
 		log.Fields{
 			"msg":       delTcontMsg,
 			"device-id": f.deviceHandler.device.Id})
 	return nil
 }
 
-func (f *OpenOltFlowMgr) deletePendingFlows(Intf uint32, onuID int32, uniID int32) {
+func (f *OpenOltFlowMgr) deletePendingFlows(ctx context.Context, Intf uint32, onuID int32, uniID int32) {
 	pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
 	if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); ok {
 		if val.(int) > 0 {
 			pnFlDels := val.(int) - 1
 			if pnFlDels > 0 {
-				logger.Debugw("flow-delete-succeeded--more-pending",
+				logger.Debugw(ctx, "flow-delete-succeeded--more-pending",
 					log.Fields{
 						"intf":               Intf,
 						"onu-id":             onuID,
@@ -1977,7 +1975,7 @@
 						"device-id":          f.deviceHandler.device.Id})
 				f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
 			} else {
-				logger.Debugw("all-pending-flow-deletes-handled--removing-entry-from-map",
+				logger.Debugw(ctx, "all-pending-flow-deletes-handled--removing-entry-from-map",
 					log.Fields{
 						"intf":      Intf,
 						"onu-id":    onuID,
@@ -1987,7 +1985,7 @@
 			}
 		}
 	} else {
-		logger.Debugw("no-pending-delete-flows-found",
+		logger.Debugw(ctx, "no-pending-delete-flows-found",
 			log.Fields{
 				"intf":      Intf,
 				"onu-id":    onuID,
@@ -2002,12 +2000,12 @@
 // which was used for deriving the gemport->logicalPortNo during packet-in.
 // Otherwise stale info continues to exist after gemport is freed and wrong logicalPortNo
 // is conveyed to ONOS during packet-in OF message.
-func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(intfID uint32, onuID uint32, gemPortID uint32) {
+func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(ctx context.Context, intfID uint32, onuID uint32, gemPortID uint32) {
 
 	f.onuGemInfoLock[intfID].Lock()
 	defer f.onuGemInfoLock[intfID].Unlock()
 
-	logger.Infow("deleting-gem-from-local-cache",
+	logger.Infow(ctx, "deleting-gem-from-local-cache",
 		log.Fields{
 			"gem":       gemPortID,
 			"intf-id":   intfID,
@@ -2022,7 +2020,7 @@
 				if gem == gemPortID {
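+					// splice element j out of onu.GemPorts to drop the freed gem-port from the cache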
 					onu.GemPorts = append(onu.GemPorts[:j], onu.GemPorts[j+1:]...)
 					onugem[i] = onu
-					logger.Infow("removed-gemport-from-local-cache",
+					logger.Infow(ctx, "removed-gemport-from-local-cache",
 						log.Fields{
 							"intf-id":           intfID,
 							"onu-id":            onuID,
@@ -2043,7 +2041,7 @@
 	gemPortID int32, flowID uint32, flowDirection string,
 	portNum uint32, updatedFlows []rsrcMgr.FlowInfo) error {
 
-	tpID, err := getTpIDFromFlow(flow)
+	tpID, err := getTpIDFromFlow(ctx, flow)
 	if err != nil {
 		return olterrors.NewErrNotFound("tp-id",
 			log.Fields{
@@ -2070,7 +2068,7 @@
 			if onuID != -1 && uniID != -1 {
 				pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
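+				// pendingFlowDelete reference-counts in-flight deletes per (intf, onu, uni); AddFlow waits for this count to drain before installing new flows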
 				if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
-					logger.Debugw("creating-entry-for-pending-flow-delete",
+					logger.Debugw(ctx, "creating-entry-for-pending-flow-delete",
 						log.Fields{
 							"flow-id":   flowID,
 							"intf":      Intf,
@@ -2080,7 +2078,7 @@
 					f.pendingFlowDelete.Store(pnFlDelKey, 1)
 				} else {
 					pnFlDels := val.(int) + 1
-					logger.Debugw("updating-flow-delete-entry",
+					logger.Debugw(ctx, "updating-flow-delete-entry",
 						log.Fields{
 							"flow-id":            flowID,
 							"intf":               Intf,
@@ -2091,10 +2089,10 @@
 					f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
 				}
 
-				defer f.deletePendingFlows(Intf, onuID, uniID)
+				defer f.deletePendingFlows(ctx, Intf, onuID, uniID)
 			}
 
-			logger.Debugw("releasing-flow-id-to-resource-manager",
+			logger.Debugw(ctx, "releasing-flow-id-to-resource-manager",
 				log.Fields{
 					"Intf":      Intf,
 					"onu-id":    onuID,
@@ -2104,8 +2102,8 @@
 			f.resourceMgr.FreeFlowID(ctx, Intf, int32(onuID), int32(uniID), flowID)
 
 			uni := getUniPortPath(f.deviceHandler.device.Id, Intf, onuID, uniID)
-			tpPath := f.getTPpath(Intf, uni, tpID)
-			logger.Debugw("getting-techprofile-instance-for-subscriber",
+			tpPath := f.getTPpath(ctx, Intf, uni, tpID)
+			logger.Debugw(ctx, "getting-techprofile-instance-for-subscriber",
 				log.Fields{
 					"TP-PATH":   tpPath,
 					"device-id": f.deviceHandler.device.Id})
@@ -2130,19 +2128,19 @@
 						break
 					}
 				}
-				logger.Debugw("gem-port-id-is-still-used-by-other-flows",
+				logger.Debugw(ctx, "gem-port-id-is-still-used-by-other-flows",
 					log.Fields{
 						"gemport-id":  gemPortID,
 						"usedByFlows": flowIDs,
 						"device-id":   f.deviceHandler.device.Id})
 				return nil
 			}
-			logger.Debugf("gem-port-id %d is-not-used-by-another-flow--releasing-the-gem-port", gemPortID)
+			logger.Debugf(ctx, "gem-port-id %d is-not-used-by-another-flow--releasing-the-gem-port", gemPortID)
 			f.resourceMgr.RemoveGemPortIDForOnu(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
 			// TODO: The TrafficQueue corresponding to this gem-port also should be removed immediately.
 			// But it is anyway eventually  removed later when the TechProfile is freed, so not a big issue for now.
 			f.resourceMgr.RemoveGEMportPonportToOnuMapOnKVStore(ctx, uint32(gemPortID), Intf)
-			f.deleteGemPortFromLocalCache(Intf, uint32(onuID), uint32(gemPortID))
+			f.deleteGemPortFromLocalCache(ctx, Intf, uint32(onuID), uint32(gemPortID))
 			f.onuIdsLock.Lock()
 			//every time an entry is deleted from the flowsUsedByGemPort cache, the same should be updated in kv as well
 			// by calling DeleteFlowIDsForGem
@@ -2151,8 +2149,8 @@
 			f.resourceMgr.FreeGemPortID(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
 			f.onuIdsLock.Unlock()
 			// Delete the gem port on the ONU.
-			if err := f.sendDeleteGemPortToChild(Intf, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
-				logger.Errorw("error-processing-delete-gem-port-towards-onu",
+			if err := f.sendDeleteGemPortToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
+				logger.Errorw(ctx, "error-processing-delete-gem-port-towards-onu",
 					log.Fields{
 						"err":        err,
 						"intf":       Intf,
@@ -2171,8 +2169,8 @@
 					f.RemoveSchedulerQueues(ctx, schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), tpID: tpID, uniPort: portNum, tpInst: techprofileInst})
 					f.resourceMgr.FreeAllocID(ctx, Intf, uint32(onuID), uint32(uniID), techprofileInst.UsScheduler.AllocID)
 					// Delete the TCONT on the ONU.
-					if err := f.sendDeleteTcontToChild(Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.UsScheduler.AllocID), tpPath); err != nil {
-						logger.Errorw("error-processing-delete-tcont-towards-onu",
+					if err := f.sendDeleteTcontToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.UsScheduler.AllocID), tpPath); err != nil {
+						logger.Errorw(ctx, "error-processing-delete-tcont-towards-onu",
 							log.Fields{
 								"intf":      Intf,
 								"onu-id":    onuID,
@@ -2186,8 +2184,8 @@
 				f.DeleteTechProfileInstance(ctx, Intf, uint32(onuID), uint32(uniID), "", tpID)
 				f.resourceMgr.FreeAllocID(ctx, Intf, uint32(onuID), uint32(uniID), techprofileInst.AllocID)
 				// Delete the TCONT on the ONU.
-				if err := f.sendDeleteTcontToChild(Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.AllocID), tpPath); err != nil {
-					logger.Errorw("error-processing-delete-tcont-towards-onu",
+				if err := f.sendDeleteTcontToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.AllocID), tpPath); err != nil {
+					logger.Errorw(ctx, "error-processing-delete-tcont-towards-onu",
 						log.Fields{
 							"intf":      Intf,
 							"onu-id":    onuID,
@@ -2196,7 +2194,7 @@
 							"alloc-id":  techprofileInst.AllocID})
 				}
 			default:
-				logger.Errorw("error-unknown-tech",
+				logger.Errorw(ctx, "error-unknown-tech",
 					log.Fields{
 						"techprofileInst": techprofileInst})
 			}
@@ -2208,7 +2206,7 @@
 // nolint: gocyclo
 func (f *OpenOltFlowMgr) clearFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) {
 
-	logger.Infow("clear-flow-from-resource-manager",
+	logger.Infow(ctx, "clear-flow-from-resource-manager",
 		log.Fields{
 			"flowDirection": flowDirection,
 			"flow":          *flow,
@@ -2222,9 +2220,9 @@
 	var updatedFlows []rsrcMgr.FlowInfo
 	classifierInfo := make(map[string]interface{})
 
-	portNum, Intf, onu, uni, inPort, ethType, err := FlowExtractInfo(flow, flowDirection)
+	portNum, Intf, onu, uni, inPort, ethType, err := FlowExtractInfo(ctx, flow, flowDirection)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return
 	}
 
@@ -2234,10 +2232,10 @@
 	for _, field := range flows.GetOfbFields(flow) {
 		if field.Type == flows.IP_PROTO {
 			classifierInfo[IPProto] = field.GetIpProto()
-			logger.Debugw("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+			logger.Debugw(ctx, "field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
 		}
 	}
-	logger.Infow("extracted-access-info-from-flow-to-be-deleted",
+	logger.Infow(ctx, "extracted-access-info-from-flow-to-be-deleted",
 		log.Fields{
 			"flow-id": flow.Id,
 			"intf-id": Intf,
@@ -2247,10 +2245,10 @@
 	if ethType == LldpEthType || ((classifierInfo[IPProto] == IPProtoDhcp) && (flowDirection == "downstream")) {
 		onuID = -1
 		uniID = -1
-		logger.Debug("trap-on-nni-flow-set-oni--uni-to- -1")
-		Intf, err = IntfIDFromNniPortNum(inPort)
+		logger.Debug(ctx, "trap-on-nni-flow-set-oni--uni-to- -1")
+		Intf, err = IntfIDFromNniPortNum(ctx, inPort)
 		if err != nil {
-			logger.Errorw("invalid-in-port-number",
+			logger.Errorw(ctx, "invalid-in-port-number",
 				log.Fields{
 					"port-number": inPort,
 					"error":       err})
@@ -2261,7 +2259,7 @@
 	for _, flowID := range flowIds {
 		flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, Intf, onuID, uniID, flowID)
 		if flowInfo == nil {
-			logger.Debugw("no-flowinfo-found-in-kv-store",
+			logger.Debugw(ctx, "no-flowinfo-found-in-kv-store",
 				log.Fields{
 					"intf":    Intf,
 					"onu-id":  onuID,
@@ -2277,13 +2275,13 @@
 		for i, storedFlow := range updatedFlows {
 			if flow.Id == storedFlow.LogicalFlowID {
 				removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
-				logger.Debugw("flow-to-be-deleted", log.Fields{"flow": storedFlow})
+				logger.Debugw(ctx, "flow-to-be-deleted", log.Fields{"flow": storedFlow})
 				// DKB
-				if err = f.removeFlowFromDevice(&removeFlowMessage, flow.Id); err != nil {
-					logger.Errorw("failed-to-remove-flow", log.Fields{"error": err})
+				if err = f.removeFlowFromDevice(ctx, &removeFlowMessage, flow.Id); err != nil {
+					logger.Errorw(ctx, "failed-to-remove-flow", log.Fields{"error": err})
 					return
 				}
-				logger.Info("flow-removed-from-device-successfully", log.Fields{
+				logger.Info(ctx, "flow-removed-from-device-successfully", log.Fields{
 					"flow-id":        flow.Id,
 					"stored-flow":    storedFlow,
 					"device-id":      f.deviceHandler.device.Id,
@@ -2295,7 +2293,7 @@
 				updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
 				if err = f.clearResources(ctx, flow, Intf, onuID, uniID, storedFlow.Flow.GemportId,
 					flowID, flowDirection, portNum, updatedFlows); err != nil {
-					logger.Error("failed-to-clear-resources-for-flow", log.Fields{
+					logger.Error(ctx, "failed-to-clear-resources-for-flow", log.Fields{
 						"flow-id":        flow.Id,
 						"stored-flow":    storedFlow,
 						"device-id":      f.deviceHandler.device.Id,
@@ -2314,11 +2312,11 @@
 // clears resources reserved for this multicast flow
 func (f *OpenOltFlowMgr) clearMulticastFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats) {
 	classifierInfo := make(map[string]interface{})
-	formulateClassifierInfoFromFlow(classifierInfo, flow)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
 	networkInterfaceID, err := f.getNNIInterfaceIDOfMulticastFlow(ctx, classifierInfo)
 
 	if err != nil {
-		logger.Warnw("no-inport-found--cannot-release-resources-of-the-multicast-flow", log.Fields{"flowId:": flow.Id})
+		logger.Warnw(ctx, "no-inport-found--cannot-release-resources-of-the-multicast-flow", log.Fields{"flowId:": flow.Id})
 		return
 	}
 
@@ -2332,7 +2330,7 @@
 	for _, flowID = range flowIds {
 		flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, networkInterfaceID, onuID, uniID, flowID)
 		if flowInfo == nil {
-			logger.Debugw("no-multicast-flowinfo-found-in-the-kv-store",
+			logger.Debugw(ctx, "no-multicast-flowinfo-found-in-the-kv-store",
 				log.Fields{
 					"intf":    networkInterfaceID,
 					"onu-id":  onuID,
@@ -2347,31 +2345,31 @@
 		for i, storedFlow := range updatedFlows {
 			if flow.Id == storedFlow.LogicalFlowID {
 				removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
-				logger.Debugw("multicast-flow-to-be-deleted",
+				logger.Debugw(ctx, "multicast-flow-to-be-deleted",
 					log.Fields{
 						"flow":      storedFlow,
 						"flow-id":   flow.Id,
 						"device-id": f.deviceHandler.device.Id})
 				//remove from device
-				if err := f.removeFlowFromDevice(&removeFlowMessage, flow.Id); err != nil {
+				if err := f.removeFlowFromDevice(ctx, &removeFlowMessage, flow.Id); err != nil {
 					// DKB
-					logger.Errorw("failed-to-remove-multicast-flow",
+					logger.Errorw(ctx, "failed-to-remove-multicast-flow",
 						log.Fields{
 							"flow-id": flow.Id,
 							"error":   err})
 					return
 				}
-				logger.Infow("multicast-flow-removed-from-device-successfully", log.Fields{"flow-id": flow.Id})
+				logger.Infow(ctx, "multicast-flow-removed-from-device-successfully", log.Fields{"flow-id": flow.Id})
 				//Remove the Flow from FlowInfo
 				updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
 				if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID), NoneOnuID, NoneUniID, flowID, &updatedFlows); err != nil {
-					logger.Errorw("failed-to-delete-multicast-flow-from-the-kv-store",
+					logger.Errorw(ctx, "failed-to-delete-multicast-flow-from-the-kv-store",
 						log.Fields{"flow": storedFlow,
 							"err": err})
 					return
 				}
 				//release flow id
-				logger.Debugw("releasing-multicast-flow-id",
+				logger.Debugw(ctx, "releasing-multicast-flow-id",
 					log.Fields{"flow-id": flowID,
 						"interfaceID": networkInterfaceID})
 				f.resourceMgr.FreeFlowID(ctx, uint32(networkInterfaceID), NoneOnuID, NoneUniID, flowID)
@@ -2382,7 +2380,7 @@
 
 //RemoveFlow removes the flow from the device
 func (f *OpenOltFlowMgr) RemoveFlow(ctx context.Context, flow *ofp.OfpFlowStats) error {
-	logger.Infow("removing-flow", log.Fields{"flow": *flow})
+	logger.Infow(ctx, "removing-flow", log.Fields{"flow": *flow})
 	var direction string
 	actionInfo := make(map[string]interface{})
 
@@ -2390,9 +2388,9 @@
 		if action.Type == flows.OUTPUT {
 			if out := action.GetOutput(); out != nil {
 				actionInfo[Output] = out.GetPort()
-				logger.Debugw("action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
+				logger.Debugw(ctx, "action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
 			} else {
-				logger.Error("invalid-output-port-in-action")
+				logger.Error(ctx, "invalid-output-port-in-action")
 				return olterrors.NewErrInvalidValue(log.Fields{"invalid-out-port-action": 0}, nil)
 			}
 		}
@@ -2408,7 +2406,7 @@
 		direction = Downstream
 	}
 
-	_, intfID, onuID, uniID, _, _, err := FlowExtractInfo(flow, direction)
+	_, intfID, onuID, uniID, _, _, err := FlowExtractInfo(ctx, flow, direction)
 	if err != nil {
 		return err
 	}
@@ -2421,7 +2419,7 @@
 		f.perUserFlowHandleLock.Unlock(userKey)
 	} else {
 		// Ideally this should never happen
-		logger.Errorw("failed-to-acquire-lock-to-remove-flow--remove-aborted", log.Fields{"flow": flow})
+		logger.Errorw(ctx, "failed-to-acquire-lock-to-remove-flow--remove-aborted", log.Fields{"flow": flow})
 		return errors.New("failed-to-acquire-per-user-lock")
 	}
 
@@ -2435,12 +2433,12 @@
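+		// poll every 20ms until the pending flow deletes for this ONU/UNI have drained, or stop when the context is cancelled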
 		select {
 		case <-time.After(20 * time.Millisecond):
 			if flowDelRefCnt, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok || flowDelRefCnt == 0 {
-				logger.Debug("pending-flow-deletes-completed")
+				logger.Debug(ctx, "pending-flow-deletes-completed")
 				ch <- true
 				return
 			}
 		case <-ctx.Done():
-			logger.Error("flow-delete-wait-handler-routine-canceled")
+			logger.Error(ctx, "flow-delete-wait-handler-routine-canceled")
 			return
 		}
 	}
@@ -2470,13 +2468,13 @@
 	var UsMeterID uint32
 	var DsMeterID uint32
 
-	logger.Infow("adding-flow",
+	logger.Infow(ctx, "adding-flow",
 		log.Fields{
 			"flow":         flow,
 			"flowmetadata": flowMetadata})
-	formulateClassifierInfoFromFlow(classifierInfo, flow)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
 
-	err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flow)
+	err := formulateActionInfoFromFlow(ctx, actionInfo, classifierInfo, flow)
 	if err != nil {
 		// Error logging is already done in the called function
 		// So just return in case of error
@@ -2489,13 +2487,13 @@
 	}
 
 	/* Controller bound trap flows */
-	err = formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo, flow)
+	err = formulateControllerBoundTrapFlowInfo(ctx, actionInfo, classifierInfo, flow)
 	if err != nil {
 		// error if any, already logged in the called function
 		return err
 	}
 
-	logger.Debugw("flow-ports",
+	logger.Debugw(ctx, "flow-ports",
 		log.Fields{
 			"classifierinfo_inport": classifierInfo[InPort],
 			"action_output":         actionInfo[Output]})
@@ -2503,7 +2501,7 @@
 
 	if ethType, ok := classifierInfo[EthType]; ok {
 		if ethType.(uint32) == LldpEthType {
-			logger.Info("adding-lldp-flow")
+			logger.Info(ctx, "adding-lldp-flow")
 			return f.addLLDPFlow(ctx, flow, portNo)
 		}
 	}
@@ -2511,21 +2509,21 @@
 		if ipProto.(uint32) == IPProtoDhcp {
 			if udpSrc, ok := classifierInfo[UDPSrc]; ok {
 				if udpSrc.(uint32) == uint32(67) || udpSrc.(uint32) == uint32(546) {
-					logger.Debug("trap-dhcp-from-nni-flow")
+					logger.Debug(ctx, "trap-dhcp-from-nni-flow")
 					return f.addDHCPTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
 				}
 			}
 		}
 	}
 	if isIgmpTrapDownstreamFlow(classifierInfo) {
-		logger.Debug("trap-igmp-from-nni-flow")
+		logger.Debug(ctx, "trap-igmp-from-nni-flow")
 		return f.addIgmpTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
 	}
 
-	f.deviceHandler.AddUniPortToOnu(intfID, onuID, portNo)
+	f.deviceHandler.AddUniPortToOnu(ctx, intfID, onuID, portNo)
 	f.resourceMgr.AddUniPortToOnuInfo(ctx, intfID, onuID, portNo)
 
-	TpID, err := getTpIDFromFlow(flow)
+	TpID, err := getTpIDFromFlow(ctx, flow)
 	if err != nil {
 		return olterrors.NewErrNotFound("tpid-for-flow",
 			log.Fields{
@@ -2534,7 +2532,7 @@
 				"onu-id":  onuID,
 				"uni-id":  uniID}, err)
 	}
-	logger.Debugw("tpid-for-this-subcriber",
+	logger.Debugw(ctx, "tpid-for-this-subcriber",
 		log.Fields{
 			"tp-id":   TpID,
 			"intf-id": intfID,
@@ -2542,16 +2540,16 @@
 			"uni-id":  uniID})
 	if IsUpstream(actionInfo[Output].(uint32)) {
 		UsMeterID = flows.GetMeterIdFromFlow(flow)
-		logger.Debugw("upstream-flow-meter-id", log.Fields{"us-meter-id": UsMeterID})
+		logger.Debugw(ctx, "upstream-flow-meter-id", log.Fields{"us-meter-id": UsMeterID})
 	} else {
 		DsMeterID = flows.GetMeterIdFromFlow(flow)
-		logger.Debugw("downstream-flow-meter-id", log.Fields{"ds-meter-id": DsMeterID})
+		logger.Debugw(ctx, "downstream-flow-meter-id", log.Fields{"ds-meter-id": DsMeterID})
 
 	}
 
 	pnFlDelKey := pendingFlowDeleteKey{intfID, onuID, uniID}
 	if _, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
-		logger.Debugw("no-pending-flows-found--going-ahead-with-flow-install",
+		logger.Debugw(ctx, "no-pending-flows-found--going-ahead-with-flow-install",
 			log.Fields{
 				"intf-id": intfID,
 				"onu-id":  onuID,
@@ -2562,7 +2560,7 @@
 		go f.waitForFlowDeletesToCompleteForOnu(ctx, intfID, onuID, uniID, pendingFlowDelComplete)
 		select {
 		case <-pendingFlowDelComplete:
-			logger.Debugw("all-pending-flow-deletes-completed",
+			logger.Debugw(ctx, "all-pending-flow-deletes-completed",
 				log.Fields{
 					"intf-id": intfID,
 					"onu-id":  onuID,
@@ -2583,7 +2581,7 @@
 // handleFlowWithGroup adds multicast flow to the device.
 func (f *OpenOltFlowMgr) handleFlowWithGroup(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
 	classifierInfo[PacketTagType] = DoubleTag
-	logger.Debugw("add-multicast-flow", log.Fields{
+	logger.Debugw(ctx, "add-multicast-flow", log.Fields{
 		"classifier-info": classifierInfo,
 		"actionInfo":      actionInfo})
 
@@ -2604,7 +2602,7 @@
 			multicastMac := flows.ConvertToMulticastMacBytes(ipv4Dst.(uint32))
 			delete(classifierInfo, Ipv4Dst)
 			classifierInfo[EthDst] = multicastMac
-			logger.Debugw("multicast-ip-to-mac-conversion-success",
+			logger.Debugw(ctx, "multicast-ip-to-mac-conversion-success",
 				log.Fields{
 					"ip:":  ipv4Dst.(uint32),
 					"mac:": multicastMac})
@@ -2616,9 +2614,9 @@
 	uniID := NoneUniID
 	gemPortID := NoneGemPortID
 
-	flowStoreCookie := getFlowStoreCookie(classifierInfo, uint32(0))
+	flowStoreCookie := getFlowStoreCookie(ctx, classifierInfo, uint32(0))
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Infow("multicast-flow-exists-not-re-adding", log.Fields{"classifier-info": classifierInfo})
+		logger.Infow(ctx, "multicast-flow-exists-not-re-adding", log.Fields{"classifier-info": classifierInfo})
 		return nil
 	}
 	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -2649,7 +2647,7 @@
 	if err = f.addFlowToDevice(ctx, flow, &multicastFlow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": multicastFlow}, err)
 	}
-	logger.Info("multicast-flow-added-to-device-successfully")
+	logger.Info(ctx, "multicast-flow-added-to-device-successfully")
 	//get cached group
 	group, _, err := f.GetFlowGroupFromKVStore(ctx, groupID, true)
 	if err == nil {
@@ -2675,7 +2673,7 @@
 //getNNIInterfaceIDOfMulticastFlow returns associated NNI interface id of the inPort criterion if exists; returns the first NNI interface of the device otherwise
 func (f *OpenOltFlowMgr) getNNIInterfaceIDOfMulticastFlow(ctx context.Context, classifierInfo map[string]interface{}) (uint32, error) {
 	if inPort, ok := classifierInfo[InPort]; ok {
-		nniInterfaceID, err := IntfIDFromNniPortNum(inPort.(uint32))
+		nniInterfaceID, err := IntfIDFromNniPortNum(ctx, inPort.(uint32))
 		if err != nil {
 			return 0, olterrors.NewErrInvalidValue(log.Fields{"nni-in-port-number": inPort}, err)
 		}
@@ -2691,7 +2689,7 @@
 
 // AddGroup add or update the group
 func (f *OpenOltFlowMgr) AddGroup(ctx context.Context, group *ofp.OfpGroupEntry) error {
-	logger.Infow("add-group", log.Fields{"group": group})
+	logger.Infow(ctx, "add-group", log.Fields{"group": group})
 	if group == nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"group": group}, nil)
 	}
@@ -2702,7 +2700,7 @@
 		Action:  f.buildGroupAction(),
 	}
 
-	logger.Debugw("sending-group-to-device", log.Fields{"groupToOlt": groupToOlt})
+	logger.Debugw(ctx, "sending-group-to-device", log.Fields{"groupToOlt": groupToOlt})
 	_, err := f.deviceHandler.Client.PerformGroupOperation(ctx, &groupToOlt)
 	if err != nil {
 		return olterrors.NewErrAdapter("add-group-operation-failed", log.Fields{"groupToOlt": groupToOlt}, err)
@@ -2711,7 +2709,7 @@
 	if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, true); err != nil {
 		return olterrors.NewErrPersistence("add", "flow-group", group.Desc.GroupId, log.Fields{"group": group}, err)
 	}
-	logger.Infow("add-group-operation-performed-on-the-device-successfully ", log.Fields{"groupToOlt": groupToOlt})
+	logger.Infow(ctx, "add-group-operation-performed-on-the-device-successfully ", log.Fields{"groupToOlt": groupToOlt})
 	return nil
 }
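
A hypothetical caller's view of the new signature: the group entry arrives from OpenFlow, and the flow manager owns both the device push and the KV-store persistence. The fake manager and group contents below are assumptions for illustration only:

```go
package main

import (
	"context"
	"fmt"

	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
)

// groupAdder is the one flow-manager method this example needs.
type groupAdder interface {
	AddGroup(context.Context, *ofp.OfpGroupEntry) error
}

// addMulticastGroup shows the caller's side: build the OpenFlow group
// entry and hand it over; ctx now rides into every log call beneath.
func addMulticastGroup(ctx context.Context, fm groupAdder) error {
	group := &ofp.OfpGroupEntry{
		Desc: &ofp.OfpGroupDesc{GroupId: 1, Type: ofp.OfpGroupType_OFPGT_ALL},
	}
	return fm.AddGroup(ctx, group)
}

type fakeFlowMgr struct{}

func (fakeFlowMgr) AddGroup(context.Context, *ofp.OfpGroupEntry) error { return nil }

func main() {
	fmt.Println(addMulticastGroup(context.Background(), fakeFlowMgr{}))
}
```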
 
@@ -2727,12 +2725,12 @@
 
 // ModifyGroup updates the group
 func (f *OpenOltFlowMgr) ModifyGroup(ctx context.Context, group *ofp.OfpGroupEntry) error {
-	logger.Infow("modify-group", log.Fields{"group": group})
+	logger.Infow(ctx, "modify-group", log.Fields{"group": group})
 	if group == nil || group.Desc == nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"group": group}, nil)
 	}
 
-	newGroup := f.buildGroup(group.Desc.GroupId, group.Desc.Buckets)
+	newGroup := f.buildGroup(ctx, group.Desc.GroupId, group.Desc.Buckets)
 	//get existing members of the group
 	val, groupExists, err := f.GetFlowGroupFromKVStore(ctx, group.Desc.GroupId, false)
 
@@ -2743,16 +2741,16 @@
 	var current *openoltpb2.Group // represents the group on the device
 	if groupExists {
 		// group already exists
-		current = f.buildGroup(group.Desc.GroupId, val.Desc.GetBuckets())
-		logger.Debugw("modify-group--group exists",
+		current = f.buildGroup(ctx, group.Desc.GroupId, val.Desc.GetBuckets())
+		logger.Debugw(ctx, "modify-group--group exists",
 			log.Fields{
 				"group on the device": val,
 				"new":                 group})
 	} else {
-		current = f.buildGroup(group.Desc.GroupId, nil)
+		current = f.buildGroup(ctx, group.Desc.GroupId, nil)
 	}
 
-	logger.Debugw("modify-group--comparing-current-and-new",
+	logger.Debugw(ctx, "modify-group--comparing-current-and-new",
 		log.Fields{
 			"group on the device": current,
 			"new":                 newGroup})
@@ -2761,7 +2759,7 @@
 	// get members to be removed
 	membersToBeRemoved := f.findDiff(newGroup, current)
 
-	logger.Infow("modify-group--differences found", log.Fields{
+	logger.Infow(ctx, "modify-group--differences found", log.Fields{
 		"membersToBeAdded":   membersToBeAdded,
 		"membersToBeRemoved": membersToBeRemoved,
 		"groupId":            group.Desc.GroupId})
@@ -2774,13 +2772,13 @@
 		groupToOlt.Command = openoltpb2.Group_ADD_MEMBERS
 		groupToOlt.Members = membersToBeAdded
 		//execute addMembers
-		errAdd = f.callGroupAddRemove(&groupToOlt)
+		errAdd = f.callGroupAddRemove(ctx, &groupToOlt)
 	}
 	if membersToBeRemoved != nil && len(membersToBeRemoved) > 0 {
 		groupToOlt.Command = openoltpb2.Group_REMOVE_MEMBERS
 		groupToOlt.Members = membersToBeRemoved
 		//execute removeMembers
-		errRemoved = f.callGroupAddRemove(&groupToOlt)
+		errRemoved = f.callGroupAddRemove(ctx, &groupToOlt)
 	}
 
 	//save the modified group
@@ -2788,12 +2786,12 @@
 		if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, false); err != nil {
 			return olterrors.NewErrPersistence("add", "flow-group", group.Desc.GroupId, log.Fields{"group": group}, err)
 		}
-		logger.Infow("modify-group-was-success--storing-group",
+		logger.Infow(ctx, "modify-group-was-success--storing-group",
 			log.Fields{
 				"group":         group,
 				"existingGroup": current})
 	} else {
-		logger.Warnw("one-of-the-group-add/remove-operations-failed--cannot-save-group-modifications",
+		logger.Warnw(ctx, "one-of-the-group-add/remove-operations-failed--cannot-save-group-modifications",
 			log.Fields{"group": group})
 		if errAdd != nil {
 			return errAdd
@@ -2804,8 +2802,8 @@
 }
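
findDiff is referenced but not shown in this hunk; conceptually it is a set difference over group members. A sketch using each member's serialized form as the identity key (an assumption — the real comparison may key on specific fields):

```go
package core

import openoltpb2 "github.com/opencord/voltha-protos/v3/go/openolt"

// membersMissingFrom returns the members of want that are absent from
// have; String() serves as the identity key here for brevity.
func membersMissingFrom(have, want []*openoltpb2.GroupMember) []*openoltpb2.GroupMember {
	seen := make(map[string]bool, len(have))
	for _, m := range have {
		seen[m.String()] = true
	}
	var missing []*openoltpb2.GroupMember
	for _, m := range want {
		if !seen[m.String()] {
			missing = append(missing, m)
		}
	}
	return missing
}
```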
 
 //callGroupAddRemove performs add/remove buckets operation for the indicated group
-func (f *OpenOltFlowMgr) callGroupAddRemove(group *openoltpb2.Group) error {
-	if err := f.performGroupOperation(group); err != nil {
+func (f *OpenOltFlowMgr) callGroupAddRemove(ctx context.Context, group *openoltpb2.Group) error {
+	if err := f.performGroupOperation(ctx, group); err != nil {
 		st, _ := status.FromError(err)
 		//ignore already exists error code
 		if st.Code() != codes.AlreadyExists {
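
Treating codes.AlreadyExists as success is the usual idempotency guard for replayed gRPC writes: a retried ADD_MEMBERS should not surface as a failure. The pattern in isolation:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ignoreAlreadyExists collapses AlreadyExists into success so that a
// replayed add (e.g. after a retry) is not reported as a failure.
func ignoreAlreadyExists(err error) error {
	if err == nil {
		return nil
	}
	if st, ok := status.FromError(err); ok && st.Code() == codes.AlreadyExists {
		return nil
	}
	return err
}

func main() {
	fmt.Println(ignoreAlreadyExists(status.Error(codes.AlreadyExists, "dup")) == nil) // true
}
```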
@@ -2838,8 +2836,8 @@
 }
 
 //performGroupOperation invokes the PerformGroupOperation RPC defined in the openolt proto
-func (f *OpenOltFlowMgr) performGroupOperation(group *openoltpb2.Group) error {
-	logger.Debugw("sending-group-to-device",
+func (f *OpenOltFlowMgr) performGroupOperation(ctx context.Context, group *openoltpb2.Group) error {
+	logger.Debugw(ctx, "sending-group-to-device",
 		log.Fields{
 			"groupToOlt": group,
 			"command":    group.Command})
@@ -2851,13 +2849,13 @@
 }
 
 //buildGroup builds an openoltpb2.Group from the given group id and bucket list
-func (f *OpenOltFlowMgr) buildGroup(groupID uint32, buckets []*ofp.OfpBucket) *openoltpb2.Group {
+func (f *OpenOltFlowMgr) buildGroup(ctx context.Context, groupID uint32, buckets []*ofp.OfpBucket) *openoltpb2.Group {
 	group := openoltpb2.Group{
 		GroupId: groupID}
 	// create members of the group
 	if buckets != nil {
 		for _, ofBucket := range buckets {
-			member := f.buildMember(ofBucket)
+			member := f.buildMember(ctx, ofBucket)
 			if member != nil && !f.contains(group.Members, member) {
 				group.Members = append(group.Members, member)
 			}
@@ -2867,7 +2865,7 @@
 }
 
 //buildMember builds openoltpb2.GroupMember from an OpenFlow bucket
-func (f *OpenOltFlowMgr) buildMember(ofBucket *ofp.OfpBucket) *openoltpb2.GroupMember {
+func (f *OpenOltFlowMgr) buildMember(ctx context.Context, ofBucket *ofp.OfpBucket) *openoltpb2.GroupMember {
 	var outPort uint32
 	outPortFound := false
 	for _, ofAction := range ofBucket.Actions {
@@ -2878,11 +2876,11 @@
 	}
 
 	if !outPortFound {
-		logger.Debugw("bucket-skipped-since-no-out-port-found-in-it", log.Fields{"ofBucket": ofBucket})
+		logger.Debugw(ctx, "bucket-skipped-since-no-out-port-found-in-it", log.Fields{"ofBucket": ofBucket})
 		return nil
 	}
 	interfaceID := IntfIDFromUniPortNum(outPort)
-	logger.Debugw("got-associated-interface-id-of-the-port",
+	logger.Debugw(ctx, "got-associated-interface-id-of-the-port",
 		log.Fields{
 			"portNumber:":  outPort,
 			"interfaceId:": interfaceID})
@@ -2896,27 +2894,27 @@
 		//add member to the group
 		return &member
 	}
-	logger.Warnf("bucket-skipped-since-interface-2-gem-mapping-cannot-be-found", log.Fields{"ofBucket": ofBucket})
+	logger.Warnf(ctx, "bucket-skipped-since-interface-2-gem-mapping-cannot-be-found", log.Fields{"ofBucket": ofBucket})
 	return nil
 }
 
 //sendTPDownloadMsgToChild sends the tech profile download payload to the child ONU adapter
-func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
+func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
 
-	onuDev, err := f.getOnuDevice(intfID, onuID)
+	onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
 	if err != nil {
-		logger.Errorw("couldnt-find-onu-child-device",
+		logger.Errorw(ctx, "couldnt-find-onu-child-device",
 			log.Fields{
 				"intf-id": intfID,
 				"onu-id":  onuID,
 				"uni-id":  uniID})
 		return err
 	}
-	logger.Debugw("got-child-device-from-olt-device-handler", log.Fields{"onu-id": onuDev.deviceID})
+	logger.Debugw(ctx, "got-child-device-from-olt-device-handler", log.Fields{"onu-id": onuDev.deviceID})
 
-	tpPath := f.getTPpath(intfID, uni, TpID)
+	tpPath := f.getTPpath(ctx, intfID, uni, TpID)
 	tpDownloadMsg := &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID, Path: tpPath}
-	logger.Debugw("sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
+	logger.Debugw(ctx, "sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
-	sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(context.Background(),
+	sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(ctx,
 		tpDownloadMsg,
 		ic.InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST,
@@ -2932,7 +2930,7 @@
 				"onu-id":        onuDev.deviceID,
 				"proxyDeviceID": onuDev.proxyDeviceID}, sendErr)
 	}
-	logger.Infow("success-sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
+	logger.Infow(ctx, "success-sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
 	return nil
 }
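
One propagation detail worth flagging: checkAndAddFlow launches this method with `go`, so a request-scoped ctx may be cancelled before the inter-adapter send completes. Bounding the async send with its own child deadline is one defensive option; a self-contained sketch, where the 10-second budget is an assumed value rather than anything taken from the adapter:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// boundedSend gives an async send its own deadline while still
// inheriting cancellation and values from the caller's ctx.
func boundedSend(ctx context.Context, send func(context.Context) error) error {
	sendCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return send(sendCtx)
}

func main() {
	err := boundedSend(context.Background(), func(ctx context.Context) error {
		deadline, _ := ctx.Deadline()
		fmt.Println("sending with deadline", deadline.Round(time.Second))
		return nil
	})
	fmt.Println("err:", err)
}
```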
 
@@ -2947,7 +2945,7 @@
 	if err := f.resourceMgr.AddOnuGemInfo(ctx, intfID, onu); err != nil {
 		return err
 	}
-	logger.Infow("updated-onuinfo",
+	logger.Infow(ctx, "updated-onuinfo",
 		log.Fields{
 			"intf-id":    intfID,
 			"onu-id":     onuID,
@@ -2963,7 +2961,7 @@
 	f.onuGemInfoLock[intfID].Lock()
 	defer f.onuGemInfoLock[intfID].Unlock()
 
-	logger.Infow("adding-gem-to-onu-info-map",
+	logger.Infow(ctx, "adding-gem-to-onu-info-map",
 		log.Fields{
 			"gem":       gemPort,
 			"intf":      intfID,
@@ -2977,7 +2975,7 @@
 			// check if the gem already exists; otherwise update the cache and kvstore
 			for _, gem := range onu.GemPorts {
 				if gem == gemPort {
-					logger.Debugw("gem-already-in-cache-no-need-to-update-cache-and-kv-store",
+					logger.Debugw(ctx, "gem-already-in-cache-no-need-to-update-cache-and-kv-store",
 						log.Fields{
 							"gem":       gemPort,
 							"device-id": f.deviceHandler.device.Id})
@@ -2990,7 +2988,7 @@
 	}
 	err := f.resourceMgr.AddGemToOnuGemInfo(ctx, intfID, onuID, gemPort)
 	if err != nil {
-		logger.Errorw("failed-to-add-gem-to-onu",
+		logger.Errorw(ctx, "failed-to-add-gem-to-onu",
 			log.Fields{
 				"intf-id":   intfID,
 				"onu-id":    onuID,
@@ -2998,7 +2996,7 @@
 				"device-id": f.deviceHandler.device.Id})
 		return
 	}
-	logger.Infow("gem-added-to-onu-info-map",
+	logger.Infow(ctx, "gem-added-to-onu-info-map",
 		log.Fields{
 			"gem":       gemPort,
 			"intf":      intfID,
@@ -3010,12 +3008,12 @@
 // This function looks up the ONU maps by serialNumber or (intfID, gemPort)
 
 //getOnuIDfromGemPortMap returns (onuID, nil) when found, or (0, error) when no onuID exists for the serialNumber or (intfID, gemPort)
-func (f *OpenOltFlowMgr) getOnuIDfromGemPortMap(intfID uint32, gemPortID uint32) (uint32, error) {
+func (f *OpenOltFlowMgr) getOnuIDfromGemPortMap(ctx context.Context, intfID uint32, gemPortID uint32) (uint32, error) {
 
 	f.onuGemInfoLock[intfID].Lock()
 	defer f.onuGemInfoLock[intfID].Unlock()
 
-	logger.Infow("getting-onu-id-from-gem-port-and-pon-port",
+	logger.Infow(ctx, "getting-onu-id-from-gem-port-and-pon-port",
 		log.Fields{
 			"device-id":   f.deviceHandler.device.Id,
 			"onu-geminfo": f.onuGemInfo[intfID],
@@ -3046,7 +3044,7 @@
 
 	if packetIn.IntfType == "pon" {
 		// packet indication does not carry a serial number, so nil is sent
-		if onuID, err = f.getOnuIDfromGemPortMap(packetIn.IntfId, packetIn.GemportId); err != nil {
+		if onuID, err = f.getOnuIDfromGemPortMap(ctx, packetIn.IntfId, packetIn.GemportId); err != nil {
 			// Called method is returning error with all data populated; just return the same
 			return logicalPortNum, err
 		}
@@ -3054,14 +3052,14 @@
 			logicalPortNum = packetIn.PortNo
 		} else {
 			uniID := uint32(0) //  FIXME - multi-uni support
-			logicalPortNum = MkUniPortNum(packetIn.IntfId, onuID, uniID)
+			logicalPortNum = MkUniPortNum(ctx, packetIn.IntfId, onuID, uniID)
 		}
 		// Store the gem port through which the packet_in came. Use the same gem port for packet_out
 		f.UpdateGemPortForPktIn(ctx, packetIn.IntfId, onuID, logicalPortNum, packetIn.GemportId)
 	} else if packetIn.IntfType == "nni" {
 		logicalPortNum = IntfIDToPortNo(packetIn.IntfId, voltha.Port_ETHERNET_NNI)
 	}
-	logger.Infow("retrieved-logicalport-from-packet-in",
+	logger.Infow(ctx, "retrieved-logicalport-from-packet-in",
 		log.Fields{
 			"logical-port-num": logicalPortNum,
 			"intf-type":        packetIn.IntfType,
@@ -3082,7 +3080,7 @@
 
 	gemPortID, ok := f.packetInGemPort[pktInkey]
 	if ok {
-		logger.Debugw("found-gemport-for-pktin-key",
+		logger.Debugw(ctx, "found-gemport-for-pktin-key",
 			log.Fields{
 				"pktinkey": pktInkey,
 				"gem":      gemPortID})
@@ -3093,7 +3091,7 @@
 	if err == nil {
 		if gemPortID != 0 {
 			f.packetInGemPort[pktInkey] = gemPortID
-			logger.Infow("found-gem-port-from-kv-store-and-updating-cache-with-gemport",
+			logger.Infow(ctx, "found-gem-port-from-kv-store-and-updating-cache-with-gemport",
 				log.Fields{
 					"pktinkey": pktInkey,
 					"gem":      gemPortID})
@@ -3124,7 +3122,7 @@
 	direction string,
 	tpID uint32,
 	vlanID ...uint32) {
-	logger.Debugw("installing-flow-on-all-gem-ports",
+	logger.Debugw(ctx, "installing-flow-on-all-gem-ports",
 		log.Fields{
 			"FlowType": FlowType,
 			"gemPorts": gemPorts,
@@ -3222,7 +3220,7 @@
 			}
 		}
 	default:
-		logger.Errorw("unknown-tech", log.Fields{"tpInst": TpInst})
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tpInst": TpInst})
 	}
 }
 
@@ -3236,7 +3234,7 @@
 }
 
 func (f *OpenOltFlowMgr) addDHCPTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
-	logger.Debug("adding-trap-dhcp-of-nni-flow")
+	logger.Debug(ctx, "adding-trap-dhcp-of-nni-flow")
 	action := make(map[string]interface{})
 	classifier[PacketTagType] = DoubleTag
 	action[TrapToHost] = true
@@ -3255,7 +3253,7 @@
 	uniID := -1
 	gemPortID := -1
 	allocID := -1
-	networkInterfaceID, err := getNniIntfID(classifier, action)
+	networkInterfaceID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-intreface-id",
 			log.Fields{
@@ -3264,9 +3262,9 @@
 			err)
 	}
 
-	flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+	flowStoreCookie := getFlowStoreCookie(ctx, classifier, uint32(0))
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Info("flow-exists-not-re-adding")
+		logger.Info(ctx, "flow-exists-not-re-adding")
 		return nil
 	}
 	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -3284,12 +3282,12 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err)
 	}
-	logger.Debugw("created-classifier-proto", log.Fields{"classifier": *classifierProto})
+	logger.Debugw(ctx, "created-classifier-proto", log.Fields{"classifier": *classifierProto})
 	actionProto, err := makeOpenOltActionField(action, classifier)
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err)
 	}
-	logger.Debugw("created-action-proto", log.Fields{"action": *actionProto})
+	logger.Debugw(ctx, "created-action-proto", log.Fields{"action": *actionProto})
 	downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
 		OnuId:         int32(onuID), // OnuId not required
 		UniId:         int32(uniID), // UniId not used
@@ -3306,7 +3304,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err)
 	}
-	logger.Info("dhcp-trap-on-nni-flow-added–to-device-successfully")
+	logger.Info(ctx, "dhcp-trap-on-nni-flow-added–to-device-successfully")
 	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
 	if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
 		int32(onuID),
@@ -3345,7 +3343,7 @@
 
 //addIgmpTrapFlowOnNNI adds a trap-to-host flow on NNI
 func (f *OpenOltFlowMgr) addIgmpTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
-	logger.Infow("adding-igmp-trap-of-nni-flow", log.Fields{"classifier-info": classifier})
+	logger.Infow(ctx, "adding-igmp-trap-of-nni-flow", log.Fields{"classifier-info": classifier})
 	action := make(map[string]interface{})
 	classifier[PacketTagType] = getPacketTypeFromClassifiers(classifier)
 	action[TrapToHost] = true
@@ -3364,16 +3362,16 @@
 	uniID := -1
 	gemPortID := -1
 	allocID := -1
-	networkInterfaceID, err := getNniIntfID(classifier, action)
+	networkInterfaceID, err := getNniIntfID(ctx, classifier, action)
 	if err != nil {
 		return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
 			"classifier": classifier,
 			"action":     action},
 			err)
 	}
-	flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+	flowStoreCookie := getFlowStoreCookie(ctx, classifier, uint32(0))
 	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
-		logger.Info("igmp-flow-exists-not-re-adding")
+		logger.Info(ctx, "igmp-flow-exists-not-re-adding")
 		return nil
 	}
 	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -3391,12 +3389,12 @@
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err)
 	}
-	logger.Debugw("created-classifier-proto-for-the-igmp-flow", log.Fields{"classifier": *classifierProto})
+	logger.Debugw(ctx, "created-classifier-proto-for-the-igmp-flow", log.Fields{"classifier": *classifierProto})
 	actionProto, err := makeOpenOltActionField(action, classifier)
 	if err != nil {
 		return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err)
 	}
-	logger.Debugw("created-action-proto-for-the-igmp-flow", log.Fields{"action": *actionProto})
+	logger.Debugw(ctx, "created-action-proto-for-the-igmp-flow", log.Fields{"action": *actionProto})
 	downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
 		OnuId:         int32(onuID), // OnuId not required
 		UniId:         int32(uniID), // UniId not used
@@ -3413,7 +3411,7 @@
 	if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
 		return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err)
 	}
-	logger.Info("igmp-trap-on-nni-flow-added-to-device-successfully")
+	logger.Info(ctx, "igmp-trap-on-nni-flow-added-to-device-successfully")
 	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
 	if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
 		int32(onuID),
@@ -3447,7 +3445,7 @@
 	allocID := args[AllocID]
 	if ipProto, ok := classifierInfo[IPProto]; ok {
 		if ipProto.(uint32) == IPProtoDhcp {
-			logger.Infow("adding-dhcp-flow", log.Fields{
+			logger.Infow(ctx, "adding-dhcp-flow", log.Fields{
 				"tp-id":    tpID,
 				"alloc-id": allocID,
 				"intf-id":  intfID,
@@ -3455,7 +3453,7 @@
 				"uni-id":   uniID,
 			})
 			if pcp, ok := classifierInfo[VlanPcp]; ok {
-				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
 					tp_pb.Direction_UPSTREAM,
 					pcp.(uint32))
 				//Adding DHCP upstream flow
@@ -3467,14 +3465,14 @@
 			}
 
 		} else if ipProto.(uint32) == IgmpProto {
-			logger.Infow("adding-us-igmp-flow",
+			logger.Infow(ctx, "adding-us-igmp-flow",
 				log.Fields{
 					"intf-id":          intfID,
 					"onu-id":           onuID,
 					"uni-id":           uniID,
 					"classifier-info:": classifierInfo})
 			if pcp, ok := classifierInfo[VlanPcp]; ok {
-				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
 					tp_pb.Direction_UPSTREAM,
 					pcp.(uint32))
 				f.addIGMPTrapFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort, tpID)
@@ -3483,12 +3481,12 @@
 				installFlowOnAllGemports(ctx, f.addIGMPTrapFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, IgmpFlow, Upstream, tpID)
 			}
 		} else {
-			logger.Errorw("invalid-classifier-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo})
+			logger.Errorw(ctx, "invalid-classifier-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo})
 			return
 		}
 	} else if ethType, ok := classifierInfo[EthType]; ok {
 		if ethType.(uint32) == EapEthType {
-			logger.Infow("adding-eapol-flow", log.Fields{
+			logger.Infow(ctx, "adding-eapol-flow", log.Fields{
 				"intf-id": intfID,
 				"onu-id":  onuID,
 				"uni-id":  uniID,
@@ -3500,7 +3498,7 @@
 				vlanID = DefaultMgmtVlan
 			}
 			if pcp, ok := classifierInfo[VlanPcp]; ok {
-				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
 					tp_pb.Direction_UPSTREAM,
 					pcp.(uint32))
 
@@ -3510,13 +3508,13 @@
 			}
 		}
 	} else if _, ok := actionInfo[PushVlan]; ok {
-		logger.Infow("adding-upstream-data-rule", log.Fields{
+		logger.Infow(ctx, "adding-upstream-data-rule", log.Fields{
 			"intf-id": intfID,
 			"onu-id":  onuID,
 			"uni-id":  uniID,
 		})
 		if pcp, ok := classifierInfo[VlanPcp]; ok {
-			gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+			gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
 				tp_pb.Direction_UPSTREAM,
 				pcp.(uint32))
 			//Adding HSIA upstream flow
@@ -3526,13 +3524,13 @@
 			installFlowOnAllGemports(ctx, f.addUpstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow, Upstream, tpID)
 		}
 	} else if _, ok := actionInfo[PopVlan]; ok {
-		logger.Infow("adding-downstream-data-rule", log.Fields{
+		logger.Infow(ctx, "adding-downstream-data-rule", log.Fields{
 			"intf-id": intfID,
 			"onu-id":  onuID,
 			"uni-id":  uniID,
 		})
 		if pcp, ok := classifierInfo[VlanPcp]; ok {
-			gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+			gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
 				tp_pb.Direction_DOWNSTREAM,
 				pcp.(uint32))
 			//Adding HSIA downstream flow
@@ -3542,7 +3540,7 @@
 			installFlowOnAllGemports(ctx, f.addDownstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow, Downstream, tpID)
 		}
 	} else {
-		logger.Errorw("invalid-flow-type-to-handle",
+		logger.Errorw(ctx, "invalid-flow-type-to-handle",
 			log.Fields{
 				"intf-id":    intfID,
 				"onu-id":     onuID,
@@ -3553,7 +3551,7 @@
 		return
 	}
 	// Send Techprofile download event to child device in go routine as it takes time
-	go f.sendTPDownloadMsgToChild(intfID, onuID, uniID, uni, tpID)
+	go f.sendTPDownloadMsgToChild(ctx, intfID, onuID, uniID, uni, tpID)
 }
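
Every branch of checkAndAddFlow applies the same selection rule: a VLAN p-bit in the classifier pins the flow to the one gem port mapped to that p-bit, otherwise the flow is fanned out to every gem port of the tech profile instance. Condensed from the IGMP branch above, with identifiers exactly as in the function:

```go
if pcp, ok := classifierInfo[VlanPcp]; ok {
	gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
		tp_pb.Direction_UPSTREAM,
		pcp.(uint32))
	f.addIGMPTrapFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo,
		actionInfo, flow, allocID, gemPort, tpID)
} else {
	installFlowOnAllGemports(ctx, f.addIGMPTrapFlow, nil, args, classifierInfo,
		actionInfo, flow, gemPorts, TpInst, IgmpFlow, Upstream, tpID)
}
```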
 
 func (f *OpenOltFlowMgr) isGemPortUsedByAnotherFlow(gemPK gemPortKey) bool {
@@ -3584,87 +3582,87 @@
 		// So, we need to check and make sure that no other gem port is referring to the given TP ID
 		// on any other uni port.
 		tpInstances := f.techprofile[ponIntf].FindAllTpInstances(ctx, tpID, ponIntf, onuID).([]tp.TechProfile)
-		logger.Debugw("got-single-instance-tp-instances", log.Fields{"tp-instances": tpInstances})
+		logger.Debugw(ctx, "got-single-instance-tp-instances", log.Fields{"tp-instances": tpInstances})
 		for i := 0; i < len(tpInstances); i++ {
 			tpI := tpInstances[i]
 			tpGemPorts := tpI.UpstreamGemPortAttributeList
 			for _, tpGemPort := range tpGemPorts {
 				if tpGemPort.GemportID != gemPortID {
-					logger.Debugw("single-instance-tp-is-in-use-by-gem", log.Fields{"gemPort": tpGemPort.GemportID})
+					logger.Debugw(ctx, "single-instance-tp-is-in-use-by-gem", log.Fields{"gemPort": tpGemPort.GemportID})
 					return true, tpGemPort.GemportID
 				}
 			}
 		}
 	}
-	logger.Debug("tech-profile-is-not-in-use-by-any-gem")
+	logger.Debug(ctx, "tech-profile-is-not-in-use-by-any-gem")
 	return false, 0
 }
 
-func formulateClassifierInfoFromFlow(classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) {
+func formulateClassifierInfoFromFlow(ctx context.Context, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) {
 	for _, field := range flows.GetOfbFields(flow) {
 		if field.Type == flows.ETH_TYPE {
 			classifierInfo[EthType] = field.GetEthType()
-			logger.Debug("field-type-eth-type", log.Fields{"classifierInfo[ETH_TYPE]": classifierInfo[EthType].(uint32)})
+			logger.Debug(ctx, "field-type-eth-type", log.Fields{"classifierInfo[ETH_TYPE]": classifierInfo[EthType].(uint32)})
 		} else if field.Type == flows.ETH_DST {
 			classifierInfo[EthDst] = field.GetEthDst()
-			logger.Debug("field-type-eth-type", log.Fields{"classifierInfo[ETH_DST]": classifierInfo[EthDst].([]uint8)})
+			logger.Debug(ctx, "field-type-eth-type", log.Fields{"classifierInfo[ETH_DST]": classifierInfo[EthDst].([]uint8)})
 		} else if field.Type == flows.IP_PROTO {
 			classifierInfo[IPProto] = field.GetIpProto()
-			logger.Debug("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+			logger.Debug(ctx, "field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
 		} else if field.Type == flows.IN_PORT {
 			classifierInfo[InPort] = field.GetPort()
-			logger.Debug("field-type-in-port", log.Fields{"classifierInfo[IN_PORT]": classifierInfo[InPort].(uint32)})
+			logger.Debug(ctx, "field-type-in-port", log.Fields{"classifierInfo[IN_PORT]": classifierInfo[InPort].(uint32)})
 		} else if field.Type == flows.VLAN_VID {
 			classifierInfo[VlanVid] = field.GetVlanVid() & 0xfff
-			logger.Debug("field-type-vlan-vid", log.Fields{"classifierInfo[VLAN_VID]": classifierInfo[VlanVid].(uint32)})
+			logger.Debug(ctx, "field-type-vlan-vid", log.Fields{"classifierInfo[VLAN_VID]": classifierInfo[VlanVid].(uint32)})
 		} else if field.Type == flows.VLAN_PCP {
 			classifierInfo[VlanPcp] = field.GetVlanPcp()
-			logger.Debug("field-type-vlan-pcp", log.Fields{"classifierInfo[VLAN_PCP]": classifierInfo[VlanPcp].(uint32)})
+			logger.Debug(ctx, "field-type-vlan-pcp", log.Fields{"classifierInfo[VLAN_PCP]": classifierInfo[VlanPcp].(uint32)})
 		} else if field.Type == flows.UDP_DST {
 			classifierInfo[UDPDst] = field.GetUdpDst()
-			logger.Debug("field-type-udp-dst", log.Fields{"classifierInfo[UDP_DST]": classifierInfo[UDPDst].(uint32)})
+			logger.Debug(ctx, "field-type-udp-dst", log.Fields{"classifierInfo[UDP_DST]": classifierInfo[UDPDst].(uint32)})
 		} else if field.Type == flows.UDP_SRC {
 			classifierInfo[UDPSrc] = field.GetUdpSrc()
-			logger.Debug("field-type-udp-src", log.Fields{"classifierInfo[UDP_SRC]": classifierInfo[UDPSrc].(uint32)})
+			logger.Debug(ctx, "field-type-udp-src", log.Fields{"classifierInfo[UDP_SRC]": classifierInfo[UDPSrc].(uint32)})
 		} else if field.Type == flows.IPV4_DST {
 			classifierInfo[Ipv4Dst] = field.GetIpv4Dst()
-			logger.Debug("field-type-ipv4-dst", log.Fields{"classifierInfo[IPV4_DST]": classifierInfo[Ipv4Dst].(uint32)})
+			logger.Debug(ctx, "field-type-ipv4-dst", log.Fields{"classifierInfo[IPV4_DST]": classifierInfo[Ipv4Dst].(uint32)})
 		} else if field.Type == flows.IPV4_SRC {
 			classifierInfo[Ipv4Src] = field.GetIpv4Src()
-			logger.Debug("field-type-ipv4-src", log.Fields{"classifierInfo[IPV4_SRC]": classifierInfo[Ipv4Src].(uint32)})
+			logger.Debug(ctx, "field-type-ipv4-src", log.Fields{"classifierInfo[IPV4_SRC]": classifierInfo[Ipv4Src].(uint32)})
 		} else if field.Type == flows.METADATA {
 			classifierInfo[Metadata] = field.GetTableMetadata()
-			logger.Debug("field-type-metadata", log.Fields{"classifierInfo[Metadata]": classifierInfo[Metadata].(uint64)})
+			logger.Debug(ctx, "field-type-metadata", log.Fields{"classifierInfo[Metadata]": classifierInfo[Metadata].(uint64)})
 		} else if field.Type == flows.TUNNEL_ID {
 			classifierInfo[TunnelID] = field.GetTunnelId()
-			logger.Debug("field-type-tunnelId", log.Fields{"classifierInfo[TUNNEL_ID]": classifierInfo[TunnelID].(uint64)})
+			logger.Debug(ctx, "field-type-tunnelId", log.Fields{"classifierInfo[TUNNEL_ID]": classifierInfo[TunnelID].(uint64)})
 		} else {
-			logger.Errorw("un-supported-field-type", log.Fields{"type": field.Type})
+			logger.Errorw(ctx, "un-supported-field-type", log.Fields{"type": field.Type})
 			return
 		}
 	}
 }
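
A side note on the ladder above: since every arm tests field.Type, a switch would express the dispatch more idiomatically with identical behavior. Abridged sketch, in the context of the same function:

```go
switch field.Type {
case flows.ETH_TYPE:
	classifierInfo[EthType] = field.GetEthType()
case flows.IP_PROTO:
	classifierInfo[IPProto] = field.GetIpProto()
// ...remaining field types exactly as above...
default:
	logger.Errorw(ctx, "unsupported-field-type", log.Fields{"type": field.Type})
	return
}
```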
 
-func formulateActionInfoFromFlow(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+func formulateActionInfoFromFlow(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
 	for _, action := range flows.GetActions(flow) {
 		if action.Type == flows.OUTPUT {
 			if out := action.GetOutput(); out != nil {
 				actionInfo[Output] = out.GetPort()
-				logger.Debugw("action-type-output", log.Fields{"out-port": actionInfo[Output].(uint32)})
+				logger.Debugw(ctx, "action-type-output", log.Fields{"out-port": actionInfo[Output].(uint32)})
 			} else {
 				return olterrors.NewErrInvalidValue(log.Fields{"output-port": nil}, nil)
 			}
 		} else if action.Type == flows.POP_VLAN {
 			actionInfo[PopVlan] = true
-			logger.Debugw("action-type-pop-vlan", log.Fields{"in_port": classifierInfo[InPort].(uint32)})
+			logger.Debugw(ctx, "action-type-pop-vlan", log.Fields{"in_port": classifierInfo[InPort].(uint32)})
 		} else if action.Type == flows.PUSH_VLAN {
 			if out := action.GetPush(); out != nil {
 				if tpid := out.GetEthertype(); tpid != 0x8100 {
-					logger.Errorw("invalid ethertype in push action", log.Fields{"ethertype": actionInfo[PushVlan].(int32)})
+					logger.Errorw(ctx, "invalid ethertype in push action", log.Fields{"ethertype": actionInfo[PushVlan].(int32)})
 				} else {
 					actionInfo[PushVlan] = true
 					actionInfo[TPID] = tpid
-					logger.Debugw("action-type-push-vlan",
+					logger.Debugw(ctx, "action-type-push-vlan",
 						log.Fields{
 							"push-tpid": actionInfo[TPID].(uint32),
 							"in-port":   classifierInfo[InPort].(uint32)})
@@ -3676,12 +3674,12 @@
 					if ofClass := field.GetOxmClass(); ofClass != ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
 						return olterrors.NewErrInvalidValue(log.Fields{"openflow-class": ofClass}, nil)
 					}
-					/*logger.Debugw("action-type-set-field",log.Fields{"field": field, "in_port": classifierInfo[IN_PORT].(uint32)})*/
-					formulateSetFieldActionInfoFromFlow(field, actionInfo)
+					/*logger.Debugw(ctx, "action-type-set-field",log.Fields{"field": field, "in_port": classifierInfo[IN_PORT].(uint32)})*/
+					formulateSetFieldActionInfoFromFlow(ctx, field, actionInfo)
 				}
 			}
 		} else if action.Type == flows.GROUP {
-			formulateGroupActionInfoFromFlow(action, actionInfo)
+			formulateGroupActionInfoFromFlow(ctx, action, actionInfo)
 		} else {
 			return olterrors.NewErrInvalidValue(log.Fields{"action-type": action.Type}, nil)
 		}
@@ -3689,43 +3687,43 @@
 	return nil
 }
 
-func formulateSetFieldActionInfoFromFlow(field *ofp.OfpOxmField, actionInfo map[string]interface{}) {
+func formulateSetFieldActionInfoFromFlow(ctx context.Context, field *ofp.OfpOxmField, actionInfo map[string]interface{}) {
 	if ofbField := field.GetOfbField(); ofbField != nil {
 		fieldtype := ofbField.GetType()
 		if fieldtype == ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID {
 			if vlan := ofbField.GetVlanVid(); vlan != 0 {
 				actionInfo[VlanVid] = vlan & 0xfff
-				logger.Debugw("action-set-vlan-vid", log.Fields{"actionInfo[VLAN_VID]": actionInfo[VlanVid].(uint32)})
+				logger.Debugw(ctx, "action-set-vlan-vid", log.Fields{"actionInfo[VLAN_VID]": actionInfo[VlanVid].(uint32)})
 			} else {
-				logger.Error("no-invalid-vlan-id-in-set-vlan-vid-action")
+				logger.Error(ctx, "no-invalid-vlan-id-in-set-vlan-vid-action")
 			}
 		} else if fieldtype == ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP {
 			pcp := ofbField.GetVlanPcp()
 			actionInfo[VlanPcp] = pcp
 			log.Debugw("action-set-vlan-pcp", log.Fields{"actionInfo[VLAN_PCP]": actionInfo[VlanPcp].(uint32)})
 		} else {
-			logger.Errorw("unsupported-action-set-field-type", log.Fields{"type": fieldtype})
+			logger.Errorw(ctx, "unsupported-action-set-field-type", log.Fields{"type": fieldtype})
 		}
 	}
 }
 
-func formulateGroupActionInfoFromFlow(action *ofp.OfpAction, actionInfo map[string]interface{}) {
+func formulateGroupActionInfoFromFlow(ctx context.Context, action *ofp.OfpAction, actionInfo map[string]interface{}) {
 	if action.GetGroup() == nil {
-		logger.Warn("no-group-entry-found-in-the-group-action")
+		logger.Warn(ctx, "no-group-entry-found-in-the-group-action")
 	} else {
 		actionInfo[GroupID] = action.GetGroup().GroupId
-		logger.Debugw("action-group-id", log.Fields{"actionInfo[GroupID]": actionInfo[GroupID].(uint32)})
+		logger.Debugw(ctx, "action-group-id", log.Fields{"actionInfo[GroupID]": actionInfo[GroupID].(uint32)})
 	}
 }
 
-func formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+func formulateControllerBoundTrapFlowInfo(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
 	if isControllerFlow := IsControllerBoundFlow(actionInfo[Output].(uint32)); isControllerFlow {
-		logger.Debug("controller-bound-trap-flows--getting-inport-from-tunnelid")
+		logger.Debug(ctx, "controller-bound-trap-flows--getting-inport-from-tunnelid")
 		/* Get UNI port/ IN Port from tunnel ID field for upstream controller bound flows  */
 		if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
 			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
 				classifierInfo[InPort] = uniPort
-				logger.Debugw("upstream-pon-to-controller-flow--inport-in-tunnelid",
+				logger.Debugw(ctx, "upstream-pon-to-controller-flow--inport-in-tunnelid",
 					log.Fields{
 						"newinport": classifierInfo[InPort].(uint32),
 						"outport":   actionInfo[Output].(uint32)})
@@ -3737,12 +3735,12 @@
 			}
 		}
 	} else {
-		logger.Debug("non-controller-flows--getting-uniport-from-tunnelid")
+		logger.Debug(ctx, "non-controller-flows--getting-uniport-from-tunnelid")
 		// Downstream flow from NNI to PON port , Use tunnel ID as new OUT port / UNI port
 		if portType := IntfIDToPortTypeName(actionInfo[Output].(uint32)); portType == voltha.Port_PON_OLT {
 			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
 				actionInfo[Output] = uniPort
-				logger.Debugw("downstream-nni-to-pon-port-flow, outport-in-tunnelid",
+				logger.Debugw(ctx, "downstream-nni-to-pon-port-flow, outport-in-tunnelid",
 					log.Fields{
 						"newoutport": actionInfo[Output].(uint32),
 						"outport":    actionInfo[Output].(uint32)})
@@ -3756,7 +3754,7 @@
 		} else if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
 			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
 				classifierInfo[InPort] = uniPort
-				logger.Debugw("upstream-pon-to-nni-port-flow, inport-in-tunnelid",
+				logger.Debugw(ctx, "upstream-pon-to-nni-port-flow, inport-in-tunnelid",
 					log.Fields{
 						"newinport": actionInfo[Output].(uint32),
 						"outport":   actionInfo[Output].(uint32)})
@@ -3773,7 +3771,7 @@
 	return nil
 }
 
-func getTpIDFromFlow(flow *ofp.OfpFlowStats) (uint32, error) {
+func getTpIDFromFlow(ctx context.Context, flow *ofp.OfpFlowStats) (uint32, error) {
 	/*     Metadata 8 bytes:
 		   Most Significant 2 Bytes = Inner VLAN
 		   Next 2 Bytes = Tech Profile ID(TPID)
@@ -3781,11 +3779,11 @@
 	       Flow Metadata carries Tech-Profile (TP) ID and is mandatory in all
 	       subscriber related flows.
 	*/
-	metadata := flows.GetMetadataFromWriteMetadataAction(flow)
+	metadata := flows.GetMetadataFromWriteMetadataAction(ctx, flow)
 	if metadata == 0 {
 		return 0, olterrors.NewErrNotFound("metadata", log.Fields{"flow": flow}, nil)
 	}
-	TpID := flows.GetTechProfileIDFromWriteMetaData(metadata)
+	TpID := flows.GetTechProfileIDFromWriteMetaData(ctx, metadata)
 	return uint32(TpID), nil
 }
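
Decoded directly from the layout in the comment: the inner VLAN occupies bits 63..48 and the tech profile ID bits 47..32 of the 8-byte metadata. The helper names below are ours; the adapter goes through flows.GetTechProfileIDFromWriteMetaData:

```go
package main

import "fmt"

// Helpers mirroring the write-metadata layout quoted above: inner VLAN
// in the top 2 bytes, tech profile ID in the next 2.
func innerVlanFromMetadata(md uint64) uint16 { return uint16(md >> 48) }
func tpIDFromMetadata(md uint64) uint16      { return uint16(md >> 32) }

func main() {
	md := uint64(100)<<48 | uint64(64)<<32 | 16
	fmt.Println(innerVlanFromMetadata(md), tpIDFromMetadata(md)) // 100 64
}
```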
 
@@ -3799,30 +3797,30 @@
 }
 
 // getNniIntfID gets nni intf id from the flow classifier/action
-func getNniIntfID(classifier map[string]interface{}, action map[string]interface{}) (uint32, error) {
+func getNniIntfID(ctx context.Context, classifier map[string]interface{}, action map[string]interface{}) (uint32, error) {
 
 	portType := IntfIDToPortTypeName(classifier[InPort].(uint32))
 	if portType == voltha.Port_PON_OLT {
-		intfID, err := IntfIDFromNniPortNum(action[Output].(uint32))
+		intfID, err := IntfIDFromNniPortNum(ctx, action[Output].(uint32))
 		if err != nil {
-			logger.Debugw("invalid-action-port-number",
+			logger.Debugw(ctx, "invalid-action-port-number",
 				log.Fields{
 					"port-number": action[Output].(uint32),
 					"error":       err})
 			return uint32(0), err
 		}
-		logger.Infow("output-nni-intfId-is", log.Fields{"intf-id": intfID})
+		logger.Infow(ctx, "output-nni-intfId-is", log.Fields{"intf-id": intfID})
 		return intfID, nil
 	} else if portType == voltha.Port_ETHERNET_NNI {
-		intfID, err := IntfIDFromNniPortNum(classifier[InPort].(uint32))
+		intfID, err := IntfIDFromNniPortNum(ctx, classifier[InPort].(uint32))
 		if err != nil {
-			logger.Debugw("invalid-classifier-port-number",
+			logger.Debugw(ctx, "invalid-classifier-port-number",
 				log.Fields{
 					"port-number": action[Output].(uint32),
 					"error":       err})
 			return uint32(0), err
 		}
-		logger.Infow("input-nni-intfId-is", log.Fields{"intf-id": intfID})
+		logger.Infow(ctx, "input-nni-intfId-is", log.Fields{"intf-id": intfID})
 		return intfID, nil
 	}
 	return uint32(0), nil
@@ -3838,7 +3836,7 @@
 	lookupGemPort, ok := f.packetInGemPort[pktInkey]
 	if ok {
 		if lookupGemPort == gemPort {
-			logger.Infow("pktin-key/value-found-in-cache--no-need-to-update-kv--assume-both-in-sync",
+			logger.Infow(ctx, "pktin-key/value-found-in-cache--no-need-to-update-kv--assume-both-in-sync",
 				log.Fields{
 					"pktinkey": pktInkey,
 					"gem":      gemPort})
@@ -3848,7 +3846,7 @@
 	f.packetInGemPort[pktInkey] = gemPort
 
 	f.resourceMgr.UpdateGemPortForPktIn(ctx, pktInkey, gemPort)
-	logger.Infow("pktin-key-not-found-in-local-cache-value-is-different--updating-cache-and-kv-store",
+	logger.Infow(ctx, "pktin-key-not-found-in-local-cache-value-is-different--updating-cache-and-kv-store",
 		log.Fields{
 			"pktinkey": pktInkey,
 			"gem":      gemPort})
@@ -3866,7 +3864,7 @@
 		if onu.OnuID == onuID {
 			for _, uni := range onu.UniPorts {
 				if uni == portNum {
-					logger.Infow("uni-already-in-cache--no-need-to-update-cache-and-kv-store", log.Fields{"uni": portNum})
+					logger.Infow(ctx, "uni-already-in-cache--no-need-to-update-cache-and-kv-store", log.Fields{"uni": portNum})
 					return
 				}
 			}
@@ -3880,7 +3878,7 @@
 func (f *OpenOltFlowMgr) loadFlowIDlistForGem(ctx context.Context, intf uint32) {
 	flowIDsList, err := f.resourceMgr.GetFlowIDsGemMapForInterface(ctx, intf)
 	if err != nil {
-		logger.Error("failed-to-get-flowid-list-per-gem", log.Fields{"intf": intf})
+		logger.Error(ctx, "failed-to-get-flowid-list-per-gem", log.Fields{"intf": intf})
 		return
 	}
 	for gem, FlowIDs := range flowIDsList {
@@ -3895,7 +3893,7 @@
 func (f *OpenOltFlowMgr) loadInterfaceToMulticastQueueMap(ctx context.Context) {
 	storedMulticastQueueMap, err := f.resourceMgr.GetMcastQueuePerInterfaceMap(ctx)
 	if err != nil {
-		logger.Error("failed-to-get-pon-interface-to-multicast-queue-map")
+		logger.Error(ctx, "failed-to-get-pon-interface-to-multicast-queue-map")
 		return
 	}
 	for intf, queueInfo := range storedMulticastQueueMap {
diff --git a/internal/pkg/core/openolt_flowmgr_test.go b/internal/pkg/core/openolt_flowmgr_test.go
index 4a74e9a..26697aa 100644
--- a/internal/pkg/core/openolt_flowmgr_test.go
+++ b/internal/pkg/core/openolt_flowmgr_test.go
@@ -265,8 +265,9 @@
 }
 
 func TestOpenOltFlowMgr_RemoveFlow(t *testing.T) {
+	ctx := context.Background()
 	// flowMgr := newMockFlowmgr()
-	logger.Debug("Info Warning Error: Starting RemoveFlow() test")
+	logger.Debug(ctx, "Info Warning Error: Starting RemoveFlow() test")
 	fa := &fu.FlowArgs{
 		MatchFields: []*ofp.OfpOxmOfbField{
 			fu.InPort(2),
@@ -652,7 +653,7 @@
 				flowMgr.addGemPortToOnuInfoMap(ctx, tt.args.intfID, tt.args.onuID, gemPort)
 			}
 			for _, gemPortDeleted := range tt.args.gemPortIDsToBeDeleted {
-				flowMgr.deleteGemPortFromLocalCache(tt.args.intfID, tt.args.onuID, gemPortDeleted)
+				flowMgr.deleteGemPortFromLocalCache(context.Background(), tt.args.intfID, tt.args.onuID, gemPortDeleted)
 			}
 			lenofGemPorts := len(flowMgr.onuGemInfo[tt.args.intfID][0].GemPorts)
 			if lenofGemPorts != tt.args.finalLength {
@@ -764,6 +765,7 @@
 }
 
 func TestOpenOltFlowMgr_checkAndAddFlow(t *testing.T) {
+	ctx := context.Background()
 	// flowMgr := newMockFlowmgr()
 	kw := make(map[string]uint64)
 	kw["table_id"] = 1
@@ -851,33 +853,33 @@
 	flowState2, _ := fu.MkFlowStat(fa2)
 	flowState3, _ := fu.MkFlowStat(fa3)
 	flowState4, _ := fu.MkFlowStat(fa4)
-	formulateClassifierInfoFromFlow(classifierInfo, flowState)
-	formulateClassifierInfoFromFlow(classifierInfo2, flowState2)
-	formulateClassifierInfoFromFlow(classifierInfo3, flowState3)
-	formulateClassifierInfoFromFlow(classifierInfo4, flowState4)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo, flowState)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo2, flowState2)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo3, flowState3)
+	formulateClassifierInfoFromFlow(ctx, classifierInfo4, flowState4)
 
-	err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flowState)
+	err := formulateActionInfoFromFlow(ctx, actionInfo, classifierInfo, flowState)
 	if err != nil {
 		// Error logging is already done in the called function
 		// So just return in case of error
 		return
 	}
 
-	err = formulateActionInfoFromFlow(actionInfo2, classifierInfo2, flowState2)
+	err = formulateActionInfoFromFlow(ctx, actionInfo2, classifierInfo2, flowState2)
 	if err != nil {
 		// Error logging is already done in the called function
 		// So just return in case of error
 		return
 	}
 
-	err = formulateActionInfoFromFlow(actionInfo3, classifierInfo3, flowState3)
+	err = formulateActionInfoFromFlow(ctx, actionInfo3, classifierInfo3, flowState3)
 	if err != nil {
 		// Error logging is already done in the called function
 		// So just return in case of error
 		return
 	}
 
-	err = formulateActionInfoFromFlow(actionInfo4, classifierInfo4, flowState4)
+	err = formulateActionInfoFromFlow(ctx, actionInfo4, classifierInfo4, flowState4)
 	if err != nil {
 		// Error logging is already done in the called function
 		// So just return in case of error
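
The test edits in this file follow two micro-patterns: hoist a single ctx := context.Background() when a test makes several context-taking calls, and inline context.Background() for one-offs. The hoisted form, self-contained:

```go
package core

import (
	"context"
	"testing"
)

// TestCtxHoisting shows the hoisted-ctx shape used above; the closure
// stands in for any method that now takes a context first.
func TestCtxHoisting(t *testing.T) {
	ctx := context.Background()
	call := func(ctx context.Context) error { return ctx.Err() }
	for i := 0; i < 3; i++ {
		if err := call(ctx); err != nil {
			t.Fatalf("call %d: %v", i, err)
		}
	}
}
```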
diff --git a/internal/pkg/core/openolt_test.go b/internal/pkg/core/openolt_test.go
index 22a61c2..764cb20 100644
--- a/internal/pkg/core/openolt_test.go
+++ b/internal/pkg/core/openolt_test.go
@@ -141,7 +141,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Abandon_device(tt.args.device); err != tt.wantErr {
+			if err := oo.Abandon_device(context.Background(), tt.args.device); err != tt.wantErr {
 				t.Errorf("Abandon_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -170,7 +170,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Activate_image_update(tt.args.device, tt.args.request)
+			got, err := oo.Activate_image_update(context.Background(), tt.args.device, tt.args.request)
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Activate_image_update() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -191,7 +191,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Adapter_descriptor(); err != tt.wantErr {
+			if err := oo.Adapter_descriptor(context.Background()); err != tt.wantErr {
 				t.Errorf("Adapter_descriptor() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -218,7 +218,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			err := oo.Adopt_device(tt.args.device)
+			err := oo.Adopt_device(context.Background(), tt.args.device)
 			if (err != nil) && (reflect.TypeOf(err) !=
 				reflect.TypeOf(tt.wantErr)) && (tt.args.device == nil) {
 				t.Errorf("Adopt_device() error = %v, wantErr %v", err, tt.wantErr)
@@ -252,7 +252,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Cancel_image_download(tt.args.device, tt.args.request)
+			got, err := oo.Cancel_image_download(context.Background(), tt.args.device, tt.args.request)
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Cancel_image_download() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -276,7 +276,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Delete_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Delete_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Delete_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -300,7 +300,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Device_types()
+			got, err := oo.Device_types(context.Background())
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Device_types() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -325,7 +325,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Disable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Disable_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Disable_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -354,7 +354,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Download_image(tt.args.device, tt.args.request)
+			got, err := oo.Download_image(context.Background(), tt.args.device, tt.args.request)
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Download_image() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -379,7 +379,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Get_device_details(tt.args.device); err != tt.wantErr {
+			if err := oo.Get_device_details(context.Background(), tt.args.device); err != tt.wantErr {
 				t.Errorf("Get_device_details() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -408,7 +408,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Get_image_download_status(tt.args.device, tt.args.request)
+			got, err := oo.Get_image_download_status(context.Background(), tt.args.device, tt.args.request)
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Get_image_download_status() got = %v want = %v error = %v, wantErr %v",
 					got, tt.want, err, tt.wantErr)
@@ -446,7 +446,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Get_ofp_device_info(tt.args.device)
+			got, err := oo.Get_ofp_device_info(context.Background(), tt.args.device)
 			if !reflect.DeepEqual(err, tt.wantErr) || !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("Get_ofp_device_info() got = %v want = %v error = %v, wantErr = %v",
 					got, tt.want, err, tt.wantErr)
@@ -469,7 +469,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Health()
+			got, err := oo.Health(context.Background())
 			if err != tt.wantErr && got == nil {
 				t.Errorf("Get_ofp_port_info() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -526,7 +526,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Process_inter_adapter_message(tt.args.msg); reflect.TypeOf(err) != tt.wantErrType {
+			if err := oo.Process_inter_adapter_message(context.Background(), tt.args.msg); reflect.TypeOf(err) != tt.wantErrType {
 				t.Errorf("Process_inter_adapter_message() error = %v, wantErr %v",
 					reflect.TypeOf(err), tt.wantErrType)
 			}
@@ -551,7 +551,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Reboot_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Reboot_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Reboot_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -585,7 +585,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Receive_packet_out(tt.args.deviceID, tt.args.egressPortNo, tt.args.packet); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Receive_packet_out(context.Background(), tt.args.deviceID, tt.args.egressPortNo, tt.args.packet); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Receive_packet_out() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -610,7 +610,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Reconcile_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Reconcile_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Reconcile_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -634,7 +634,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Reenable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Reenable_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Reenable_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -663,7 +663,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			got, err := oo.Revert_image_update(tt.args.device, tt.args.request)
+			got, err := oo.Revert_image_update(context.Background(), tt.args.device, tt.args.request)
 			if err != tt.wantErr && got == nil {
 				t.Log("error :", err)
 			}
@@ -688,7 +688,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Self_test_device(tt.args.device); err != tt.wantErr {
+			if err := oo.Self_test_device(context.Background(), tt.args.device); err != tt.wantErr {
 				t.Errorf("Self_test_device() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -757,7 +757,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Suppress_event(tt.args.filter); err != tt.wantErr {
+			if err := oo.Suppress_event(context.Background(), tt.args.filter); err != tt.wantErr {
 				t.Errorf("Suppress_event() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -781,7 +781,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Unsuppress_event(tt.args.filter); err != tt.wantErr {
+			if err := oo.Unsuppress_event(context.Background(), tt.args.filter); err != tt.wantErr {
 				t.Errorf("Unsuppress_event() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -808,7 +808,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Update_flows_bulk(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); err != tt.wantErr {
+			if err := oo.Update_flows_bulk(context.Background(), tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); err != tt.wantErr {
 				t.Errorf("Update_flows_bulk() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -836,7 +836,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Update_flows_incrementally(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Update_flows_incrementally(context.Background(), tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Update_flows_incrementally() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -860,7 +860,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Update_pm_config(tt.args.device, tt.args.pmConfigs); !reflect.DeepEqual(err, tt.wantErr) {
+			if err := oo.Update_pm_config(context.Background(), tt.args.device, tt.args.pmConfigs); !reflect.DeepEqual(err, tt.wantErr) {
 				t.Errorf("Update_pm_config() error = %v, wantErr %v", err, tt.wantErr)
 			}
 
@@ -908,7 +909,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Enable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+			if err := oo.Enable_port(context.Background(), tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
 				t.Errorf("OpenOLT.Enable_port() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
@@ -933,7 +934,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			oo := testOltObject(tt.fields)
-			if err := oo.Disable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+			if err := oo.Disable_port(context.Background(), tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
 				t.Errorf("OpenOLT.Disable_port() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
diff --git a/internal/pkg/core/statsmanager.go b/internal/pkg/core/statsmanager.go
index c7bd546..6427a0c 100755
--- a/internal/pkg/core/statsmanager.go
+++ b/internal/pkg/core/statsmanager.go
@@ -18,12 +18,12 @@
 package core
 
 import (
+	"context"
 	"fmt"
+	"strconv"
 	"sync"
 	"time"
 
-	"strconv"
-
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
 	"github.com/opencord/voltha-protos/v3/go/openolt"
@@ -190,7 +190,7 @@
 }
 
 // NewOpenOltStatsMgr returns a new instance of the OpenOltStatisticsMgr
-func NewOpenOltStatsMgr(Dev *DeviceHandler) *OpenOltStatisticsMgr {
+func NewOpenOltStatsMgr(ctx context.Context, Dev *DeviceHandler) *OpenOltStatisticsMgr {
 
 	var StatMgr OpenOltStatisticsMgr
 
@@ -199,16 +199,16 @@
 	// Northbound and Southbound ports
 	// added to initialize the pm_metrics
 	var Ports interface{}
-	Ports, _ = InitPorts("nni", Dev.device.Id, 1)
+	Ports, _ = InitPorts(ctx, "nni", Dev.device.Id, 1)
 	StatMgr.NorthBoundPort, _ = Ports.(map[uint32]*NniPort)
 	NumPonPorts := Dev.resourceMgr.DevInfo.GetPonPorts()
-	Ports, _ = InitPorts("pon", Dev.device.Id, NumPonPorts)
+	Ports, _ = InitPorts(ctx, "pon", Dev.device.Id, NumPonPorts)
 	StatMgr.SouthBoundPort, _ = Ports.(map[uint32]*PonPort)
 	return &StatMgr
 }
 
 // InitPorts collects the port objects (nni and pon) that are updated with the current data from the OLT
-func InitPorts(Intftype string, DeviceID string, numOfPorts uint32) (interface{}, error) {
+func InitPorts(ctx context.Context, Intftype string, DeviceID string, numOfPorts uint32) (interface{}, error) {
 	/*
 	     This method collects the port objects:  nni and pon that are updated with the
 	     current data from the OLT
@@ -224,25 +224,25 @@
 	if Intftype == "nni" {
 		NniPorts := make(map[uint32]*NniPort)
 		for i = 0; i < numOfPorts; i++ {
-			Port := BuildPortObject(i, "nni", DeviceID).(*NniPort)
+			Port := BuildPortObject(ctx, i, "nni", DeviceID).(*NniPort)
 			NniPorts[Port.IntfID] = Port
 		}
 		return NniPorts, nil
 	} else if Intftype == "pon" {
 		PONPorts := make(map[uint32]*PonPort)
 		for i = 0; i < numOfPorts; i++ {
-			PONPort := BuildPortObject(i, "pon", DeviceID).(*PonPort)
+			PONPort := BuildPortObject(ctx, i, "pon", DeviceID).(*PonPort)
 			PONPorts[PortNoToIntfID(PONPort.IntfID, voltha.Port_PON_OLT)] = PONPort
 		}
 		return PONPorts, nil
 	} else {
-		logger.Errorw("invalid-type-of-interface", log.Fields{"interface-type": Intftype})
+		logger.Errorw(ctx, "invalid-type-of-interface", log.Fields{"interface-type": Intftype})
 		return nil, olterrors.NewErrInvalidValue(log.Fields{"interface-type": Intftype}, nil)
 	}
 }
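
InitPorts returns interface{} and every caller type-asserts the result. Should the two port families ever justify it, typed constructors would contain the assertion in one place; a sketch against the same BuildPortObject, as it would sit in statsmanager.go (an aside, not part of this change):

```go
// initNniPorts is a typed variant of the "nni" arm of InitPorts; it
// keeps the *NniPort assertion here instead of at every call site.
func initNniPorts(ctx context.Context, deviceID string, n uint32) map[uint32]*NniPort {
	ports := make(map[uint32]*NniPort, n)
	for i := uint32(0); i < n; i++ {
		if p, ok := BuildPortObject(ctx, i, "nni", deviceID).(*NniPort); ok {
			ports[p.IntfID] = p
		}
	}
	return ports
}
```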
 
 // BuildPortObject allows for updating north and southbound ports, newly discovered ports, and devices
-func BuildPortObject(PortNum uint32, IntfType string, DeviceID string) interface{} {
+func BuildPortObject(ctx context.Context, PortNum uint32, IntfType string, DeviceID string) interface{} {
 	/*
 	   Separate method to allow for updating north and southbound ports
 	   newly discovered ports and devices
@@ -257,7 +257,7 @@
 	if IntfType == "nni" {
 		IntfID := IntfIDToPortNo(PortNum, voltha.Port_ETHERNET_NNI)
 		nniID := PortNoToIntfID(IntfID, voltha.Port_ETHERNET_NNI)
-		logger.Debugw("interface-type-nni",
+		logger.Debugw(ctx, "interface-type-nni",
 			log.Fields{
 				"nni-id":    nniID,
 				"intf-type": IntfType})
@@ -267,13 +267,13 @@
 		//  intf_id and pon_id are currently equal.
 		IntfID := IntfIDToPortNo(PortNum, voltha.Port_PON_OLT)
 		PONID := PortNoToIntfID(IntfID, voltha.Port_PON_OLT)
-		logger.Debugw("interface-type-pon",
+		logger.Debugw(ctx, "interface-type-pon",
 			log.Fields{
 				"pon-id":    PONID,
 				"intf-type": IntfType})
 		return NewPONPort(PONID, DeviceID, IntfID, PortNum)
 	} else {
-		logger.Errorw("invalid-type-of-interface", log.Fields{"intf-type": IntfType})
+		logger.Errorw(ctx, "invalid-type-of-interface", log.Fields{"intf-type": IntfType})
 		return nil
 	}
 }
@@ -368,9 +368,9 @@
 }
 
 // publishMetrics will publish the pon port metrics
-func (StatMgr OpenOltStatisticsMgr) publishMetrics(val map[string]float32,
+func (StatMgr OpenOltStatisticsMgr) publishMetrics(ctx context.Context, val map[string]float32,
 	port *voltha.Port, devID string, devType string) {
-	logger.Debugw("publish-metrics",
+	logger.Debugw(ctx, "publish-metrics",
 		log.Fields{
 			"port":    port.Label,
 			"metrics": val})
@@ -405,26 +405,26 @@
 	ke.Type = voltha.KpiEventType_slice
 	ke.Ts = float64(time.Now().UnixNano())
 
-	if err := StatMgr.Device.EventProxy.SendKpiEvent("STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, volthaEventSubCatgry, raisedTs); err != nil {
-		logger.Errorw("failed-to-send-pon-stats", log.Fields{"err": err})
+	if err := StatMgr.Device.EventProxy.SendKpiEvent(ctx, "STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, volthaEventSubCatgry, raisedTs); err != nil {
+		logger.Errorw(ctx, "failed-to-send-pon-stats", log.Fields{"err": err})
 	}
 }
 
 // PortStatisticsIndication handles the port statistics indication
-func (StatMgr *OpenOltStatisticsMgr) PortStatisticsIndication(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
-	StatMgr.PortsStatisticsKpis(PortStats, NumPonPorts)
-	logger.Debugw("received-port-stats-indication", log.Fields{"port-stats": PortStats})
+func (StatMgr *OpenOltStatisticsMgr) PortStatisticsIndication(ctx context.Context, PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+	StatMgr.PortsStatisticsKpis(ctx, PortStats, NumPonPorts)
+	logger.Debugw(ctx, "received-port-stats-indication", log.Fields{"port-stats": PortStats})
 	// TODO send stats to core topic to the voltha kafka or a different kafka ?
 }
 
 // FlowStatisticsIndication to be implemented
-func FlowStatisticsIndication(self, FlowStats *openolt.FlowStatistics) {
-	logger.Debugw("flow-stats-collected", log.Fields{"flow-stats": FlowStats})
+func FlowStatisticsIndication(ctx context.Context, self, FlowStats *openolt.FlowStatistics) {
+	logger.Debugw(ctx, "flow-stats-collected", log.Fields{"flow-stats": FlowStats})
 	//TODO send to kafka ?
 }
 
 // PortsStatisticsKpis map the port stats values into a dictionary, creates the kpiEvent and then publish to Kafka
-func (StatMgr *OpenOltStatisticsMgr) PortsStatisticsKpis(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+func (StatMgr *OpenOltStatisticsMgr) PortsStatisticsKpis(ctx context.Context, PortStats *openolt.PortStatistics, NumPonPorts uint32) {
 
 	/* map the port stats values into a dictionary
 	  Create a kpiEvent and publish to Kafka
@@ -461,7 +461,7 @@
 		mutex.Lock()
 		StatMgr.NorthBoundPort[0] = &portNNIStat
 		mutex.Unlock()
-		logger.Debugw("received-nni-stats", log.Fields{"nni-stats": StatMgr.NorthBoundPort})
+		logger.Debugw(ctx, "received-nni-stats", log.Fields{"nni-stats": StatMgr.NorthBoundPort})
 	}
 	for i := uint32(0); i < NumPonPorts; i++ {
 
@@ -483,7 +483,7 @@
 			mutex.Lock()
 			StatMgr.SouthBoundPort[i] = &portPonStat
 			mutex.Unlock()
-			logger.Debugw("received-pon-stats-for-port", log.Fields{"port-pon-stats": portPonStat})
+			logger.Debugw(ctx, "received-pon-stats-for-port", log.Fields{"port-pon-stats": portPonStat})
 		}
 	}
 
@@ -506,7 +506,7 @@
 	       err = UpdatePortObjectKpiData(SouthboundPorts[PortStats.IntfID], PMData)
 	   }
 	   if (err != nil) {
-	       logger.Error("Error publishing statistics data")
+	       logger.Error(ctx, "Error publishing statistics data")
 	   }
 	*/
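
For reference, a minimal sketch of how a caller threads its context through the reworked statistics path. The two signatures match the hunks above; dh (an initialised *DeviceHandler) and stats (a received *openolt.PortStatistics) are assumed, not defined here:

	ctx := context.Background()
	statMgr := NewOpenOltStatsMgr(ctx, dh)
	// Every log line emitted while the indication is processed now
	// carries the caller's context.
	statMgr.PortStatisticsIndication(ctx, stats, dh.resourceMgr.DevInfo.GetPonPorts())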
 
diff --git a/internal/pkg/core/statsmanager_test.go b/internal/pkg/core/statsmanager_test.go
index f7f8246..67b9a6a 100644
--- a/internal/pkg/core/statsmanager_test.go
+++ b/internal/pkg/core/statsmanager_test.go
@@ -18,6 +18,7 @@
 package core
 
 import (
+	"context"
 	"fmt"
 	"github.com/opencord/voltha-protos/v3/go/openolt"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
@@ -44,7 +45,7 @@
 	}
 	dh := newMockDeviceHandler()
 	dh.device = device
-	StatMgr := NewOpenOltStatsMgr(dh)
+	StatMgr := NewOpenOltStatsMgr(context.Background(), dh)
 
 	type args struct {
 		PortStats *openolt.PortStatistics
@@ -59,7 +60,7 @@
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			StatMgr.PortStatisticsIndication(tt.args.PortStats, 16)
+			StatMgr.PortStatisticsIndication(context.Background(), tt.args.PortStats, 16)
 		})
 	}
 }
@@ -168,7 +169,7 @@
 				NorthBoundPort: tt.fields.NorthBoundPort,
 				SouthBoundPort: tt.fields.SouthBoundPort,
 			}
-			StatMgr.publishMetrics(tt.args.val, tt.args.port, "onu1", "openolt")
+			StatMgr.publishMetrics(context.Background(), tt.args.val, tt.args.port, "onu1", "openolt")
 		})
 	}
 }
diff --git a/internal/pkg/resourcemanager/common.go b/internal/pkg/resourcemanager/common.go
index b2a4112..3703526 100644
--- a/internal/pkg/resourcemanager/common.go
+++ b/internal/pkg/resourcemanager/common.go
@@ -21,12 +21,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "resourcemanager"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "resourcemanager"})
 	if err != nil {
 		panic(err)
 	}
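
The same migration (log.Logger to log.CLogger, AddPackage to RegisterPackage) repeats in every package-level common.go touched by this change. A new package would register itself the same way; in this sketch only the "mypackage" name is a placeholder:

	var logger log.CLogger

	func init() {
		// Register the package so its log level can be changed at run time.
		var err error
		logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypackage"})
		if err != nil {
			panic(err)
		}
	}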
diff --git a/internal/pkg/resourcemanager/resourcemanager.go b/internal/pkg/resourcemanager/resourcemanager.go
index 8ab03f0..3557bbf 100755
--- a/internal/pkg/resourcemanager/resourcemanager.go
+++ b/internal/pkg/resourcemanager/resourcemanager.go
@@ -122,24 +122,24 @@
 	flowIDToGemInfoLock sync.RWMutex
 }
 
-func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
-	logger.Infow("kv-store-type", log.Fields{"store": storeType})
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.FatalLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
 // SetKVClient sets the KV client and return a kv backend
-func SetKVClient(backend string, addr string, DeviceID string) *db.Backend {
+func SetKVClient(ctx context.Context, backend string, addr string, DeviceID string) *db.Backend {
 	// TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
 	// issue between kv store and backend , core is not calling NewBackend directly
-	kvClient, err := newKVClient(backend, addr, KvstoreTimeout)
+	kvClient, err := newKVClient(ctx, backend, addr, KvstoreTimeout)
 	if err != nil {
-		logger.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+		logger.Fatalw(ctx, "Failed to init KV client", log.Fields{"err": err})
 		return nil
 	}
 
@@ -158,16 +158,16 @@
 // the resources.
 func NewResourceMgr(ctx context.Context, deviceID string, KVStoreAddress string, kvStoreType string, deviceType string, devInfo *openolt.DeviceInfo) *OpenOltResourceMgr {
 	var ResourceMgr OpenOltResourceMgr
-	logger.Debugf("Init new resource manager , address: %s, deviceid: %s", KVStoreAddress, deviceID)
+	logger.Debugf(ctx, "Init new resource manager, address: %s, deviceid: %s", KVStoreAddress, deviceID)
 	ResourceMgr.Address = KVStoreAddress
 	ResourceMgr.DeviceType = deviceType
 	ResourceMgr.DevInfo = devInfo
 	NumPONPorts := devInfo.GetPonPorts()
 
 	Backend := kvStoreType
-	ResourceMgr.KVStore = SetKVClient(Backend, ResourceMgr.Address, deviceID)
+	ResourceMgr.KVStore = SetKVClient(ctx, Backend, ResourceMgr.Address, deviceID)
 	if ResourceMgr.KVStore == nil {
-		logger.Error("Failed to setup KV store")
+		logger.Error(ctx, "Failed to setup KV store")
 	}
 	Ranges := make(map[string]*openolt.DeviceInfo_DeviceResourceRanges)
 	RsrcMgrsByTech := make(map[string]*ponrmgr.PONResourceManager)
@@ -230,12 +230,13 @@
 	var err error
 	for _, TechRange := range devInfo.Ranges {
 		technology := TechRange.Technology
-		logger.Debugf("Device info technology %s", technology)
+		logger.Debugf(ctx, "Device info technology %s", technology)
 		Ranges[technology] = TechRange
-		RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(technology, deviceType, deviceID,
+
+		RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(ctx, technology, deviceType, deviceID,
 			Backend, ResourceMgr.Address)
 		if err != nil {
-			logger.Errorf("Failed to create pon resource manager instance for technology %s", technology)
+			logger.Errorf(ctx, "Failed to create pon resource manager instance for technology %s", technology)
 			return nil
 		}
 		// resource_mgrs_by_tech[technology] = resource_mgr
@@ -254,7 +255,7 @@
 	for _, PONRMgr := range RsrcMgrsByTech {
 		_ = PONRMgr.InitDeviceResourcePool(ctx)
 	}
-	logger.Info("Initialization of  resource manager success!")
+	logger.Info(ctx, "Initialization of resource manager successful!")
 	return &ResourceMgr
 }
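
A usage sketch of the migrated constructor chain; the context comes from the adapter request being served, and the device id, store address and deviceInfo values are placeholders (the log key is illustrative too):

	rsrcMgr := NewResourceMgr(ctx, "device-1234", "etcd-host:2379", "etcd", "openolt", deviceInfo)
	if rsrcMgr == nil {
		logger.Errorw(ctx, "resource-manager-init-failed", log.Fields{"device-id": "device-1234"})
	}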
 
@@ -267,11 +268,11 @@
 
 	// init the resource range pool according to the sharing type
 
-	logger.Debugf("Resource range pool init for technology %s", ponRMgr.Technology)
+	logger.Debugf(ctx, "Resource range pool init for technology %s", ponRMgr.Technology)
 	// first load from KV profiles
 	status := ponRMgr.InitResourceRangesFromKVStore(ctx)
 	if !status {
-		logger.Debugf("Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
+		logger.Debugf(ctx, "Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
 	}
 
 	/*
@@ -279,7 +280,7 @@
 	   or is broader than the device, the device's information will
 	   dictate the range limits
 	*/
-	logger.Debugw("Using device info to init pon resource ranges", log.Fields{"Tech": ponRMgr.Technology})
+	logger.Debugw(ctx, "Using device info to init pon resource ranges", log.Fields{"Tech": ponRMgr.Technology})
 
 	ONUIDStart := devInfo.OnuIdStart
 	ONUIDEnd := devInfo.OnuIdEnd
@@ -344,7 +345,7 @@
 		}
 	}
 
-	logger.Debugw("Device info init", log.Fields{"technology": techRange.Technology,
+	logger.Debugw(ctx, "Device info init", log.Fields{"technology": techRange.Technology,
 		"onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd, "onu_id_shared_pool_id": ONUIDSharedPoolID,
 		"alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
 		"alloc_id_shared_pool_id": AllocIDSharedPoolID,
@@ -358,7 +359,7 @@
 		"uni_id_end_idx":            1, /*MaxUNIIDperONU()*/
 	})
 
-	ponRMgr.InitDefaultPONResourceRanges(ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
+	ponRMgr.InitDefaultPONResourceRanges(ctx, ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
 		AllocIDStart, AllocIDEnd, AllocIDSharedPoolID,
 		GEMPortIDStart, GEMPortIDEnd, GEMPortIDSharedPoolID,
 		FlowIDStart, FlowIDEnd, FlowIDSharedPoolID, 0, 1,
@@ -367,33 +368,33 @@
 	// For global sharing, make sure to refresh both local and global resource manager instances' range
 
 	if ONUIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
+		globalPONRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
 			"", 0, nil)
-		ponRMgr.UpdateRanges(ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
+		ponRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
 			"", 0, globalPONRMgr)
 	}
 	if AllocIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
+		globalPONRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
 			"", 0, nil)
 
-		ponRMgr.UpdateRanges(ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
+		ponRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
 			"", 0, globalPONRMgr)
 	}
 	if GEMPortIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
+		globalPONRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
 			"", 0, nil)
-		ponRMgr.UpdateRanges(ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
+		ponRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
 			"", 0, globalPONRMgr)
 	}
 	if FlowIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-		globalPONRMgr.UpdateRanges(ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
+		globalPONRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
 			"", 0, nil)
-		ponRMgr.UpdateRanges(ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
+		ponRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
 			"", 0, globalPONRMgr)
 	}
 
 	// Make sure loaded range fits the platform bit encoding ranges
-	ponRMgr.UpdateRanges(ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
+	ponRMgr.UpdateRanges(ctx, ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
 }
 
 // Delete clears used resources for the particular olt device being deleted
@@ -421,11 +422,11 @@
 	*/
 	for _, rsrcMgr := range RsrcMgr.ResourceMgrs {
 		if err := rsrcMgr.ClearDeviceResourcePool(ctx); err != nil {
-			logger.Debug("Failed to clear device resource pool")
+			logger.Debug(ctx, "Failed to clear device resource pool")
 			return err
 		}
 	}
-	logger.Debug("Cleared device resource pool")
+	logger.Debug(ctx, "Cleared device resource pool")
 	return nil
 }
 
@@ -443,7 +444,7 @@
 	ONUID, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(ctx, ponIntfID,
 		ponrmgr.ONU_ID, 1)
 	if err != nil {
-		logger.Errorf("Failed to get resource for interface %d for type %s",
+		logger.Errorf(ctx, "Failed to get resource for interface %d for type %s",
 			ponIntfID, ponrmgr.ONU_ID)
 		return 0, err
 	}
@@ -463,11 +464,11 @@
 
 	FlowPath := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
 	if err := RsrcMgr.ResourceMgrs[ponIntfID].GetFlowIDInfo(ctx, FlowPath, flowID, &flows); err != nil {
-		logger.Errorw("Error while getting flows from KV store", log.Fields{"flowId": flowID})
+		logger.Errorw(ctx, "Error while getting flows from KV store", log.Fields{"flowId": flowID})
 		return nil
 	}
 	if len(flows) == 0 {
-		logger.Debugw("No flowInfo found in KV store", log.Fields{"flowPath": FlowPath})
+		logger.Debugw(ctx, "No flowInfo found in KV store", log.Fields{"flowPath": FlowPath})
 		return nil
 	}
 	return &flows
@@ -508,10 +509,10 @@
 
 	FlowIDs := RsrcMgr.ResourceMgrs[ponIntfID].GetCurrentFlowIDsForOnu(ctx, FlowPath)
 	if FlowIDs != nil {
-		logger.Debugw("Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "ONUID": ONUID, "uniID": uniID, "KVpath": FlowPath})
+		logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "ONUID": ONUID, "uniID": uniID, "KVpath": FlowPath})
 		for _, flowID := range FlowIDs {
 			FlowInfo := RsrcMgr.GetFlowIDInfo(ctx, ponIntfID, int32(ONUID), int32(uniID), uint32(flowID))
-			er := getFlowIDFromFlowInfo(FlowInfo, flowID, gemportID, flowStoreCookie, flowCategory, vlanVid, vlanPcp...)
+			er := getFlowIDFromFlowInfo(ctx, FlowInfo, flowID, gemportID, flowStoreCookie, flowCategory, vlanVid, vlanPcp...)
 			if er == nil {
 				log.Debugw("Found flowid for the vlan, pcp, and gem",
 					log.Fields{"flowID": flowID, "vlanVid": vlanVid, "vlanPcp": vlanPcp, "gemPortID": gemportID})
@@ -519,11 +520,11 @@
 			}
 		}
 	}
-	logger.Debug("No matching flows with flow cookie or flow category, allocating new flowid")
+	logger.Debug(ctx, "No matching flows with flow cookie or flow category, allocating new flowid")
 	FlowIDs, err = RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(ctx, ponIntfID,
 		ponrmgr.FLOW_ID, 1)
 	if err != nil {
-		logger.Errorf("Failed to get resource for interface %d for type %s",
+		logger.Errorf(ctx, "Failed to get resource for interface %d for type %s",
 			ponIntfID, ponrmgr.FLOW_ID)
 		return uint32(0), err
 	}
@@ -551,24 +552,24 @@
 		// Since we support only one alloc_id for the ONU at the moment,
 		// return the first alloc_id in the list, if available, for that
 		// ONU.
-		logger.Debugw("Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
+		logger.Debugw(ctx, "Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
 		return AllocID[0]
 	}
 	AllocID, err = RsrcMgr.ResourceMgrs[intfID].GetResourceID(ctx, intfID,
 		ponrmgr.ALLOC_ID, 1)
 
 	if AllocID == nil || err != nil {
-		logger.Error("Failed to allocate alloc id")
+		logger.Error(ctx, "Failed to allocate alloc id")
 		return 0
 	}
 	// update the resource map on KV store with the list of alloc_id
 	// allocated for the pon_intf_onu_id tuple
 	err = RsrcMgr.ResourceMgrs[intfID].UpdateAllocIdsForOnu(ctx, IntfOnuIDUniID, AllocID)
 	if err != nil {
-		logger.Error("Failed to update Alloc ID")
+		logger.Error(ctx, "Failed to update Alloc ID")
 		return 0
 	}
-	logger.Debugw("Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
+	logger.Debugw(ctx, "Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
 	return AllocID[0]
 }
 
@@ -612,7 +613,7 @@
 	}
 	err := RsrcMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocIDs)
 	if err != nil {
-		logger.Errorf("Failed to Remove Alloc Id For Onu. IntfID %d onuID %d uniID %d allocID %d",
+		logger.Errorf(ctx, "Failed to Remove Alloc Id For Onu. IntfID %d onuID %d uniID %d allocID %d",
 			intfID, onuID, uniID, allocID)
 	}
 }
@@ -628,7 +629,7 @@
 	}
 	err := RsrcMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs)
 	if err != nil {
-		logger.Errorf("Failed to Remove Gem Id For Onu. IntfID %d onuID %d uniID %d gemPortId %d",
+		logger.Errorf(ctx, "Failed to Remove Gem Id For Onu. IntfID %d onuID %d uniID %d gemPortId %d",
 			intfID, onuID, uniID, gemPortID)
 	}
 }
@@ -646,12 +647,12 @@
 		IntfGEMPortPath = fmt.Sprintf("%d,%d", PonPort, GEM)
 		Val, err := json.Marshal(Data)
 		if err != nil {
-			logger.Error("failed to Marshal")
+			logger.Error(ctx, "failed to Marshal")
 			return err
 		}
 
 		if err = RsrcMgr.KVStore.Put(ctx, IntfGEMPortPath, Val); err != nil {
-			logger.Errorf("Failed to update resource %s", IntfGEMPortPath)
+			logger.Errorf(ctx, "Failed to update resource %s", IntfGEMPortPath)
 			return err
 		}
 	}
@@ -663,7 +664,7 @@
 	IntfGEMPortPath := fmt.Sprintf("%d,%d", PonPort, GemPort)
 	err := RsrcMgr.KVStore.Delete(ctx, IntfGEMPortPath)
 	if err != nil {
-		logger.Errorf("Failed to Remove Gem port-Pon port to onu map on kv store. Gem %d PonPort %d", GemPort, PonPort)
+		logger.Errorf(ctx, "Failed to Remove Gem port-Pon port to onu map on kv store. Gem %d PonPort %d", GemPort, PonPort)
 	}
 }
 
@@ -690,7 +691,7 @@
 	GEMPortList, err = RsrcMgr.ResourceMgrs[ponPort].GetResourceID(ctx, ponPort,
 		ponrmgr.GEMPORT_ID, NumOfPorts)
 	if err != nil && GEMPortList == nil {
-		logger.Errorf("Failed to get gem port id for %s", IntfOnuIDUniID)
+		logger.Errorf(ctx, "Failed to get gem port id for %s", IntfOnuIDUniID)
 		return nil, err
 	}
 
@@ -699,7 +700,7 @@
 	err = RsrcMgr.ResourceMgrs[ponPort].UpdateGEMPortIDsForOnu(ctx, IntfOnuIDUniID,
 		GEMPortList)
 	if err != nil {
-		logger.Errorf("Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
+		logger.Errorf(ctx, "Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
 		return nil, err
 	}
 	_ = RsrcMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, GEMPortList, ponPort,
@@ -746,7 +747,7 @@
 	IntfONUID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
 	err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(ctx, IntfONUID, FlowID, false)
 	if err != nil {
-		logger.Errorw("Failed to Update flow id  for", log.Fields{"intf": IntfONUID})
+		logger.Errorw(ctx, "Failed to update flow id for", log.Fields{"intf": IntfONUID})
 	}
 	RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(ctx, IntfONUID, FlowID)
 
@@ -767,7 +768,7 @@
 		IntfOnuIDUniID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
 		err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(ctx, IntfOnuIDUniID, flow, false)
 		if err != nil {
-			logger.Errorw("Failed to Update flow id for", log.Fields{"intf": IntfOnuIDUniID})
+			logger.Errorw(ctx, "Failed to Update flow id for", log.Fields{"intf": IntfOnuIDUniID})
 		}
 		RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(ctx, IntfOnuIDUniID, flow)
 	}
@@ -843,14 +844,14 @@
 	FlowPath := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
 	FlowIDs := RsrcMgr.ResourceMgrs[ponIntfID].GetCurrentFlowIDsForOnu(ctx, FlowPath)
 	if FlowIDs != nil {
-		logger.Debugw("Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "onuID": onuID, "uniID": uniID, "KVpath": FlowPath})
+		logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "onuID": onuID, "uniID": uniID, "KVpath": FlowPath})
 		for _, flowID := range FlowIDs {
 			FlowInfo := RsrcMgr.GetFlowIDInfo(ctx, ponIntfID, int32(onuID), int32(uniID), uint32(flowID))
 			if FlowInfo != nil {
-				logger.Debugw("Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowID})
+				logger.Debugw(ctx, "Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowID})
 				for _, Info := range *FlowInfo {
 					if Info.FlowStoreCookie == flowStoreCookie {
-						logger.Debug("Found flow matching with flowStore cookie", log.Fields{"flowId": flowID, "flowStoreCookie": flowStoreCookie})
+						logger.Debug(ctx, "Found flow matching with flowStore cookie", log.Fields{"flowId": flowID, "flowStoreCookie": flowStoreCookie})
 						return true
 					}
 				}
@@ -870,18 +871,18 @@
 		if Value != nil {
 			Val, err := kvstore.ToByte(Value.Value)
 			if err != nil {
-				logger.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
 				return Data
 			}
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Error("Failed to unmarshal", log.Fields{"error": err})
+				logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
 	} else {
-		logger.Errorf("Failed to get TP id from kvstore for path %s", Path)
+		logger.Errorf(ctx, "Failed to get TP id from kvstore for path %s", Path)
 	}
-	logger.Debugf("Getting TP id %d from path %s", Data, Path)
+	logger.Debugf(ctx, "Getting TP id %d from path %s", Data, Path)
 	return Data
 
 }
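
Both logging styles survive the migration, with the context always the first argument. Side by side (the structured key tp-id-read is illustrative):

	logger.Debugf(ctx, "Getting TP id %d from path %s", Data, Path)            // printf-style
	logger.Debugw(ctx, "tp-id-read", log.Fields{"tp-id": Data, "path": Path}) // structured fields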
@@ -891,7 +892,7 @@
 func (RsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDsForOnu(ctx context.Context, IntfID uint32, OnuID uint32, UniID uint32) error {
 	IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
 	if err := RsrcMgr.KVStore.Delete(ctx, IntfOnuUniID); err != nil {
-		logger.Errorw("Failed to delete techprofile id resource in KV store", log.Fields{"path": IntfOnuUniID})
+		logger.Errorw(ctx, "Failed to delete techprofile id resource in KV store", log.Fields{"path": IntfOnuUniID})
 		return err
 	}
 	return nil
@@ -909,11 +910,11 @@
 	IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
 	Value, err := json.Marshal(tpIDList)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", IntfOnuUniID)
+		logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
 		return err
 	}
 	return err
@@ -931,19 +932,19 @@
 	tpIDList := RsrcMgr.GetTechProfileIDForOnu(ctx, IntfID, OnuID, UniID)
 	for _, value := range tpIDList {
 		if value == TpID {
-			logger.Debugf("TpID %d is already in tpIdList for the path %s", TpID, IntfOnuUniID)
+			logger.Debugf(ctx, "TpID %d is already in tpIdList for the path %s", TpID, IntfOnuUniID)
 			return err
 		}
 	}
-	logger.Debugf("updating tp id %d on path %s", TpID, IntfOnuUniID)
+	logger.Debugf(ctx, "updating tp id %d on path %s", TpID, IntfOnuUniID)
 	tpIDList = append(tpIDList, TpID)
 	Value, err = json.Marshal(tpIDList)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", IntfOnuUniID)
+		logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
 		return err
 	}
 	return err
@@ -959,11 +960,11 @@
 	IntfOnuUniID := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
 	Value, err = json.Marshal(*MeterConfig)
 	if err != nil {
-		logger.Error("failed to Marshal meter config")
+		logger.Error(ctx, "failed to Marshal meter config")
 		return err
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
-		logger.Errorf("Failed to store meter into KV store %s", IntfOnuUniID)
+		logger.Errorf(ctx, "Failed to store meter into KV store %s", IntfOnuUniID)
 		return err
 	}
 	return err
@@ -978,22 +979,22 @@
 	Value, err := RsrcMgr.KVStore.Get(ctx, Path)
 	if err == nil {
 		if Value != nil {
-			logger.Debug("Found meter in KV store", log.Fields{"Direction": Direction})
+			logger.Debug(ctx, "Found meter in KV store", log.Fields{"Direction": Direction})
 			Val, er := kvstore.ToByte(Value.Value)
 			if er != nil {
-				logger.Errorw("Failed to convert into byte array", log.Fields{"error": er})
+				logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": er})
 				return nil, er
 			}
 			if er = json.Unmarshal(Val, &meterConfig); er != nil {
-				logger.Error("Failed to unmarshal meterconfig", log.Fields{"error": er})
+				logger.Error(ctx, "Failed to unmarshal meterconfig", log.Fields{"error": er})
 				return nil, er
 			}
 		} else {
-			logger.Debug("meter-does-not-exists-in-KVStore")
+			logger.Debug(ctx, "meter-does-not-exist-in-KVStore")
 			return nil, err
 		}
 	} else {
-		logger.Errorf("Failed to get Meter config from kvstore for path %s", Path)
+		logger.Errorf(ctx, "Failed to get Meter config from kvstore for path %s", Path)
 
 	}
 	return &meterConfig, err
@@ -1005,18 +1006,18 @@
 	UniID uint32, TpID uint32) error {
 	Path := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
 	if err := RsrcMgr.KVStore.Delete(ctx, Path); err != nil {
-		logger.Errorf("Failed to delete meter id %s from kvstore ", Path)
+		logger.Errorf(ctx, "Failed to delete meter id %s from kvstore ", Path)
 		return err
 	}
 	return nil
 }
 
-func getFlowIDFromFlowInfo(FlowInfo *[]FlowInfo, flowID, gemportID uint32, flowStoreCookie uint64, flowCategory string,
+func getFlowIDFromFlowInfo(ctx context.Context, FlowInfo *[]FlowInfo, flowID, gemportID uint32, flowStoreCookie uint64, flowCategory string,
 	vlanVid uint32, vlanPcp ...uint32) error {
 	if FlowInfo != nil {
 		for _, Info := range *FlowInfo {
 			if int32(gemportID) == Info.Flow.GemportId && flowCategory != "" && Info.FlowCategory == flowCategory {
-				logger.Debug("Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
+				logger.Debug(ctx, "Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
 				if Info.FlowCategory == "HSIA_FLOW" {
 					if err := checkVlanAndPbitEqualityForFlows(vlanVid, Info, vlanPcp[0]); err == nil {
 						return nil
@@ -1025,13 +1026,13 @@
 			}
 			if int32(gemportID) == Info.Flow.GemportId && flowStoreCookie != 0 && Info.FlowStoreCookie == flowStoreCookie {
 				if flowCategory != "" && Info.FlowCategory == flowCategory {
-					logger.Debug("Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
+					logger.Debug(ctx, "Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
 					return nil
 				}
 			}
 		}
 	}
-	logger.Debugw("the flow can be related to a different service", log.Fields{"flow_info": FlowInfo})
+	logger.Debugw(ctx, "the flow can be related to a different service", log.Fields{"flow_info": FlowInfo})
 	return errors.New("invalid flow-info")
 }
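
A sketch of the lookup loop from GetFlowID earlier in this file, now that the helper takes a context; FlowIDs and the matching parameters are assumed to be in scope exactly as in that hunk:

	for _, flowID := range FlowIDs {
		FlowInfo := RsrcMgr.GetFlowIDInfo(ctx, ponIntfID, int32(ONUID), int32(uniID), uint32(flowID))
		if er := getFlowIDFromFlowInfo(ctx, FlowInfo, flowID, gemportID, flowStoreCookie, flowCategory, vlanVid, vlanPcp...); er == nil {
			return flowID, nil // reuse the flow id that already matches
		}
	}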
 
@@ -1068,11 +1069,11 @@
 	var err error
 
 	if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
-		logger.Errorf("failed to get onuifo for intfid %d", intfID)
+		logger.Errorf(ctx, "failed to get onu info for intfid %d", intfID)
 		return err
 	}
 	if len(onuGemData) == 0 {
-		logger.Errorw("failed to ger Onuid info ", log.Fields{"intfid": intfID, "onuid": onuID})
+		logger.Errorw(ctx, "failed to get onu id info", log.Fields{"intfid": intfID, "onuid": onuID})
 		return err
 	}
 
@@ -1080,18 +1081,18 @@
 		if onugem.OnuID == onuID {
 			for _, gem := range onuGemData[idx].GemPorts {
 				if gem == gemPort {
-					logger.Debugw("Gem already present in onugem info, skpping addition", log.Fields{"gem": gem})
+					logger.Debugw(ctx, "Gem already present in onugem info, skipping addition", log.Fields{"gem": gem})
 					return nil
 				}
 			}
-			logger.Debugw("Added gem to onugem info", log.Fields{"gem": gemPort})
+			logger.Debugw(ctx, "Added gem to onugem info", log.Fields{"gem": gemPort})
 			onuGemData[idx].GemPorts = append(onuGemData[idx].GemPorts, gemPort)
 			break
 		}
 	}
 	err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
 	if err != nil {
-		logger.Error("Failed to add onugem to kv store")
+		logger.Error(ctx, "Failed to add onugem to kv store")
 		return err
 	}
 	return err
@@ -1102,7 +1103,7 @@
 	var onuGemData []OnuGemInfo
 
 	if err := RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
-		logger.Errorf("failed to get onuifo for intfid %d", IntfID)
+		logger.Errorf(ctx, "failed to get onu info for intfid %d", IntfID)
 		return nil, err
 	}
 
@@ -1115,19 +1116,19 @@
 	var err error
 
 	if err = RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
-		logger.Errorf("failed to get onuifo for intfid %d", IntfID)
+		logger.Errorf(ctx, "failed to get onu info for intfid %d", IntfID)
 		return olterrors.NewErrPersistence("get", "OnuGemInfo", IntfID,
 			log.Fields{"onuGem": onuGem, "intfID": IntfID}, err)
 	}
 	onuGemData = append(onuGemData, onuGem)
 	err = RsrcMgr.ResourceMgrs[IntfID].AddOnuGemInfo(ctx, IntfID, onuGemData)
 	if err != nil {
-		logger.Error("Failed to add onugem to kv store")
+		logger.Error(ctx, "Failed to add onugem to kv store")
 		return olterrors.NewErrPersistence("set", "OnuGemInfo", IntfID,
 			log.Fields{"onuGemData": onuGemData, "intfID": IntfID}, err)
 	}
 
-	logger.Debugw("added onu to onugeminfo", log.Fields{"intf": IntfID, "onugem": onuGem})
+	logger.Debugw(ctx, "added onu to onugeminfo", log.Fields{"intf": IntfID, "onugem": onuGem})
 	return nil
 }
 
@@ -1137,14 +1138,14 @@
 	var err error
 
 	if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
-		logger.Errorf("failed to get onuifo for intfid %d", intfID)
+		logger.Errorf(ctx, "failed to get onu info for intfid %d", intfID)
 		return
 	}
 	for idx, onu := range onuGemData {
 		if onu.OnuID == onuID {
 			for _, uni := range onu.UniPorts {
 				if uni == portNo {
-					logger.Debugw("uni already present in onugem info", log.Fields{"uni": portNo})
+					logger.Debugw(ctx, "uni already present in onugem info", log.Fields{"uni": portNo})
 					return
 				}
 			}
@@ -1154,7 +1155,7 @@
 	}
 	err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
 	if err != nil {
-		logger.Errorw("Failed to add uin port in onugem to kv store", log.Fields{"uni": portNo})
+		logger.Errorw(ctx, "Failed to add uni port in onugem to kv store", log.Fields{"uni": portNo})
 		return
 	}
 	return
@@ -1166,14 +1167,14 @@
 	path := fmt.Sprintf(OnuPacketINPath, pktIn.IntfID, pktIn.OnuID, pktIn.LogicalPort)
 	Value, err := json.Marshal(gemPort)
 	if err != nil {
-		logger.Error("Failed to marshal data")
+		logger.Error(ctx, "Failed to marshal data")
 		return
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
-		logger.Errorw("Failed to put to kvstore", log.Fields{"path": path, "value": gemPort})
+		logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": gemPort})
 		return
 	}
-	logger.Debugw("added gem packet in successfully", log.Fields{"path": path, "gem": gemPort})
+	logger.Debugw(ctx, "added gem packet in successfully", log.Fields{"path": path, "gem": gemPort})
 
 	return
 }
@@ -1188,22 +1189,22 @@
 
 	value, err := RsrcMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		logger.Errorw("Failed to get from kv store", log.Fields{"path": path})
+		logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
 		return uint32(0), err
 	} else if value == nil {
-		logger.Debugw("No pkt in gem found", log.Fields{"path": path})
+		logger.Debugw(ctx, "No pkt in gem found", log.Fields{"path": path})
 		return uint32(0), nil
 	}
 
 	if Val, err = kvstore.ToByte(value.Value); err != nil {
-		logger.Error("Failed to convert to byte array")
+		logger.Error(ctx, "Failed to convert to byte array")
 		return uint32(0), err
 	}
 	if err = json.Unmarshal(Val, &gemPort); err != nil {
-		logger.Error("Failed to unmarshall")
+		logger.Error(ctx, "Failed to unmarshal")
 		return uint32(0), err
 	}
-	logger.Debugw("found packein gemport from path", log.Fields{"path": path, "gem": gemPort})
+	logger.Debugw(ctx, "found packet-in gemport from path", log.Fields{"path": path, "gem": gemPort})
 
 	return gemPort, nil
 }
@@ -1213,7 +1214,7 @@
 
 	path := fmt.Sprintf(OnuPacketINPath, intfID, onuID, logicalPort)
 	if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorf("Falied to remove resource %s", path)
+		logger.Errorf(ctx, "Failed to remove resource %s", path)
 		return err
 	}
 	return nil
@@ -1222,7 +1223,7 @@
 // DelOnuGemInfoForIntf deletes the onugem info from kvstore per interface
 func (RsrcMgr *OpenOltResourceMgr) DelOnuGemInfoForIntf(ctx context.Context, intfID uint32) error {
 	if err := RsrcMgr.ResourceMgrs[intfID].DelOnuGemInfoForIntf(ctx, intfID); err != nil {
-		logger.Errorw("failed to delete onu gem info for", log.Fields{"intfid": intfID})
+		logger.Errorw(ctx, "failed to delete onu gem info for", log.Fields{"intfid": intfID})
 		return err
 	}
 	return nil
@@ -1237,16 +1238,16 @@
 	path := fmt.Sprintf(NnniIntfID)
 	value, err := RsrcMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		logger.Error("failed to get data from kv store")
+		logger.Error(ctx, "failed to get data from kv store")
 		return nil, err
 	}
 	if value != nil {
 		if Val, err = kvstore.ToByte(value.Value); err != nil {
-			logger.Error("Failed to convert to byte array")
+			logger.Error(ctx, "Failed to convert to byte array")
 			return nil, err
 		}
 		if err = json.Unmarshal(Val, &nni); err != nil {
-			logger.Error("Failed to unmarshall")
+			logger.Error(ctx, "Failed to unmarshal")
 			return nil, err
 		}
 	}
@@ -1259,7 +1260,7 @@
 
 	nni, err := RsrcMgr.GetNNIFromKVStore(ctx)
 	if err != nil {
-		logger.Error("failed to fetch nni interfaces from kv store")
+		logger.Error(ctx, "failed to fetch nni interfaces from kv store")
 		return err
 	}
 
@@ -1267,13 +1268,13 @@
 	nni = append(nni, nniIntf)
 	Value, err = json.Marshal(nni)
 	if err != nil {
-		logger.Error("Failed to marshal data")
+		logger.Error(ctx, "Failed to marshal data")
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
-		logger.Errorw("Failed to put to kvstore", log.Fields{"path": path, "value": Value})
+		logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": Value})
 		return err
 	}
-	logger.Debugw("added nni to kv successfully", log.Fields{"path": path, "nni": nniIntf})
+	logger.Debugw(ctx, "added nni to kv successfully", log.Fields{"path": path, "nni": nniIntf})
 	return nil
 }
 
@@ -1283,7 +1284,7 @@
 	path := fmt.Sprintf(NnniIntfID)
 
 	if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorw("Failed to delete nni interfaces from kv store", log.Fields{"path": path})
+		logger.Errorw(ctx, "Failed to delete nni interfaces from kv store", log.Fields{"path": path})
 		return err
 	}
 	return nil
@@ -1296,7 +1297,7 @@
 
 	flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
 	if err != nil {
-		logger.Error("Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
+		logger.Error(ctx, "Failed to get flowids for interface", log.Fields{"error": err, "intf": intf})
 		return err
 	}
 	if flowsForGem == nil {
@@ -1305,17 +1306,17 @@
 	flowsForGem[gem] = flowIDs
 	val, err = json.Marshal(flowsForGem)
 	if err != nil {
-		logger.Error("Failed to marshal data", log.Fields{"error": err})
+		logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
 		return err
 	}
 
 	RsrcMgr.flowIDToGemInfoLock.Lock()
 	defer RsrcMgr.flowIDToGemInfoLock.Unlock()
 	if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
-		logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+		logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
 		return err
 	}
-	logger.Debugw("added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowsForGem[gem]})
+	logger.Debugw(ctx, "added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowsForGem[gem]})
 	return nil
 }
 
@@ -1326,11 +1327,11 @@
 
 	flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
 	if err != nil {
-		logger.Error("Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
+		logger.Error(ctx, "Failed to get flowids for interface", log.Fields{"error": err, "intf": intf})
 		return
 	}
 	if flowsForGem == nil {
-		logger.Error("No flowids found ", log.Fields{"intf": intf, "gemport": gem})
+		logger.Error(ctx, "No flowids found ", log.Fields{"intf": intf, "gemport": gem})
 		return
 	}
 	// once we get the flows per gem map from kv , just delete the gem entry from the map
@@ -1338,14 +1339,14 @@
 	// once gem entry is deleted update the kv store.
 	val, err = json.Marshal(flowsForGem)
 	if err != nil {
-		logger.Error("Failed to marshal data", log.Fields{"error": err})
+		logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
 		return
 	}
 
 	RsrcMgr.flowIDToGemInfoLock.Lock()
 	defer RsrcMgr.flowIDToGemInfoLock.Unlock()
 	if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
-		logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+		logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
 		return
 	}
 	return
@@ -1360,16 +1361,16 @@
 	value, err := RsrcMgr.KVStore.Get(ctx, path)
 	RsrcMgr.flowIDToGemInfoLock.RUnlock()
 	if err != nil {
-		logger.Error("failed to get data from kv store")
+		logger.Error(ctx, "failed to get data from kv store")
 		return nil, err
 	}
 	if value != nil && value.Value != nil {
 		if val, err = kvstore.ToByte(value.Value); err != nil {
-			logger.Error("Failed to convert to byte array ", log.Fields{"error": err})
+			logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
 			return nil, err
 		}
 		if err = json.Unmarshal(val, &flowsForGem); err != nil {
-			logger.Error("Failed to unmarshall", log.Fields{"error": err})
+			logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
 			return nil, err
 		}
 	}
@@ -1382,7 +1383,7 @@
 	RsrcMgr.flowIDToGemInfoLock.Lock()
 	defer RsrcMgr.flowIDToGemInfoLock.Unlock()
 	if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorw("Failed to delete nni interfaces from kv store", log.Fields{"path": path})
+		logger.Errorw(ctx, "Failed to delete flow ids per gem map from kv store", log.Fields{"path": path})
 		return
 	}
 	return
@@ -1402,16 +1403,16 @@
 
 	kvPair, err := RsrcMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		logger.Error("failed to get data from kv store")
+		logger.Error(ctx, "failed to get data from kv store")
 		return nil, err
 	}
 	if kvPair != nil && kvPair.Value != nil {
 		if val, err = kvstore.ToByte(kvPair.Value); err != nil {
-			logger.Error("Failed to convert to byte array ", log.Fields{"error": err})
+			logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
 			return nil, err
 		}
 		if err = json.Unmarshal(val, &mcastQueueToIntfMap); err != nil {
-			logger.Error("Failed to unmarshall ", log.Fields{"error": err})
+			logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
 			return nil, err
 		}
 	}
@@ -1425,7 +1426,7 @@
 
 	mcastQueues, err := RsrcMgr.GetMcastQueuePerInterfaceMap(ctx)
 	if err != nil {
-		logger.Errorw("Failed to get multicast queue info for interface", log.Fields{"error": err, "intf": intf})
+		logger.Errorw(ctx, "Failed to get multicast queue info for interface", log.Fields{"error": err, "intf": intf})
 		return err
 	}
 	if mcastQueues == nil {
@@ -1433,14 +1434,14 @@
 	}
 	mcastQueues[intf] = []uint32{gem, servicePriority}
 	if val, err = json.Marshal(mcastQueues); err != nil {
-		logger.Errorw("Failed to marshal data", log.Fields{"error": err})
+		logger.Errorw(ctx, "Failed to marshal data", log.Fields{"error": err})
 		return err
 	}
 	if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
-		logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+		logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
 		return err
 	}
-	logger.Debugw("added multicast queue info to KV store successfully", log.Fields{"path": path, "mcastQueueInfo": mcastQueues[intf], "interfaceId": intf})
+	logger.Debugw(ctx, "added multicast queue info to KV store successfully", log.Fields{"path": path, "mcastQueueInfo": mcastQueues[intf], "interfaceId": intf})
 	return nil
 }
 
@@ -1471,12 +1472,12 @@
 	Value, err = json.Marshal(groupInfo)
 
 	if err != nil {
-		logger.Error("failed to Marshal flow group object")
+		logger.Error(ctx, "failed to Marshal flow group object")
 		return err
 	}
 
 	if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", path)
+		logger.Errorf(ctx, "Failed to update resource %s", path)
 		return err
 	}
 	return nil
@@ -1491,7 +1492,7 @@
 		path = fmt.Sprintf(FlowGroup, groupID)
 	}
 	if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorf("Failed to remove resource %s due to %s", path, err)
+		logger.Errorf(ctx, "Failed to remove resource %s due to %s", path, err)
 		return false
 	}
 	return true
@@ -1515,11 +1516,11 @@
 	if kvPair != nil && kvPair.Value != nil {
 		Val, err := kvstore.ToByte(kvPair.Value)
 		if err != nil {
-			logger.Errorw("Failed to convert flow group into byte array", log.Fields{"error": err})
+			logger.Errorw(ctx, "Failed to convert flow group into byte array", log.Fields{"error": err})
 			return false, groupInfo, err
 		}
 		if err = json.Unmarshal(Val, &groupInfo); err != nil {
-			logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
+			logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
 			return false, groupInfo, err
 		}
 		return true, groupInfo, nil
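
The get / ToByte / Unmarshal sequence above recurs throughout this file. Purely as an illustration (not part of this change set), it could be folded into one ctx-aware helper:

	func getAndUnmarshal(ctx context.Context, kv *db.Backend, path string, out interface{}) error {
		kvPair, err := kv.Get(ctx, path)
		if err != nil || kvPair == nil || kvPair.Value == nil {
			logger.Errorw(ctx, "failed to get data from kv store", log.Fields{"path": path, "error": err})
			return err
		}
		val, err := kvstore.ToByte(kvPair.Value)
		if err != nil {
			logger.Errorw(ctx, "Failed to convert to byte array", log.Fields{"error": err})
			return err
		}
		if err := json.Unmarshal(val, out); err != nil {
			logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
			return err
		}
		return nil
	}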
diff --git a/internal/pkg/resourcemanager/resourcemanager_test.go b/internal/pkg/resourcemanager/resourcemanager_test.go
index 5b2ee29..817825d 100644
--- a/internal/pkg/resourcemanager/resourcemanager_test.go
+++ b/internal/pkg/resourcemanager/resourcemanager_test.go
@@ -127,7 +127,7 @@
 
 // Get mock function implementation for KVClient
 func (kvclient *MockResKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	logger.Debugw("Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
+	logger.Debugw(ctx, "Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
 	if key != "" {
 		if strings.Contains(key, MeterConfig) {
 			var bands []*ofp.OfpMeterBandHeader
@@ -148,7 +148,7 @@
 			return nil, errors.New("invalid meter")
 		}
 		if strings.Contains(key, FlowIDpool) || strings.Contains(key, GemportIDPool) || strings.Contains(key, AllocIDPool) {
-			logger.Debug("Error Error Error Key:", FlowIDpool, GemportIDPool, AllocIDPool)
+			logger.Debug(ctx, "Error Error Error Key:", FlowIDpool, GemportIDPool, AllocIDPool)
 			data := make(map[string]interface{})
 			data["pool"] = "1024"
 			data["start_idx"] = 1
@@ -157,17 +157,17 @@
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, FlowIDInfo) || strings.Contains(key, FlowIDs) {
-			logger.Debug("Error Error Error Key:", FlowIDs, FlowIDInfo)
+			logger.Debug(ctx, "Error Error Error Key:", FlowIDs, FlowIDInfo)
 			str, _ := json.Marshal([]uint32{1, 2})
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, AllocIDs) || strings.Contains(key, GemportIDs) {
-			logger.Debug("Error Error Error Key:", AllocIDs, GemportIDs)
+			logger.Debug(ctx, "Error Error Error Key:", AllocIDs, GemportIDs)
 			str, _ := json.Marshal(1)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, McastQueuesForIntf) {
-			logger.Debug("Error Error Error Key:", McastQueuesForIntf)
+			logger.Debug(ctx, "Error Error Error Key:", McastQueuesForIntf)
 			mcastQueues := make(map[uint32][]uint32)
 			mcastQueues[10] = []uint32{4000, 0}
 			str, _ := json.Marshal(mcastQueues)
@@ -240,11 +240,11 @@
 }
 
 // CloseWatch mock function implementation for KVClient
-func (kvclient *MockResKVClient) CloseWatch(key string, ch chan *kvstore.Event) {
+func (kvclient *MockResKVClient) CloseWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
 }
 
 // Close mock function implementation for KVClient
-func (kvclient *MockResKVClient) Close() {
+func (kvclient *MockResKVClient) Close(ctx context.Context) {
 }
 
 // testResMgrObject maps fields type to OpenOltResourceMgr type.
@@ -951,7 +951,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := SetKVClient(tt.args.backend, tt.args.address, tt.args.DeviceID); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+			if got := SetKVClient(context.Background(), tt.args.backend, tt.args.address, tt.args.DeviceID); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
 				t.Errorf("SetKVClient() = %v, want %v", got, tt.want)
 			}
 		})
@@ -1018,7 +1018,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := getFlowIDFromFlowInfo(tt.args.FlowInfo, tt.args.flowID, tt.args.gemportID, tt.args.flowStoreCookie, tt.args.flowCategory, tt.args.vlanVid, tt.args.vlanPcp...)
+			err := getFlowIDFromFlowInfo(context.Background(), tt.args.FlowInfo, tt.args.flowID, tt.args.gemportID, tt.args.flowStoreCookie, tt.args.flowCategory, tt.args.vlanVid, tt.args.vlanPcp...)
 			if reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) && err != nil {
 				t.Errorf("getFlowIDFromFlowInfo() error = %v, wantErr %v", err, tt.wantErr)
 			}
@@ -1046,7 +1046,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := newKVClient(tt.args.storeType, tt.args.address, tt.args.timeout)
+			got, err := newKVClient(context.Background(), tt.args.storeType, tt.args.address, tt.args.timeout)
 			if got != nil && reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
 				t.Errorf("newKVClient() got = %v, want %v", got, tt.want)
 			}
diff --git a/pkg/mocks/common.go b/pkg/mocks/common.go
index 3b2df29..836770c 100644
--- a/pkg/mocks/common.go
+++ b/pkg/mocks/common.go
@@ -21,12 +21,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/mocks/mockEventproxy.go b/pkg/mocks/mockEventproxy.go
index d2621ab..2abddf2 100644
--- a/pkg/mocks/mockEventproxy.go
+++ b/pkg/mocks/mockEventproxy.go
@@ -18,8 +18,8 @@
 package mocks
 
 import (
+	"context"
 	"errors"
-
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
@@ -28,7 +28,7 @@
 }
 
 // SendDeviceEvent mocks the SendDeviceEvent function
-func (me *MockEventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category voltha.EventCategory_Types,
+func (me *MockEventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category voltha.EventCategory_Types,
 	subCategory voltha.EventSubCategory_Types, raisedTs int64) error {
 	if raisedTs == 0 {
 		return errors.New("raisedTS cannot be zero")
@@ -37,7 +37,7 @@
 }
 
 // SendKpiEvent mocks the SendKpiEvent function
-func (me *MockEventProxy) SendKpiEvent(id string, deviceEvent *voltha.KpiEvent2, category voltha.EventCategory_Types,
+func (me *MockEventProxy) SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category voltha.EventCategory_Types,
 	subCategory voltha.EventSubCategory_Types, raisedTs int64) error {
 	if raisedTs == 0 {
 		return errors.New("raisedTS cannot be zero")
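
Callers of the widened interface pass their context first, mirroring the SendKpiEvent call in statsmanager.go; in this sketch ke, subCategory and raisedTs are placeholders:

	var ep adapterif.EventProxy = &MockEventProxy{}
	if err := ep.SendKpiEvent(ctx, "STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, subCategory, raisedTs); err != nil {
		logger.Errorw(ctx, "failed-to-send-pon-stats", log.Fields{"err": err})
	}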
diff --git a/pkg/mocks/mockKVClient.go b/pkg/mocks/mockKVClient.go
index d4527ad..5e13d1d 100644
--- a/pkg/mocks/mockKVClient.go
+++ b/pkg/mocks/mockKVClient.go
@@ -70,9 +70,9 @@
 
 // Get mock function implementation for KVClient
 func (kvclient *MockKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	logger.Debugw("Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
+	logger.Debugw(ctx, "Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
 	if key != "" {
-		logger.Debug("Warning Key Not Blank")
+		logger.Debug(ctx, "Warning Key Not Blank")
 		if strings.Contains(key, "meter_id/{0,62,8}/{upstream}") {
 			meterConfig := ofp.OfpMeterConfig{
 				Flags:   0,
@@ -117,7 +117,7 @@
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, FlowIDpool) {
-			logger.Debug("Error Error Error Key:", FlowIDpool)
+			logger.Debug(ctx, "Error Error Error Key:", FlowIDpool)
 			data := make(map[string]interface{})
 			data["pool"] = "1024"
 			data["start_idx"] = 1
@@ -127,7 +127,7 @@
 		}
 		if strings.Contains(key, FlowIDs) {
 			data := []uint32{1, 2}
-			logger.Debug("Error Error Error Key:", FlowIDs)
+			logger.Debug(ctx, "Error Error Error Key:", FlowIDs)
 			str, _ := json.Marshal(data)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
@@ -140,22 +140,22 @@
 					LogicalFlowID:   1,
 				},
 			}
-			logger.Debug("Error Error Error Key:", FlowIDs)
+			logger.Debug(ctx, "Error Error Error Key:", FlowIDs)
 			str, _ := json.Marshal(data)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, GemportIDs) {
-			logger.Debug("Error Error Error Key:", GemportIDs)
+			logger.Debug(ctx, "Error Error Error Key:", GemportIDs)
 			str, _ := json.Marshal(1)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, AllocIDs) {
-			logger.Debug("Error Error Error Key:", AllocIDs)
+			logger.Debug(ctx, "Error Error Error Key:", AllocIDs)
 			str, _ := json.Marshal(1)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, FlowGroup) || strings.Contains(key, FlowGroupCached) {
-			logger.Debug("Error Error Error Key:", FlowGroup)
+			logger.Debug(ctx, "Error Error Error Key:", FlowGroup)
 			groupInfo := resourcemanager.GroupInfo{
 				GroupID:  2,
 				OutPorts: []uint32{1},
@@ -251,9 +251,9 @@
 }
 
 // CloseWatch mock function implementation for KVClient
-func (kvclient *MockKVClient) CloseWatch(key string, ch chan *kvstore.Event) {
+func (kvclient *MockKVClient) CloseWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
 }
 
 // Close mock function implementation for KVClient
-func (kvclient *MockKVClient) Close() {
+func (kvclient *MockKVClient) Close(ctx context.Context) {
 }
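
Any other kvstore.Client implementation must adopt the same context-first signatures; the no-op stubs mirror the mock's (myClient is hypothetical):

	func (c *myClient) CloseWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
		// nothing to tear down in this sketch
	}

	func (c *myClient) Close(ctx context.Context) {
		// nothing to close in this sketch
	}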
diff --git a/pkg/mocks/mockTechprofile.go b/pkg/mocks/mockTechprofile.go
index d47864c..4a71661 100644
--- a/pkg/mocks/mockTechprofile.go
+++ b/pkg/mocks/mockTechprofile.go
@@ -31,19 +31,19 @@
 }
 
 // SetKVClient to mock techprofile SetKVClient method
-func (m MockTechProfile) SetKVClient() *db.Backend {
+func (m MockTechProfile) SetKVClient(ctx context.Context) *db.Backend {
 	return &db.Backend{Client: &MockKVClient{}}
 }
 
 // GetTechProfileInstanceKVPath to mock techprofile GetTechProfileInstanceKVPath method
-func (m MockTechProfile) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+func (m MockTechProfile) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
 	return ""
 
 }
 
 // GetTPInstanceFromKVStore to mock techprofile GetTPInstanceFromKVStore method
 func (m MockTechProfile) GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error) {
-	logger.Debug("Warning Warning Warning: GetTPInstanceFromKVStore")
+	logger.Debug(ctx, "Warning Warning Warning: GetTPInstanceFromKVStore")
 	return nil, nil
 
 }
@@ -83,19 +83,19 @@
 }
 
 // GetprotoBufParamValue to mock techprofile GetprotoBufParamValue method
-func (m MockTechProfile) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+func (m MockTechProfile) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
 	return 0
 
 }
 
 // GetUsScheduler to mock techprofile GetUsScheduler method
-func (m MockTechProfile) GetUsScheduler(tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
+func (m MockTechProfile) GetUsScheduler(ctx context.Context, tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
 	return &tp_pb.SchedulerConfig{}, nil
 
 }
 
 // GetDsScheduler to mock techprofile GetDsScheduler method
-func (m MockTechProfile) GetDsScheduler(tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
+func (m MockTechProfile) GetDsScheduler(ctx context.Context, tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
 	return &tp_pb.SchedulerConfig{}, nil
 }
 
@@ -107,17 +107,17 @@
 }
 
 // GetTrafficQueues to mock techprofile GetTrafficQueues method
-func (m MockTechProfile) GetTrafficQueues(tp *tp.TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+func (m MockTechProfile) GetTrafficQueues(ctx context.Context, tp *tp.TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
 	return []*tp_pb.TrafficQueue{{}}, nil
 }
 
 // GetMulticastTrafficQueues to mock techprofile GetMulticastTrafficQueues method
-func (m MockTechProfile) GetMulticastTrafficQueues(tp *tp.TechProfile) []*tp_pb.TrafficQueue {
+func (m MockTechProfile) GetMulticastTrafficQueues(ctx context.Context, tp *tp.TechProfile) []*tp_pb.TrafficQueue {
 	return []*tp_pb.TrafficQueue{{}}
 }
 
 // GetGemportIDForPbit to mock techprofile GetGemportIDForPbit method
-func (m MockTechProfile) GetGemportIDForPbit(tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32 {
+func (m MockTechProfile) GetGemportIDForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32 {
 	return 0
 }
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
index c144935..dbd8140 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
@@ -17,14 +17,15 @@
 package adapterif
 
 import (
+	"context"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 // EventProxy interface for eventproxy
 type EventProxy interface {
-	SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category EventCategory,
+	SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
-	SendKpiEvent(id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+	SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
 }
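
Note: adding ctx to an interface method is a breaking change, so every implementation and test double must be updated in lockstep. A hypothetical no-op stub showing what the revised contract demands, with a compile-time assertion to catch drift:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// nopEventProxy is a hypothetical stub; any real implementation must add
// ctx in the same way or compilation fails.
type nopEventProxy struct{}

func (nopEventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent,
	category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
	return nil
}

func (nopEventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2,
	category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
	return nil
}

// Compile-time guard that the stub still satisfies the interface.
var _ adapterif.EventProxy = nopEventProxy{}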
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
index cd5750f..ca44d0d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
@@ -35,14 +35,14 @@
 	endpointMgr  kafka.EndpointManager
 }
 
-func NewAdapterProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
 	proxy := AdapterProxy{
 		kafkaICProxy: kafkaProxy,
 		adapterTopic: adapterTopic,
 		coreTopic:    coreTopic,
 		endpointMgr:  kafka.NewEndpointManager(backend),
 	}
-	logger.Debugw("topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 	return &proxy
 }
 
@@ -54,14 +54,14 @@
 	toDeviceId string,
 	proxyDeviceId string,
 	messageId string) error {
-	logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
 		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
 
 	//Marshal the message
 	var marshalledMsg *any.Any
 	var err error
 	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
 		return err
 	}
 
@@ -90,7 +90,7 @@
 	}
 
 	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(toDeviceId, toAdapter)
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
 	if err != nil {
 		return err
 	}
@@ -99,6 +99,6 @@
 	rpc := "process_inter_adapter_message"
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(rpc, "", success, result)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
 }
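
Note: the same rule extends to constructors and helpers — NewAdapterProxy and EndpointManager.GetEndpoint take ctx because they log, or pass it on to code that does. A hypothetical constructor following the pattern:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

var logger log.CLogger // registered via log.RegisterPackage, as in common.go

// TopicPair is a hypothetical type; ctx is accepted solely so the
// construction-time debug log carries the caller's context.
type TopicPair struct{ core, adapter string }

func NewTopicPair(ctx context.Context, core, adapter string) *TopicPair {
	logger.Debugw(ctx, "topics", log.Fields{"core": core, "adapter": adapter})
	return &TopicPair{core: core, adapter: adapter}
}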
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
index 95a036d..ad8b11b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
 	if err != nil {
 		panic(err)
 	}
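
Note: this logger swap repeats in every touched package. Shown whole rather than as a diff, the per-package boilerplate after this change is:

package common // same shape in every instrumented package

import "github.com/opencord/voltha-lib-go/v3/pkg/log"

var logger log.CLogger

func init() {
	// Register this package so that its log level can be modified at run time.
	var err error
	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
	if err != nil {
		panic(err)
	}
}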
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
index 20e1a52..28b532f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
@@ -37,28 +37,28 @@
 	lockDeviceIdCoreMap sync.RWMutex
 }
 
-func NewCoreProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
 	var proxy CoreProxy
 	proxy.kafkaICProxy = kafkaProxy
 	proxy.adapterTopic = adapterTopic
 	proxy.coreTopic = coreTopic
 	proxy.deviceIdCoreMap = make(map[string]string)
 	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
-	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 
 	return &proxy
 }
 
-func unPackResponse(rpc string, deviceId string, success bool, response *a.Any) error {
+func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
 	if success {
 		return nil
 	} else {
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
+		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
 	}
@@ -94,18 +94,18 @@
 }
 
 func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
-	logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
 	rpc := "Register"
 	topic := kafka.Topic{Name: ap.coreTopic}
 	replyToTopic := ap.getAdapterTopic()
 	args := make([]*kafka.KVArg, 2)
 
 	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
-		log.Fatal("totalReplicas can't be 0, since you're here you have at least one")
+		logger.Fatal(ctx, "totalReplicas can't be 0, since you're here you have at least one")
 	}
 
 	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
-		log.Fatal("currentReplica can't be 0, it has to start from 1")
+		logger.Fatal(ctx, "currentReplica can't be 0, it has to start from 1")
 	}
 
 	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
@@ -117,7 +117,7 @@
 	}
 
 	if adapter.CurrentReplica > adapter.TotalReplicas {
-		log.Fatalf("CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
 			adapter.CurrentReplica, adapter.TotalReplicas)
 	}
 
@@ -131,12 +131,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
-	logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(rpc, "", success, result)
+	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
 }
 
 func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
-	logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
+	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"deviceId": device.Id})
 	rpc := "DeviceUpdate"
 	toTopic := ap.getCoreTopic(device.Id)
 	args := make([]*kafka.KVArg, 1)
@@ -147,12 +147,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
-	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
-	return unPackResponse(rpc, device.Id, success, result)
+	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
+	return unPackResponse(ctx, rpc, device.Id, success, result)
 }
 
 func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
-	logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
+	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
 	rpc := "PortCreated"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -171,12 +171,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "PortsStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -197,12 +197,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
-	logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"deviceId": deviceId})
 	rpc := "DeleteAllPorts"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -218,13 +218,13 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "DeviceStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -249,13 +249,13 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
 	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
-	logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
+	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
 	rpc := "ChildDeviceDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -300,12 +300,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -313,17 +313,17 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
 	}
 
 }
 
 func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesLost"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -338,12 +338,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
+	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -358,12 +358,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
+	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
-	logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
+	logger.Debugw(ctx, "GetDevice", log.Fields{"deviceId": deviceId})
 	rpc := "GetDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -377,12 +377,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw(ctx, "GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -390,16 +390,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw(ctx, "GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
-	logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
 	rpc := "GetChildDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -437,12 +437,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -450,16 +450,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
-	logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "GetChildDevices"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -473,12 +473,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevices := &voltha.Devices{}
 		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevices, nil
@@ -486,16 +486,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
-	logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	logger.Debugw(ctx, "SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
 	rpc := "PacketIn"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -519,12 +519,12 @@
 		Value: pkt,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
-	logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
+	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
 	rpc := "DeviceReasonUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -543,12 +543,12 @@
 		Value: reason,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
-	logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
 	rpc := "DevicePMConfigUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -561,12 +561,12 @@
 		Value: pmConfigs,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
-	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
-	return unPackResponse(rpc, pmConfigs.Id, success, result)
+	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
+	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
 }
 
 func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "ReconcileChildDevices"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -578,13 +578,13 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
+	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
 	operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
 	rpc := "PortStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -615,6 +615,6 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
+	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
 }
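
Note: in the CoreProxy methods above, ctx now reaches the log statements and unPackResponse, but most InvokeRPC calls still run on context.Background(); only RegisterAdapter forwards the caller's ctx to the RPC itself. A hypothetical call site, to show what the new signatures give the caller:

package example

import (
	"context"

	com "github.com/opencord/voltha-lib-go/v3/pkg/adapters/common"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// markActive is a hypothetical helper: the ctx handed in here now reaches
// DeviceStateUpdate's log statements and unPackResponse, even though the
// underlying InvokeRPC in that method still uses context.Background().
func markActive(ctx context.Context, cp *com.CoreProxy, deviceID string) error {
	return cp.DeviceStateUpdate(ctx, deviceID,
		voltha.ConnectStatus_REACHABLE, voltha.OperStatus_ACTIVE)
}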
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
index da9c9eb..b79bafe 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
@@ -17,6 +17,7 @@
 package common
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strconv"
@@ -96,9 +97,9 @@
 }
 
 /* Send out device events*/
-func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if deviceEvent == nil {
-		logger.Error("Recieved empty device event")
+		logger.Error(ctx, "Received empty device event")
 		return errors.New("Device event nil")
 	}
 	var event voltha.Event
@@ -109,11 +110,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(&event); err != nil {
-		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
 		return err
 	}
-	logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow(ctx, "Successfully sent device event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
 		"DeviceEventName": deviceEvent.DeviceEventName})
@@ -123,9 +124,9 @@
 }
 
 // SendKpiEvent is to send kpi events to voltha.event topic
-func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if kpiEvent == nil {
-		logger.Error("Recieved empty kpi event")
+		logger.Error(ctx, "Received empty kpi event")
 		return errors.New("KPI event nil")
 	}
 	var event voltha.Event
@@ -136,11 +137,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(&event); err != nil {
-		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
 		return err
 	}
-	logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow(ctx, "Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
 
@@ -150,11 +151,11 @@
 
 /* TODO: Send out KPI events*/
 
-func (ep *EventProxy) sendEvent(event *voltha.Event) error {
-	if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
+func (ep *EventProxy) sendEvent(ctx context.Context, event *voltha.Event) error {
+	if err := ep.kafkaClient.Send(ctx, event, &ep.eventTopic); err != nil {
 		return err
 	}
-	logger.Debugw("Sent event to kafka", log.Fields{"event": event})
+	logger.Debugw(ctx, "Sent event to kafka", log.Fields{"event": event})
 
 	return nil
 }
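
Note: with ctx threaded through SendDeviceEvent, sendEvent, and kafkaClient.Send, an event raised by a request handler now logs under that request's context. A hypothetical helper (the event name and the EQUIPMENT/ONU categories are illustrative values from voltha-protos):

package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// raiseOnuDiscovered is a hypothetical helper showing the new call shape.
func raiseOnuDiscovered(ctx context.Context, ep adapterif.EventProxy, deviceID string) error {
	de := &voltha.DeviceEvent{
		ResourceId:      deviceID,
		DeviceEventName: "ONU_DISCOVERED_RAISE_EVENT",
	}
	// ctx flows from here through SendDeviceEvent into the kafka client.
	return ep.SendDeviceEvent(ctx, de, voltha.EventCategory_EQUIPMENT,
		voltha.EventSubCategory_ONU, time.Now().Unix())
}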
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
index 62d8cdd..a92ed51 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
@@ -16,6 +16,7 @@
 package common
 
 import (
+	"context"
 	"errors"
 
 	"github.com/golang/protobuf/ptypes"
@@ -58,9 +59,9 @@
 	return nil, nil
 }
 
-func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -71,38 +72,38 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
+	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
 
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Adopt_device(device); err != nil {
+	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -114,17 +115,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -133,7 +134,7 @@
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the reconcile device API on the adapter
-	if err := rhp.adapter.Reconcile_device(device); err != nil {
+	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -143,9 +144,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -157,17 +158,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -175,15 +176,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Disable_device API on the adapter
-	if err := rhp.adapter.Disable_device(device); err != nil {
+	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -195,17 +196,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -213,15 +214,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reenable_device API on the adapter
-	if err := rhp.adapter.Reenable_device(device); err != nil {
+	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -233,17 +234,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -251,7 +252,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reboot_device API on the adapter
-	if err := rhp.adapter.Reboot_device(device); err != nil {
+	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -262,9 +263,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -276,17 +277,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -294,7 +295,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the delete_device API on the adapter
-	if err := rhp.adapter.Delete_device(device); err != nil {
+	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -304,10 +305,10 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_flows_bulk")
+func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_bulk")
 	if len(args) < 5 {
-		logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -320,43 +321,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flows":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "groups":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the bulk flow update API of the adapter
-	if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_flows_incrementally")
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_incrementally")
 	if len(args) < 5 {
-		logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -369,43 +370,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "group_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the incremental flow update API of the adapter
-	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_pm_config")
+func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_pm_config")
 	if len(args) < 2 {
-		logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -416,33 +417,33 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pm_configs":
 			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
-				logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+	logger.Debugw(ctx, "Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
 	//Invoke the pm config update API of the adapter
-	if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
+	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debugw("Receive_packet_out", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -454,29 +455,29 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-deviceId", log.Fields{"error": err})
 				return nil, err
 			}
 		case "outPort":
 			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
-				logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
 				return nil, err
 			}
 		case "packet":
 			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
-				logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
+	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -490,9 +491,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -502,31 +503,31 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"deviceId": device.Id})
 
 	var cap *ic.SwitchCapability
 	var err error
-	if cap, err = rhp.adapter.Get_ofp_device_info(device); err != nil {
+	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
-	logger.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
 	return cap, nil
 }
 
-func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -536,21 +537,21 @@
 		switch arg.Key {
 		case "msg":
 			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-msg", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
 
 	//Invoke the inter adapter API on the handler
-	if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
+	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
@@ -577,30 +578,30 @@
 	return &voltha.ImageDownload{}, nil
 }
 
-func (rhp *RequestHandlerProxy) Enable_port(args []*ic.Argument) error {
-	logger.Debugw("enable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(args)
+func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
 	if err != nil {
-		logger.Warnw("enable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Enable_port(deviceId, port)
+	return rhp.adapter.Enable_port(ctx, deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) Disable_port(args []*ic.Argument) error {
-	logger.Debugw("disable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(args)
+func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
 	if err != nil {
-		logger.Warnw("disable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Disable_port(deviceId, port)
+	return rhp.adapter.Disable_port(ctx, deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) getEnableDisableParams(args []*ic.Argument) (string, *voltha.Port, error) {
-	logger.Debugw("getEnableDisableParams", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
+	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return "", nil, errors.New("invalid-number-of-args")
 	}
 	deviceId := &ic.StrType{}
@@ -609,12 +610,12 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return "", nil, err
 			}
 		case "port":
 			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
-				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
 				return "", nil, err
 			}
 		}
@@ -622,9 +623,9 @@
 	return deviceId.Val, port, nil
 }
 
-func (rhp *RequestHandlerProxy) Child_device_lost(args []*ic.Argument) error {
+func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
 	if len(args) < 4 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return errors.New("invalid-number-of-args")
 	}
 
@@ -636,22 +637,22 @@
 		switch arg.Key {
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
 				return err
 			}
 		case "pPortNo":
 			if err := ptypes.UnmarshalAny(arg.Value, pPortNo); err != nil {
-				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
 				return err
 			}
 		case "onuID":
 			if err := ptypes.UnmarshalAny(arg.Value, onuID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-onu-id", log.Fields{"error": err})
 				return err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -659,15 +660,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(pDeviceId.Val, fromTopic.Val)
 	//Invoke the Child_device_lost API on the adapter
-	if err := rhp.adapter.Child_device_lost(pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
+	if err := rhp.adapter.Child_device_lost(ctx, pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
 		return status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return nil
 }
 
-func (rhp *RequestHandlerProxy) Start_omci_test(args []*ic.Argument) (*ic.TestResponse, error) {
+func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -681,26 +682,26 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "omcitestrequest":
 			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
-				logger.Warnw("cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
-	result, err := rhp.adapter.Start_omci_test(device, request)
+	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
 	if err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return result, nil
 }
-func (rhp *RequestHandlerProxy) Get_ext_value(args []*ic.Argument) (*voltha.ReturnValues, error) {
+func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return nil, errors.New("invalid-number-of-args")
 	}
 
@@ -711,24 +712,24 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
 				return nil, err
 			}
 		case "valuetype":
 			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
-				logger.Warnw("cannot-unmarshal-valuetype", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
 				return nil, err
 			}
 		default:
-			logger.Warnw("key-not-found", log.Fields{"arg.Key": arg.Key})
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
 		}
 	}
 
 	//Invoke the Get_value API on the adapter
-	return rhp.adapter.Get_ext_value(pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
+	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
 }
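
The same validate-unmarshal-log shape repeats across these handlers; below is a minimal sketch of the migrated pattern with ctx threaded into every log call. The handler name, argument key, and package placement are illustrative, not part of this change.

package adapters // hypothetical placement; mirrors the handler shape above

import (
	"context"
	"errors"

	"github.com/golang/protobuf/ptypes"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// exampleHandler shows the migrated handler shape: ctx is the first
// parameter and is forwarded to every logger call.
func exampleHandler(ctx context.Context, logger log.CLogger, args []*ic.Argument) (*voltha.Device, error) {
	if len(args) < 1 {
		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
		return nil, errors.New("invalid-number-of-args")
	}
	device := &voltha.Device{}
	for _, arg := range args {
		if arg.Key == "device" {
			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
				return nil, err
			}
		}
	}
	logger.Debugw(ctx, "example-handler", log.Fields{"device-id": device.Id})
	return device, nil
}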
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
index 94e8bd6..3d91119 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
@@ -16,6 +16,7 @@
 package common
 
 import (
+	"context"
 	"fmt"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
@@ -75,7 +76,7 @@
 	return string(b)
 }
 
-func ICProxyErrorCodeToGrpcErrorCode(icErr ic.ErrorCodeCodes) codes.Code {
+func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
 	switch icErr {
 	case ic.ErrorCode_INVALID_PARAMETERS:
 		return codes.InvalidArgument
@@ -84,7 +85,7 @@
 	case ic.ErrorCode_DEADLINE_EXCEEDED:
 		return codes.DeadlineExceeded
 	default:
-		logger.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		logger.Warnw(ctx, "cannot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
 		return codes.Internal
 	}
 }
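
Callers of the mapper simply thread their context through; here is a small caller-side sketch, assuming it lives in the same common package. The wrapper name is illustrative.

package common // hypothetical caller-side sketch within the same package

import (
	"context"

	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
	"google.golang.org/grpc/status"
)

// toGRPCError wraps the mapper: ctx is passed along so the default-branch
// warning above is logged with the request's context.
func toGRPCError(ctx context.Context, icErr ic.ErrorCodeCodes, msg string) error {
	return status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, icErr), msg)
}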
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
index 1e81890..ce0b791 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
@@ -16,6 +16,7 @@
 package adapters
 
 import (
+	"context"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 	"github.com/opencord/voltha-protos/v3/go/openflow_13"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
@@ -23,34 +24,34 @@
 
 //IAdapter represents the set of APIs a voltha adapter has to support.
 type IAdapter interface {
-	Adapter_descriptor() error
-	Device_types() (*voltha.DeviceTypes, error)
-	Health() (*voltha.HealthStatus, error)
-	Adopt_device(device *voltha.Device) error
-	Reconcile_device(device *voltha.Device) error
-	Abandon_device(device *voltha.Device) error
-	Disable_device(device *voltha.Device) error
-	Reenable_device(device *voltha.Device) error
-	Reboot_device(device *voltha.Device) error
-	Self_test_device(device *voltha.Device) error
-	Delete_device(device *voltha.Device) error
-	Get_device_details(device *voltha.Device) error
-	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
-	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
-	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
-	Suppress_event(filter *voltha.EventFilter) error
-	Unsuppress_event(filter *voltha.EventFilter) error
-	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
-	Process_inter_adapter_message(msg *ic.InterAdapterMessage) error
-	Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Enable_port(deviceId string, port *voltha.Port) error
-	Disable_port(deviceId string, port *voltha.Port) error
-	Child_device_lost(parentDeviceId string, parentPortNo uint32, onuID uint32) error
-	Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
-	Get_ext_value(deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
+	Adapter_descriptor(ctx context.Context) error
+	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
+	Health(ctx context.Context) (*voltha.HealthStatus, error)
+	Adopt_device(ctx context.Context, device *voltha.Device) error
+	Reconcile_device(ctx context.Context, device *voltha.Device) error
+	Abandon_device(ctx context.Context, device *voltha.Device) error
+	Disable_device(ctx context.Context, device *voltha.Device) error
+	Reenable_device(ctx context.Context, device *voltha.Device) error
+	Reboot_device(ctx context.Context, device *voltha.Device) error
+	Self_test_device(ctx context.Context, device *voltha.Device) error
+	Delete_device(ctx context.Context, device *voltha.Device) error
+	Get_device_details(ctx context.Context, device *voltha.Device) error
+	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
+	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Child_device_lost(ctx context.Context, parentDeviceId string, parentPortNo uint32, onuID uint32) error
+	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
+	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
 }
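
Every IAdapter call site likewise gains a leading context argument, which callers can also use to bound the operation. A caller-side sketch follows; the function name and timeout are illustrative.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/adapters"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// adoptWithTimeout shows the caller side of the new interface: the context
// is passed explicitly and also bounds the adoption call.
func adoptWithTimeout(parent context.Context, a adapters.IAdapter, d *voltha.Device) error {
	ctx, cancel := context.WithTimeout(parent, 30*time.Second)
	defer cancel()
	return a.Adopt_device(ctx, d)
}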
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go
index 37e05fd..06b8b3c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
 	if err != nil {
 		panic(err)
 	}
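
This same two-line logger migration recurs in each package below: log.CLogger replaces log.Logger, RegisterPackage replaces AddPackage, and every call site gains ctx. A sketch for a hypothetical package:

package mypkg // hypothetical package following the same pattern

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

var logger log.CLogger

func init() {
	// Register this package so that its log level can be modified at run time.
	var err error
	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
	if err != nil {
		panic(err)
	}
}

func doWork(ctx context.Context) {
	logger.Debugw(ctx, "doing-work", log.Fields{"step": 1})
}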
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
index 24988be..11aa8e6 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
@@ -96,14 +96,14 @@
 	kvStoreEventChan chan *kvstore.Event
 }
 
-func NewConfigManager(kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
+func NewConfigManager(ctx context.Context, kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
 	var kvStorePrefix string
 	if prefix, present := os.LookupEnv("KV_STORE_DATAPATH_PREFIX"); present {
 		kvStorePrefix = prefix
-		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is set", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
 	} else {
 		kvStorePrefix = defaultkvStoreDataPathPrefix
-		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
 	}
 	return &ConfigManager{
 		KVStoreConfigPrefix:   defaultkvStoreConfigPath,
@@ -176,31 +176,31 @@
 func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
 	key := c.makeConfigPath()
 
-	logger.Debugw("monitoring-for-config-change", log.Fields{"key": key})
+	logger.Debugw(ctx, "monitoring-for-config-change", log.Fields{"key": key})
 
 	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
 
 	c.kvStoreEventChan = c.cManager.Backend.CreateWatch(ctx, key, true)
 
-	go c.processKVStoreWatchEvents()
+	go c.processKVStoreWatchEvents(ctx)
 
 	return c.changeEventChan
 }
 
 // processKVStoreWatchEvents processes the event channel received from the Backend for any ChangeType.
 // It checks whether the EventType is valid; for valid EventTypes it creates a ConfigChangeEvent and sends it on the channel
-func (c *ComponentConfig) processKVStoreWatchEvents() {
+func (c *ComponentConfig) processKVStoreWatchEvents(ctx context.Context) {
 
 	ccKeyPrefix := c.makeConfigPath()
 
-	logger.Debugw("processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+	logger.Debugw(ctx, "processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
 
 	ccPathPrefix := c.cManager.Backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
 
 	for watchResp := range c.kvStoreEventChan {
 
 		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
-			logger.Warnw("received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			logger.Warnw(ctx, "received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
 			continue
 		}
 
@@ -220,7 +220,7 @@
 func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("retrieving-config", log.Fields{"key": key})
+	logger.Debugw(ctx, "retrieving-config", log.Fields{"key": key})
 
 	if kvpair, err := c.cManager.Backend.Get(ctx, key); err != nil {
 		return "", err
@@ -230,7 +230,7 @@
 		}
 
 		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
-		logger.Debugw("retrieved-config", log.Fields{"key": key, "value": value})
+		logger.Debugw(ctx, "retrieved-config", log.Fields{"key": key, "value": value})
 		return value, nil
 	}
 }
@@ -238,7 +238,7 @@
 func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
 	key := c.makeConfigPath()
 
-	logger.Debugw("retreiving-list", log.Fields{"key": key})
+	logger.Debugw(ctx, "retrieving-list", log.Fields{"key": key})
 
 	data, err := c.cManager.Backend.List(ctx, key)
 	if err != nil {
@@ -261,7 +261,7 @@
 func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("saving-config", log.Fields{"key": key, "value": configValue})
+	logger.Debugw(ctx, "saving-config", log.Fields{"key": key, "value": configValue})
 
 	//save the data for update config
 	if err := c.cManager.Backend.Put(ctx, key, configValue); err != nil {
@@ -274,7 +274,7 @@
 	//construct key using makeConfigPath
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("deleting-config", log.Fields{"key": key})
+	logger.Debugw(ctx, "deleting-config", log.Fields{"key": key})
 	//delete the config
 	if err := c.cManager.Backend.Delete(ctx, key); err != nil {
 		return err
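
Consumers of ComponentConfig pass the same ctx into every read, write, and watch so the KV operations log with request context. A sketch follows, assuming an existing ConfigManager; the component and key names are illustrative.

package main

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/config"
)

// watchComponentConfig shows the ctx-threaded read/watch flow against a
// component's log-level configuration.
func watchComponentConfig(ctx context.Context, cm *config.ConfigManager) {
	cc := cm.InitComponentConfig("adapter-open-olt", config.ConfigTypeLogLevel)

	if v, err := cc.Retrieve(ctx, "default"); err == nil {
		_ = v // current value stored under the illustrative "default" key
	}
	events := cc.MonitorForConfigChange(ctx) // watch goroutine inherits ctx for its logs
	for ev := range events {
		_ = ev // react to configuration changes
	}
}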
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go
index b00569f..f83e383 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go
@@ -51,9 +51,8 @@
 	initialLogLevel     string // Initial default log level set by helm chart
 }
 
-func NewComponentLogController(cm *ConfigManager) (*ComponentLogController, error) {
-
-	logger.Debug("creating-new-component-log-controller")
+func NewComponentLogController(ctx context.Context, cm *ConfigManager) (*ComponentLogController, error) {
+	logger.Debug(ctx, "creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
 		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
@@ -80,17 +79,17 @@
 // Then, it persists initial default Loglevels into Config Store before
 // starting the loading and processing of all Log Configuration
 func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
-	cc, err := NewComponentLogController(cm)
+	cc, err := NewComponentLogController(ctx, cm)
 	if err != nil {
-		logger.Errorw("unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		logger.Errorw(ctx, "unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
 		return
 	}
 
 	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
-	logger.Debugw("global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+	logger.Debugw(ctx, "global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
 
 	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
-	logger.Debugw("component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+	logger.Debugw(ctx, "component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
 
 	cc.persistInitialDefaultLogConfigs(ctx)
 
@@ -105,21 +104,21 @@
 
 	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw("failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw(ctx, "failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
 		if err != nil {
-			logger.Errorw("failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+			logger.Errorw(ctx, "failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
 		}
 	}
 
 	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw("failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw(ctx, "failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
 		if err != nil {
-			logger.Errorw("failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+			logger.Errorw(ctx, "failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
 		}
 	}
 }
@@ -129,7 +128,7 @@
 func (c *ComponentLogController) persistRegisteredLogPackageList(ctx context.Context) {
 
 	componentMetadataConfig := c.configManager.InitComponentConfig(c.ComponentName, ConfigTypeMetadata)
-	logger.Debugw("component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
+	logger.Debugw(ctx, "component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
 
 	packageList := log.GetPackageNames()
 	packageList = append(packageList, defaultLogLevelKey)
@@ -137,12 +136,12 @@
 
 	packageNames, err := json.Marshal(packageList)
 	if err != nil {
-		logger.Errorw("failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
+		logger.Errorw(ctx, "failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
 		return
 	}
 
 	if err := componentMetadataConfig.Save(ctx, logPackagesListKey, string(packageNames)); err != nil {
-		logger.Errorw("failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
+		logger.Errorw(ctx, "failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
 	}
 }
 
@@ -155,10 +154,10 @@
 	// Load and apply Log Config for first time
 	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
 	if err != nil {
-		logger.Warnw("unable-to-load-log-config-at-startup", log.Fields{"error": err})
+		logger.Warnw(ctx, "unable-to-load-log-config-at-startup", log.Fields{"error": err})
 	} else {
-		if err := c.loadAndApplyLogConfig(initialLogConfig); err != nil {
-			logger.Warnw("unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(ctx, initialLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-apply-log-config-at-startup", log.Fields{"error": err})
 		}
 	}
 
@@ -174,25 +173,25 @@
 		case configEvent = <-componentConfigEventChan:
 
 		}
-		logger.Debugw("processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+		logger.Debugw(ctx, "processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
 
 		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
 		if err != nil {
-			logger.Warnw("unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			logger.Warnw(ctx, "unable-to-fetch-updated-log-config", log.Fields{"error": err})
 			continue
 		}
 
-		logger.Debugw("applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+		logger.Debugw(ctx, "applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
 
-		if err := c.loadAndApplyLogConfig(updatedLogConfig); err != nil {
-			logger.Warnw("unable-to-load-and-apply-log-config", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(ctx, updatedLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-load-and-apply-log-config", log.Fields{"error": err})
 		}
 	}
 
 }
 
 // get active loglevel from the zap logger
-func getActiveLogLevels() map[string]string {
+func getActiveLogLevels(ctx context.Context) map[string]string {
 	loglevels := make(map[string]string)
 
 	// now do the default log level
@@ -204,7 +203,7 @@
 	for _, packageName := range log.GetPackageNames() {
 		level, err := log.GetPackageLogLevel(packageName)
 		if err != nil {
-			logger.Warnw("unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			logger.Warnw(ctx, "unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
 			continue
 		}
 
@@ -213,7 +212,7 @@
 		}
 	}
 
-	logger.Debugw("retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+	logger.Debugw(ctx, "retrieved-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
 
 	return loglevels
 }
@@ -228,16 +227,16 @@
 	// Handle edge cases when the global default loglevel is deleted directly from etcd or set to an invalid value
 	// We should use the hard-coded initial default value in such cases
 	if globalDefaultLogLevel == "" {
-		logger.Warn("global-default-loglevel-not-found-in-config-store")
+		logger.Warn(ctx, "global-default-loglevel-not-found-in-config-store")
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
 	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
-		logger.Warnw("unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
-	logger.Debugw("retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+	logger.Debugw(ctx, "retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
 
 	return globalDefaultLogLevel, nil
 }
@@ -251,7 +250,7 @@
 	effectiveDefaultLogLevel := ""
 	for logConfigKey, logConfigValue := range componentLogConfig {
 		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
-			logger.Warnw("unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
 			delete(componentLogConfig, logConfigKey)
 		} else {
 			if logConfigKey == defaultLogLevelKey {
@@ -268,7 +267,7 @@
 
 	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
 
-	logger.Debugw("retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+	logger.Debugw(ctx, "retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
 
 	return componentLogConfig, nil
 }
@@ -282,7 +281,7 @@
 func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
 	globalLogLevel, err := c.getGlobalLogConfig(ctx)
 	if err != nil {
-		logger.Errorw("unable-to-retrieve-global-log-config", log.Fields{"err": err})
+		logger.Errorw(ctx, "unable-to-retrieve-global-log-config", log.Fields{"err": err})
 	}
 
 	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
@@ -302,17 +301,17 @@
 // create a hash of the loaded configuration using GenerateLogConfigHash;
 // if a previous hash is stored, compare the new hash against it
 // and, if there is any change, call updateLogLevels
-func (c *ComponentLogController) loadAndApplyLogConfig(logConfig map[string]string) error {
+func (c *ComponentLogController) loadAndApplyLogConfig(ctx context.Context, logConfig map[string]string) error {
 	currentLogHash, err := GenerateLogConfigHash(logConfig)
 	if err != nil {
 		return err
 	}
 
 	if c.logHash != currentLogHash {
-		UpdateLogLevels(logConfig)
+		updateLogLevels(ctx, logConfig)
 		c.logHash = currentLogHash
 	} else {
-		logger.Debug("effective-loglevel-config-same-as-currently-active")
+		logger.Debug(ctx, "effective-loglevel-config-same-as-currently-active")
 	}
 
 	return nil
@@ -322,7 +321,7 @@
 // to identify and create map of modified Log Levels of 2 types:
 // - Packages for which log level has been changed
 // - Packages for which log level config has been cleared - set to default log level
-func createModifiedLogLevels(activeLogLevels, updatedLogLevels map[string]string) map[string]string {
+func createModifiedLogLevels(ctx context.Context, activeLogLevels, updatedLogLevels map[string]string) map[string]string {
 	defaultLevel := updatedLogLevels[defaultLogLevelKey]
 
 	modifiedLogLevels := make(map[string]string)
@@ -339,7 +338,7 @@
 	// Log warnings for all invalid packages for which log config has been set
 	for key, value := range updatedLogLevels {
 		if _, exist := activeLogLevels[key]; !exist {
-			logger.Warnw("ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+			logger.Warnw(ctx, "ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
 		}
 	}
 
@@ -349,18 +348,18 @@
 // updateLogLevels updates the log levels for the component:
 // retrieve the active configuration from the logger,
 // compare entries one by one and apply the changes
-func UpdateLogLevels(updatedLogConfig map[string]string) {
+func updateLogLevels(ctx context.Context, updatedLogConfig map[string]string) {
 
-	activeLogLevels := getActiveLogLevels()
-	changedLogLevels := createModifiedLogLevels(activeLogLevels, updatedLogConfig)
+	activeLogLevels := getActiveLogLevels(ctx)
+	changedLogLevels := createModifiedLogLevels(ctx, activeLogLevels, updatedLogConfig)
 
 	// If no changed log levels are found, just return. It may happen on configuration of an invalid package
 	if len(changedLogLevels) == 0 {
-		logger.Debug("no-change-in-effective-loglevel-config")
+		logger.Debug(ctx, "no-change-in-effective-loglevel-config")
 		return
 	}
 
-	logger.Debugw("applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	logger.Debugw(ctx, "applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
 	for key, level := range changedLogLevels {
 		if key == defaultLogLevelKey {
 			if l, err := log.StringToLogLevel(level); err == nil {
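
With UpdateLogLevels now the unexported updateLogLevels(ctx, ...), components drive log reconfiguration only through StartLogLevelConfigProcessing. A minimal wiring sketch; the store type and address are illustrative.

package main

import (
	"context"
	"time"

	conf "github.com/opencord/voltha-lib-go/v3/pkg/config"
	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
)

// startLogMonitoring wires up the log-level watcher: both the ConfigManager
// and the processing goroutine receive ctx, so every log line they emit
// carries the caller's context.
func startLogMonitoring(ctx context.Context, kvClient kvstore.Client) {
	cm := conf.NewConfigManager(ctx, kvClient, "etcd", "etcd:2379", 5*time.Second)
	go conf.StartLogLevelConfigProcessing(cm, ctx)
}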
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
index 1e23a0f..f595dc1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -47,7 +47,7 @@
 }
 
 // NewBackend creates a new instance of a Backend structure
-func NewBackend(storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
+func NewBackend(ctx context.Context, storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
 	var err error
 
 	b := &Backend{
@@ -59,8 +59,8 @@
 		alive:                   false, // connection considered down at start
 	}
 
-	if b.Client, err = b.newClient(address, timeout); err != nil {
-		logger.Errorw("failed-to-create-kv-client",
+	if b.Client, err = b.newClient(ctx, address, timeout); err != nil {
+		logger.Errorw(ctx, "failed-to-create-kv-client",
 			log.Fields{
 				"type": storeType, "address": address,
 				"timeout": timeout, "prefix": pathPrefix,
@@ -71,22 +71,22 @@
 	return b
 }
 
-func (b *Backend) newClient(address string, timeout time.Duration) (kvstore.Client, error) {
+func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
 	switch b.StoreType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func (b *Backend) makePath(key string) string {
+func (b *Backend) makePath(ctx context.Context, key string) string {
 	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
 	return path
 }
 
-func (b *Backend) updateLiveness(alive bool) {
+func (b *Backend) updateLiveness(ctx context.Context, alive bool) {
 	// Periodically push stream of liveness data to the channel,
 	// so that in a live state, the core does not time out and
 	// send a forced liveness message. Push alive state if the
@@ -94,11 +94,11 @@
 	if b.liveness != nil {
 
 		if b.alive != alive {
-			logger.Debug("update-liveness-channel-reason-change")
+			logger.Debug(ctx, "update-liveness-channel-reason-change")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
-			logger.Debug("update-liveness-channel-reason-interval")
+			logger.Debug(ctx, "update-liveness-channel-reason-interval")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		}
@@ -106,7 +106,7 @@
 
 	// Emit log message only for alive state change
 	if b.alive != alive {
-		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		logger.Debugw(ctx, "change-kvstore-alive-status", log.Fields{"alive": alive})
 		b.alive = alive
 	}
 }
@@ -115,9 +115,9 @@
 // post on Liveness channel
 func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
 	alive := b.Client.IsConnectionUp(ctx)
-	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+	logger.Debugw(ctx, "kvstore-liveness-check-result", log.Fields{"alive": alive})
 
-	b.updateLiveness(alive)
+	b.updateLiveness(ctx, alive)
 	return alive
 }
 
@@ -126,11 +126,11 @@
 // or not the connection is still Live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (b *Backend) EnableLivenessChannel() chan bool {
-	logger.Debug("enable-kvstore-liveness-channel")
+func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
+	logger.Debug(ctx, "enable-kvstore-liveness-channel")
 
 	if b.liveness == nil {
-		logger.Debug("create-kvstore-liveness-channel")
+		logger.Debug(ctx, "create-kvstore-liveness-channel")
 
 		// Channel size of 10 to avoid any possibility of blocking in Load conditions
 		b.liveness = make(chan bool, 10)
@@ -144,7 +144,7 @@
 }
 
 // Extract Alive status of Kvstore based on type of error
-func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
+func (b *Backend) isErrorIndicatingAliveKvstore(ctx context.Context, err error) bool {
 	// Alive unless observed an error indicating so
 	alive := true
 
@@ -182,64 +182,64 @@
 
 // List retrieves one or more items that match the specified key
 func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.List(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return pair, err
 }
 
 // Get retrieves an item that matches the specified key
 func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.Get(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return pair, err
 }
 
 // Put stores an item value under the specified key
 func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
-	formattedPath := b.makePath(key)
-	logger.Debugw("putting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Put(ctx, formattedPath, value)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return err
 }
 
 // Delete removes an item under the specified key
 func (b *Backend) Delete(ctx context.Context, key string) error {
-	formattedPath := b.makePath(key)
-	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Delete(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return err
 }
 
 // CreateWatch starts watching events for the specified key
 func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
-	formattedPath := b.makePath(key)
-	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
 
 	return b.Client.Watch(ctx, formattedPath, withPrefix)
 }
 
 // DeleteWatch stops watching events for the specified key
-func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
 
-	b.Client.CloseWatch(formattedPath, ch)
+	b.Client.CloseWatch(ctx, formattedPath, ch)
 }
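
A usage sketch of the updated Backend: construction now takes ctx so a failed KV-client creation is logged with context, and each operation feeds the liveness tracking shown above. The store type, address, prefix, and key are illustrative.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db"
)

func main() {
	ctx := context.Background()
	// NewBackend now takes ctx; client-creation failures are logged with it.
	backend := db.NewBackend(ctx, "etcd", "etcd:2379", 5*time.Second, "service/voltha")

	if err := backend.Put(ctx, "adapters/openolt/sample", "{}"); err != nil {
		// Put/Get/List/Delete feed updateLiveness(ctx, ...) internally.
		_ = err
	}
	liveness := backend.EnableLivenessChannel(ctx) // channel of alive/not-alive transitions
	_ = liveness
}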
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
index 1cf2e1c..fe84b46 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
index 158e626..480d476 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
@@ -88,6 +88,6 @@
 	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
 	ReleaseLock(lockName string) error
 	IsConnectionUp(ctx context.Context) bool // timeout in second
-	CloseWatch(key string, ch chan *Event)
-	Close()
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
 }
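
Call sites of the Client interface update mechanically; here is a sketch of a shutdown path under the new signatures.

package main

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
)

// stopWatching reflects the new interface: CloseWatch and Close take ctx
// purely so their internal logging carries the caller's context.
func stopWatching(ctx context.Context, c kvstore.Client, key string, ch chan *kvstore.Event) {
	c.CloseWatch(ctx, key, ch)
	c.Close(ctx)
}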
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
index aa7aeb0..0de395f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
index d2544dd..c2cd841 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
@@ -44,14 +44,13 @@
 }
 
 // NewConsulClient returns a new client for the Consul KV store
-func NewConsulClient(addr string, timeout time.Duration) (*ConsulClient, error) {
-
+func NewConsulClient(ctx context.Context, addr string, timeout time.Duration) (*ConsulClient, error) {
 	config := consulapi.DefaultConfig()
 	config.Address = addr
 	config.WaitTime = timeout
 	consul, err := consulapi.NewClient(config)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 
@@ -63,7 +62,7 @@
 
 // IsConnectionUp returns whether the connection to the Consul KV store is up
 func (c *ConsulClient) IsConnectionUp(ctx context.Context) bool {
-	logger.Error("Unimplemented function")
+	logger.Error(ctx, "Unimplemented function")
 	return false
 }
 
@@ -80,7 +79,7 @@
 	// For now we ignore meta data
 	kvps, _, err := kv.List(key, &queryOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -103,7 +102,7 @@
 	// For now we ignore meta data
 	kvp, _, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	if kvp != nil {
@@ -122,7 +121,7 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(er)
+		logger.Error(ctx, er)
 		return er
 	}
 
@@ -134,7 +133,7 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Put(&kvp, &writeOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return err
 	}
 	return nil
@@ -149,26 +148,26 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Delete(key, &writeOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return err
 	}
 	return nil
 }
 
-func (c *ConsulClient) deleteSession() {
+func (c *ConsulClient) deleteSession(ctx context.Context) {
 	if c.sessionID != "" {
-		logger.Debug("cleaning-up-session")
+		logger.Debug(ctx, "cleaning-up-session")
 		session := c.consul.Session()
 		_, err := session.Destroy(c.sessionID, nil)
 		if err != nil {
-			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+			logger.Errorw(ctx, "error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
 		}
 	}
 	c.sessionID = ""
 	c.session = nil
 }
 
-func (c *ConsulClient) createSession(ttl time.Duration, retries int) (*consulapi.Session, string, error) {
+func (c *ConsulClient) createSession(ctx context.Context, ttl time.Duration, retries int) (*consulapi.Session, string, error) {
 	session := c.consul.Session()
 	entry := &consulapi.SessionEntry{
 		Behavior: consulapi.SessionBehaviorDelete,
@@ -178,17 +177,17 @@
 	for {
 		id, meta, err := session.Create(entry, nil)
 		if err != nil {
-			logger.Errorw("create-session-error", log.Fields{"error": err})
+			logger.Errorw(ctx, "create-session-error", log.Fields{"error": err})
 			if retries == 0 {
 				return nil, "", err
 			}
 		} else if meta.RequestTime == 0 {
-			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			logger.Errorw(ctx, "create-session-bad-meta-data", log.Fields{"meta-data": meta})
 			if retries == 0 {
 				return nil, "", errors.New("bad-meta-data")
 			}
 		} else if id == "" {
-			logger.Error("create-session-nil-id")
+			logger.Error(ctx, "create-session-nil-id")
 			if retries == 0 {
 				return nil, "", errors.New("ID-nil")
 			}
@@ -199,7 +198,7 @@
 		if retries > 0 {
 			retries--
 		}
-		logger.Debug("retrying-session-create-after-a-second-delay")
+		logger.Debug(ctx, "retrying-session-create-after-a-second-delay")
 		time.Sleep(time.Duration(1) * time.Second)
 	}
 }
@@ -226,30 +225,30 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(er)
+		logger.Error(ctx, er)
 		return nil, er
 	}
 
 	// Cleanup any existing session and recreate new ones.  A key is reserved against a session
 	if c.sessionID != "" {
-		c.deleteSession()
+		c.deleteSession(ctx)
 	}
 
 	// Clear session if reservation is not successful
 	reservationSuccessful := false
 	defer func() {
 		if !reservationSuccessful {
-			logger.Debug("deleting-session")
-			c.deleteSession()
+			logger.Debug(ctx, "deleting-session")
+			c.deleteSession(ctx)
 		}
 	}()
 
-	session, sessionID, err := c.createSession(ttl, -1)
+	session, sessionID, err := c.createSession(ctx, ttl, -1)
 	if err != nil {
-		logger.Errorw("no-session-created", log.Fields{"error": err})
+		logger.Errorw(ctx, "no-session-created", log.Fields{"error": err})
 		return "", errors.New("no-session-created")
 	}
-	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
+	logger.Debugw(ctx, "session-created", log.Fields{"session-id": sessionID})
 	c.sessionID = sessionID
 	c.session = session
 
@@ -258,11 +257,11 @@
 	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
 	result, _, err := kv.Acquire(&kvp, nil)
 	if err != nil {
-		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-acquiring-keys", log.Fields{"error": err})
 		return nil, err
 	}
 
-	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+	logger.Debugw(ctx, "key-acquired", log.Fields{"key": key, "status": result})
 
 	// Irrespective of whether we were successful in acquiring the key, let's read it back and see if it's us.
 	m, err := c.Get(ctx, key)
@@ -270,7 +269,7 @@
 		return nil, err
 	}
 	if m != nil {
-		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		logger.Debugw(ctx, "response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
 		if m.Key == key && isEqual(m.Value, value) {
 			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
 			// per session.
@@ -300,11 +299,11 @@
 		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
 		result, _, err = kv.Release(&kvp, nil)
 		if err != nil {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		if !result {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key})
 		}
 		delete(c.keyReservations, key)
 	}
@@ -384,21 +383,21 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *ConsulClient) CloseWatch(key string, ch chan *Event) {
+func (c *ConsulClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// First close the context
 	var ok bool
 	var watchedChannelsContexts []*channelContextMap
 	c.writeLock.Lock()
 	defer c.writeLock.Unlock()
 	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
-		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		logger.Errorw(ctx, "key-has-no-watched-context-or-channel", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chCtxMap := range watchedChannelsContexts {
 		if chCtxMap.channel == ch {
-			logger.Debug("channel-found")
+			logger.Debug(ctx, "channel-found")
 			chCtxMap.cancel()
 			//close the channel
 			close(ch)
@@ -410,7 +409,7 @@
 	if pos >= 0 {
 		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
 	}
-	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+	logger.Debugw(ctx, "watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
 }
 
 func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
@@ -430,10 +429,10 @@
 	return true
 }
 
-func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
-	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+func (c *ConsulClient) listenForKeyChange(ctx context.Context, key string, ch chan *Event) {
+	logger.Debugw(ctx, "start-watching-channel", log.Fields{"key": key, "channel": ch})
 
-	defer c.CloseWatch(key, ch)
+	defer c.CloseWatch(ctx, key, ch)
 	kv := c.consul.KV()
 	var queryOptions consulapi.QueryOptions
 	queryOptions.WaitTime = defaultKVGetTimeout
@@ -441,7 +440,7 @@
 	// Get the existing value, if any
 	previousKVPair, meta, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Debug(err)
+		logger.Debug(ctx, err)
 	}
 	lastIndex := meta.LastIndex
 
@@ -449,37 +448,37 @@
 	//var waitOptions consulapi.QueryOptions
 	var pair *consulapi.KVPair
 	//watchContext, _ := context.WithCancel(context.Background())
-	waitOptions := queryOptions.WithContext(watchContext)
+	waitOptions := queryOptions.WithContext(ctx)
 	for {
 		//waitOptions = consulapi.QueryOptions{WaitIndex: lastIndex}
 		waitOptions.WaitIndex = lastIndex
 		pair, meta, err = kv.Get(key, waitOptions)
 		select {
-		case <-watchContext.Done():
-			logger.Debug("done-event-received-exiting")
+		case <-ctx.Done():
+			logger.Debug(ctx, "done-event-received-exiting")
 			return
 		default:
 			if err != nil {
-				logger.Warnw("error-from-watch", log.Fields{"error": err})
+				logger.Warnw(ctx, "error-from-watch", log.Fields{"error": err})
 				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
 			} else {
-				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+				logger.Debugw(ctx, "index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
 			}
 		}
 		if err != nil {
-			logger.Debug(err)
+			logger.Debug(ctx, err)
 			// On error, block for 10 milliseconds to prevent endless loop
 			time.Sleep(10 * time.Millisecond)
 		} else if meta.LastIndex <= lastIndex {
-			logger.Info("no-index-change-or-negative")
+			logger.Info(ctx, "no-index-change-or-negative")
 		} else {
-			logger.Debugw("update-received", log.Fields{"pair": pair})
+			logger.Debugw(ctx, "update-received", log.Fields{"pair": pair})
 			if pair == nil {
 				ch <- NewEvent(DELETE, key, []byte(""), -1)
 			} else if !c.isKVEqual(pair, previousKVPair) {
 				// Push the change onto the channel if the data has changed
 				// For now just assume it's a PUT change
-				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				logger.Debugw(ctx, "pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
 				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
 			}
 			previousKVPair = pair
@@ -489,7 +488,7 @@
 }
 
 // Close closes the KV store client
-func (c *ConsulClient) Close() {
+func (c *ConsulClient) Close(ctx context.Context) {
 	var writeOptions consulapi.WriteOptions
 	// Inform any goroutine it's time to say goodbye.
 	c.writeLock.Lock()
@@ -500,7 +499,7 @@
 
 	// Clear the sessionID
 	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
-		logger.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
 	}
 }
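
Constructing a client directly follows the same pattern for both stores; an etcd sketch is below, and NewConsulClient(ctx, addr, timeout) is analogous. The address and key are illustrative.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func main() {
	ctx := context.Background()
	// The constructor now takes ctx so connection errors are logged with it.
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		panic(err)
	}
	defer client.Close(ctx)

	key := "service/voltha/sample" // illustrative key
	ch := client.Watch(ctx, key, false)
	// ... consume PUT/DELETE events from ch; the listener goroutine logs with ctx ...
	client.CloseWatch(ctx, key, ch)
}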
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
index 8d4a462..0165e18 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
@@ -40,7 +40,7 @@
 }
 
 // NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
 	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
 
 	c, err := v3Client.New(v3Client.Config{
@@ -49,7 +49,7 @@
 		LogConfig:   &logconfig,
 	})
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 
@@ -77,7 +77,7 @@
 func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
 	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -94,7 +94,7 @@
 	resp, err := c.ectdAPI.Get(ctx, key)
 
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	for _, ev := range resp.Kvs {
@@ -131,13 +131,13 @@
 	if err != nil {
 		switch err {
 		case context.Canceled:
-			logger.Warnw("context-cancelled", log.Fields{"error": err})
+			logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 		case context.DeadlineExceeded:
-			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+			logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
 		case v3rpcTypes.ErrEmptyKey:
-			logger.Warnw("etcd-client-error", log.Fields{"error": err})
+			logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
 		default:
-			logger.Warnw("bad-endpoints", log.Fields{"error": err})
+			logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
 		}
 		return err
 	}
@@ -150,10 +150,10 @@
 
 	// delete the key
 	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
-		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+		logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
 		return err
 	}
-	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
 	return nil
 }
 
@@ -172,7 +172,7 @@
 
 	resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	// Register the lease id
@@ -185,7 +185,7 @@
 	defer func() {
 		if !reservationSuccessful {
 			if err = c.ReleaseReservation(context.Background(), key); err != nil {
-				logger.Error("cannot-release-lease")
+				logger.Error(ctx, "cannot-release-lease")
 			}
 		}
 	}()
@@ -240,7 +240,7 @@
 	for key, leaseID := range c.keyReservations {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -251,7 +251,7 @@
 // ReleaseReservation releases reservation for a specific key.
 func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
 	// Get the leaseid using the key
-	logger.Debugw("Release-reservation", log.Fields{"key": key})
+	logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
 	var ok bool
 	var leaseID *v3Client.LeaseID
 	c.keyReservationsLock.Lock()
@@ -263,7 +263,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Error(err)
+			logger.Error(ctx, err)
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -288,7 +288,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
+			logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
 			return err
 		}
 	} else {
@@ -320,9 +320,9 @@
 
 	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
 	// json format.
-	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
 	// Launch a go routine to listen for updates
-	go c.listenForKeyChange(channel, ch, cancel)
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
 
 	return ch
 
@@ -369,23 +369,23 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *EtcdClient) CloseWatch(key string, ch chan *Event) {
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// Get the array of channels mapping
 	var watchedChannels []map[chan *Event]v3Client.Watcher
 	var ok bool
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
-		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chMap := range watchedChannels {
 		if t, ok := chMap[ch]; ok {
-			logger.Debug("channel-found")
+			logger.Debug(ctx, "channel-found")
 			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
 			if err := t.Close(); err != nil {
-				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
 			}
 			pos = i
 			break
@@ -397,11 +397,11 @@
 	if pos >= 0 {
 		channelMaps = c.removeChannelMap(key, pos)
 	}
-	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
-func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
-	logger.Debug("start-listening-on-channel ...")
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
 	for resp := range channel {
@@ -409,7 +409,7 @@
 			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
 		}
 	}
-	logger.Debug("stop-listening-on-channel ...")
+	logger.Debug(ctx, "stop-listening-on-channel ...")
 }
 
 func getEventType(event *v3Client.Event) int {
@@ -423,9 +423,9 @@
 }
 
 // Close closes the KV store client
-func (c *EtcdClient) Close() {
+func (c *EtcdClient) Close(ctx context.Context) {
 	if err := c.ectdAPI.Close(); err != nil {
-		logger.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
 	}
 }
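
For downstream code vendoring this library, the kvstore migration is mechanical: every entry point now takes a context as its first argument so errors are logged with whatever request metadata the context carries. A minimal caller sketch, assuming the vendored import paths above and a reachable etcd at "etcd:2379" (address and key prefix are placeholders):

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func main() {
	ctx := context.Background()

	// NewEtcdClient now takes ctx as its first argument.
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.ErrorLevel)
	if err != nil {
		panic(err)
	}
	// Close gained a ctx parameter for the same reason: its error log can
	// carry the caller's request metadata.
	defer client.Close(ctx)

	// List already took ctx for the etcd call; it now reuses it for logging too.
	if _, err := client.List(ctx, "service/voltha"); err != nil {
		panic(err)
	}
}
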
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
index 557de3f..0328d72 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
 	if err != nil {
 		panic(err)
 	}
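
This init block is the template the whole change follows: the package logger becomes a log.CLogger obtained from log.RegisterPackage (replacing log.Logger and log.AddPackage), and every logging call gains ctx as its first argument. A sketch of the post-patch pattern for a hypothetical package ("mypkg" is a placeholder):

package mypkg

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

var logger log.CLogger

func init() {
	// Register this package so its log level can be modified at run time.
	var err error
	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
	if err != nil {
		panic(err)
	}
}

func doWork(ctx context.Context) {
	// ctx comes first so per-request metadata (e.g. a correlation id) can be
	// attached to the emitted log entry.
	logger.Debugw(ctx, "doing-work", log.Fields{"step": 1})
}
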
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
index 3139144..66e719c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
@@ -17,6 +17,7 @@
 
 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"encoding/binary"
 	"fmt"
@@ -503,7 +504,7 @@
 }
 
 //GetMetaData - legacy get method (only want lower 32 bits)
-func GetMetaData(flow *ofp.OfpFlowStats) uint32 {
+func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
 	if flow == nil {
 		return 0
 	}
@@ -512,11 +513,11 @@
 			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
 		}
 	}
-	logger.Debug("No-metadata-present")
+	logger.Debug(ctx, "No-metadata-present")
 	return 0
 }
 
-func GetMetaData64Bit(flow *ofp.OfpFlowStats) uint64 {
+func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
 	if flow == nil {
 		return 0
 	}
@@ -525,12 +526,12 @@
 			return field.GetTableMetadata()
 		}
 	}
-	logger.Debug("No-metadata-present")
+	logger.Debug(ctx, "No-metadata-present")
 	return 0
 }
 
 // function returns write metadata value from write_metadata action field
-func GetMetadataFromWriteMetadataAction(flow *ofp.OfpFlowStats) uint64 {
+func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
 	if flow != nil {
 		for _, instruction := range flow.Instructions {
 			if instruction.Type == uint32(WRITE_METADATA) {
@@ -540,11 +541,11 @@
 			}
 		}
 	}
-	logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
+	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
 	return 0
 }
 
-func GetTechProfileIDFromWriteMetaData(metadata uint64) uint16 {
+func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
 	/*
 	   Write metadata instruction value (metadata) is 8 bytes:
 	   MS 2 bytes: C Tag
@@ -554,15 +555,15 @@
 	   This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var tpId uint16 = 0
-	logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
 	if metadata != 0 {
 		tpId = uint16((metadata >> 32) & 0xFFFF)
-		logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
 	}
 	return tpId
 }
 
-func GetEgressPortNumberFromWriteMetadata(flow *ofp.OfpFlowStats) uint32 {
+func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -571,17 +572,17 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var uniPort uint32 = 0
-	md := GetMetadataFromWriteMetadataAction(flow)
-	logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
 	if md != 0 {
 		uniPort = uint32(md & 0xFFFFFFFF)
-		logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
 	}
 	return uniPort
 
 }
 
-func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint16 {
+func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -590,10 +591,10 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var innerTag uint16 = 0
-	md := GetMetadataFromWriteMetadataAction(flow)
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
 	if md != 0 {
 		innerTag = uint16((md >> 48) & 0xFFFF)
-		logger.Debugw("Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+			logger.Debugw(ctx, "Found CVLAN from write metadata action", log.Fields{"c_vlan": innerTag})
 	}
 	return innerTag
 }
@@ -607,7 +608,7 @@
 		return 0
 	}
 	if md <= 0xffffffff {
-		logger.Debugw("onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		logger.Debugw(ctx, "onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected from OltPipeline"})
 		return md
 	}
 	return (md >> 32) & 0xffffffff
@@ -937,12 +938,12 @@
 }
 
 // flowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
-func MeterEntryFromMeterMod(meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
 	bandStats := make([]*ofp.OfpMeterBandStats, 0)
 	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
 		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
 	if meterMod == nil {
-		logger.Error("Invalid meter mod command")
+		logger.Error(ctx, "Invalid meter mod command")
 		return meter
 	}
 	// config init
@@ -964,7 +965,7 @@
 		bandStats = append(bandStats, band)
 	}
 	meter.Stats.BandStats = bandStats
-	logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
+	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
 	return meter
 
 }
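
The bit arithmetic in the accessors above all follows the 8-byte write-metadata layout described in the comments: most-significant 2 bytes C-Tag, next 2 bytes tech profile ID, least-significant 4 bytes UNI/egress port. A standalone sketch of that decoding (decodeWriteMetadata is an illustrative helper, not part of the library):

package main

import "fmt"

// decodeWriteMetadata splits a write-metadata value into its three fields:
// bits 63..48 C-Tag, bits 47..32 tech profile ID, bits 31..0 UNI port.
func decodeWriteMetadata(md uint64) (cTag uint16, tpID uint16, uniPort uint32) {
	cTag = uint16((md >> 48) & 0xFFFF)
	tpID = uint16((md >> 32) & 0xFFFF)
	uniPort = uint32(md & 0xFFFFFFFF)
	return
}

func main() {
	// C-Tag 100, tech profile ID 64, UNI port 256.
	md := uint64(100)<<48 | uint64(64)<<32 | uint64(256)
	cTag, tpID, uniPort := decodeWriteMetadata(md)
	fmt.Println(cTag, tpID, uniPort) // prints: 100 64 256
}
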
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
index 0d9e3a5..d977e38 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
@@ -16,9 +16,10 @@
 package kafka
 
 import (
-	"time"
-
+	"context"
+	"time"
+
 	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 const (
@@ -61,15 +61,15 @@
 
 // Client represents the set of APIs a Kafka client must implement
 type Client interface {
-	Start() error
-	Stop()
-	CreateTopic(topic *Topic, numPartition int, repFactor int) error
-	DeleteTopic(topic *Topic) error
-	Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
-	UnSubscribe(topic *Topic, ch <-chan *ca.InterContainerMessage) error
-	SubscribeForMetadata(func(fromTopic string, timestamp time.Time))
-	Send(msg interface{}, topic *Topic, keys ...string) error
-	SendLiveness() error
-	EnableLivenessChannel(enable bool) chan bool
-	EnableHealthinessChannel(enable bool) chan bool
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(ctx context.Context, topic *Topic) error
+	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
+	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness(ctx context.Context) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	EnableHealthinessChannel(ctx context.Context, enable bool) chan bool
 }
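
With every method on the Client interface now threading a context, call sites change shape too. A hedged sketch of a caller migrating to the new signatures (startClient and its liveness handling are illustrative, not library code):

package clientutil

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

// startClient starts any kafka.Client implementation and wires up liveness.
func startClient(ctx context.Context, kc kafka.Client) error {
	if err := kc.Start(ctx); err != nil {
		return err
	}
	// EnableLivenessChannel and SendLiveness now also take ctx for logging.
	alive := kc.EnableLivenessChannel(ctx, true)
	go func() {
		for ok := range alive {
			_ = ok // react to liveness transitions here
		}
	}()
	return kc.SendLiveness(ctx)
}
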
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
index 149c150..99b4cdf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
index 1258382..a876c09 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
@@ -50,15 +50,15 @@
 
 	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
 	// now this will return the topic name
-	GetEndpoint(deviceID string, serviceType string) (Endpoint, error)
+	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
 
 	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
 	// devices owned by that service need to be reconciled
-	IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error)
+	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
 
 	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by the
 	// test only
-	GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error)
+	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
 }
 
 type service struct {
@@ -119,9 +119,9 @@
 	return newEndpointManager(backend, opts...)
 }
 
-func (ep *endpointManager) GetEndpoint(deviceID string, serviceType string) (Endpoint, error) {
-	logger.Debugw("getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return "", err
 	}
@@ -133,13 +133,13 @@
 	if endpoint == "" {
 		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
 	}
-	logger.Debugw("returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
 	return endpoint, nil
 }
 
-func (ep *endpointManager) IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error) {
-	logger.Debugw("device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return false, nil
 	}
@@ -150,8 +150,8 @@
 	return m.getReplica() == ReplicaID(replicaNumber), nil
 }
 
-func (ep *endpointManager) GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error) {
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return 0, nil
 	}
@@ -162,8 +162,8 @@
 	return m.getReplica(), nil
 }
 
-func (ep *endpointManager) getOwner(deviceID string, serviceType string) (consistent.Member, error) {
-	serv, dType, err := ep.getServiceAndDeviceType(serviceType)
+func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +171,7 @@
 	return serv.consistentRing.LocateKey(key), nil
 }
 
-func (ep *endpointManager) getServiceAndDeviceType(serviceType string) (*service, string, error) {
+func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
 	// Check whether service exist
 	ep.servicesLock.RLock()
 	serv, serviceExist := ep.services[serviceType]
@@ -179,7 +179,7 @@
 
 	// Load the service and device types if needed
 	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
-		if err := ep.loadServices(); err != nil {
+		if err := ep.loadServices(ctx); err != nil {
 			return nil, "", err
 		}
 
@@ -214,7 +214,7 @@
 // loadServices loads the services (adapters) and device types in memory. Because the data is small and is stored
 // in the DB as binary protobuf, it is simpler to reload everything whenever an inconsistency is detected,
 // instead of watching the DB for updates and acting on them.
-func (ep *endpointManager) loadServices() error {
+func (ep *endpointManager) loadServices(ctx context.Context) error {
 	ep.servicesLock.Lock()
 	defer ep.servicesLock.Unlock()
 	ep.deviceTypeServiceMapLock.Lock()
@@ -276,13 +276,13 @@
 	if logger.V(log.DebugLevel) {
 		for key, val := range ep.services {
 			members := val.consistentRing.GetMembers()
-			logger.Debugw("service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
 			for _, m := range members {
 				n := m.(Member)
-				logger.Debugw("service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
 			}
 		}
-		logger.Debugw("device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
 	}
 	return nil
 }
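
GetEndpoint, IsDeviceOwnedByService and GetReplicaAssignment now accept ctx so the debug logs above carry per-request fields. A lookup sketch, assuming the interface shown here is exported as kafka.EndpointManager and that "adapter" is a registered service type (both assumptions):

package routing

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

// routeRequest resolves the topic (endpoint) owning deviceID for a service type.
// The consistent-hash ring behind GetEndpoint is reloaded on demand by
// loadServices whenever an inconsistency is detected.
func routeRequest(ctx context.Context, em kafka.EndpointManager, deviceID string) (kafka.Endpoint, error) {
	return em.GetEndpoint(ctx, deviceID, "adapter")
}
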
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
index cbde834..368391e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
@@ -64,17 +64,17 @@
 }
 
 type InterContainerProxy interface {
-	Start() error
-	Stop()
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
 	GetDefaultTopic() *Topic
 	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
 	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
-	SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error
-	SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error
-	UnSubscribeFromRequestHandler(topic Topic) error
-	DeleteTopic(topic Topic) error
-	EnableLivenessChannel(enable bool) chan bool
-	SendLiveness() error
+	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
+	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
+	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
+	DeleteTopic(ctx context.Context, topic Topic) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
 }
 
 // interContainerProxy represents the messaging proxy
@@ -146,17 +146,17 @@
 	return newInterContainerProxy(opts...)
 }
 
-func (kp *interContainerProxy) Start() error {
-	logger.Info("Starting-Proxy")
+func (kp *interContainerProxy) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-Proxy")
 
 	// The kafka client should already have been created.  If not, output a fatal error
 	if kp.kafkaClient == nil {
-		logger.Fatal("kafka-client-not-set")
+		logger.Fatal(ctx, "kafka-client-not-set")
 	}
 
 	// Start the kafka client
-	if err := kp.kafkaClient.Start(); err != nil {
-		logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
+	if err := kp.kafkaClient.Start(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
 		return err
 	}
 
@@ -172,20 +172,20 @@
 	return nil
 }
 
-func (kp *interContainerProxy) Stop() {
-	logger.Info("stopping-intercontainer-proxy")
+func (kp *interContainerProxy) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-intercontainer-proxy")
 	kp.doneOnce.Do(func() { close(kp.doneCh) })
 	// TODO : Perform cleanup
-	kp.kafkaClient.Stop()
-	err := kp.deleteAllTopicRequestHandlerChannelMap()
+	kp.kafkaClient.Stop(ctx)
+	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
 	if err != nil {
-		logger.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
 	}
-	err = kp.deleteAllTopicResponseChannelMap()
+	err = kp.deleteAllTopicResponseChannelMap(ctx)
 	if err != nil {
-		logger.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
 	}
-	kp.deleteAllTransactionIdToChannelMap()
+	kp.deleteAllTransactionIdToChannelMap(ctx)
 }
 
 func (kp *interContainerProxy) GetDefaultTopic() *Topic {
@@ -196,7 +196,7 @@
 func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
 	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
 
-	logger.Debugw("InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
 	//	If a replyToTopic is provided then we use it, otherwise just use the  default toTopic.  The replyToTopic is
 	// typically the device ID.
 	responseTopic := replyToTopic
@@ -216,17 +216,17 @@
 		var protoRequest *ic.InterContainerMessage
 
 		// Encode the request
-		protoRequest, err = encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
+		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
 		if err != nil {
-			logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
 			chnl <- NewResponse(RpcFormattingError, err, nil)
 			return
 		}
 
 		// Subscribe for response, if needed, before sending request
 		var ch <-chan *ic.InterContainerMessage
-		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
 			chnl <- NewResponse(RpcTransportError, err, nil)
 			return
 		}
@@ -234,10 +234,10 @@
 		// Send request - if the topic is formatted with a device Id then we will send the request using a
 		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
 		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-		logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
 
 		// if the message is not sent on kafka, publish an event and close the channel
-		if err = kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
+		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
 			chnl <- NewResponse(RpcTransportError, err, nil)
 			return
 		}
@@ -250,8 +250,8 @@
 
 		defer func() {
 			// Remove the subscription for a response on return
-			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
-				logger.Warnw("invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
 			}
 		}()
 
@@ -259,11 +259,11 @@
 		select {
 		case msg, ok := <-ch:
 			if !ok {
-				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
 				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
 			}
-			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
-			if responseBody, err := decodeResponse(msg); err != nil {
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(ctx, msg); err != nil {
 				chnl <- NewResponse(RpcReply, err, nil)
 			} else {
 				if responseBody.Success {
@@ -279,12 +279,12 @@
 				}
 			}
 		case <-ctx.Done():
-			logger.Errorw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
 			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
 			chnl <- NewResponse(RpcTimeout, err, nil)
 		case <-kp.doneCh:
 			chnl <- NewResponse(RpcSystemClosing, nil, nil)
-			logger.Warnw("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
 		}
 	}()
 	return chnl
@@ -302,9 +302,9 @@
 	}
 
 	// Encode the request
-	protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
+	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
 	if err != nil {
-		logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
 		return false, nil
 	}
 
@@ -312,8 +312,8 @@
 	var ch <-chan *ic.InterContainerMessage
 	if waitForResponse {
 		var err error
-		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
 		}
 	}
 
@@ -321,10 +321,10 @@
 	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
 	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
 	//key := GetDeviceIdFromTopic(*toTopic)
-	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
 	go func() {
-		if err := kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
-			logger.Errorw("send-failed", log.Fields{
+		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			logger.Errorw(ctx, "send-failed", log.Fields{
 				"topic": toTopic,
 				"key":   key,
 				"error": err})
@@ -345,8 +345,8 @@
 		// Wait for response as well as timeout or cancellation
 		// Remove the subscription for a response on return
 		defer func() {
-			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
-				logger.Errorw("response-unsubscribe-failed", log.Fields{
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
 					"id":    protoRequest.Header.Id,
 					"error": err})
 			}
@@ -354,7 +354,7 @@
 		select {
 		case msg, ok := <-ch:
 			if !ok {
-				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
 				protoError := &ic.Error{Reason: "channel-closed"}
 				var marshalledArg *any.Any
 				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
@@ -362,16 +362,16 @@
 				}
 				return false, marshalledArg
 			}
-			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
 			var responseBody *ic.InterContainerResponseBody
 			var err error
-			if responseBody, err = decodeResponse(msg); err != nil {
-				logger.Errorw("decode-response-error", log.Fields{"error": err})
+			if responseBody, err = decodeResponse(ctx, msg); err != nil {
+				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
 				// FIXME we should return something
 			}
 			return responseBody.Success, responseBody.Result
 		case <-ctx.Done():
-			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
 
@@ -381,7 +381,7 @@
 			}
 			return false, marshalledArg
 		case <-childCtx.Done():
-			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
 
@@ -391,7 +391,7 @@
 			}
 			return false, marshalledArg
 		case <-kp.doneCh:
-			logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
 			return true, nil
 		}
 	}
@@ -400,55 +400,55 @@
 
 // SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
 // when a message is received on a given topic
-func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error {
+func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
 
 	// Subscribe to receive messages for that topic
 	var ch <-chan *ic.InterContainerMessage
 	var err error
-	if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
 		//if ch, err = kp.Subscribe(topic); err != nil {
-		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 
 	kp.defaultRequestHandlerInterface = handler
 	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
 	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ch, topic, handler)
+	go kp.waitForMessages(ctx, ch, topic, handler)
 
 	return nil
 }
 
 // SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
 // when a message is received on a given topic.  So far there is only 1 target registered per microservice
-func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error {
+func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
 	// Subscribe to receive messages for that topic
 	var ch <-chan *ic.InterContainerMessage
 	var err error
-	if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
-		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
 
 	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ch, topic, kp.defaultRequestHandlerInterface)
+	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
 
 	return nil
 }
 
-func (kp *interContainerProxy) UnSubscribeFromRequestHandler(topic Topic) error {
-	return kp.deleteFromTopicRequestHandlerChannelMap(topic.Name)
+func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
 }
 
-func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(topic string) error {
+func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
 	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
 		// Unsubscribe to this topic first - this will close the subscribed channel
 		var err error
-		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
+		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
 		}
 		delete(kp.topicToResponseChannelMap, topic)
 		return err
@@ -458,16 +458,16 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTopicResponseChannelMap() error {
-	logger.Debug("delete-all-topic-response-channel")
+func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-response-channel")
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
 	var unsubscribeFailTopics []string
 	for topic := range kp.topicToResponseChannelMap {
 		// Unsubscribe to this topic first - this will close the subscribed channel
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
 			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
 			// Do not return. Continue to try to unsubscribe to other topics.
 		} else {
 			// Only delete from channel map if successfully unsubscribed.
@@ -488,12 +488,12 @@
 	}
 }
 
-func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(topic string) error {
+func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
 	kp.lockTopicRequestHandlerChannelMap.Lock()
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
 	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
 		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
 			return err
 		}
 		delete(kp.topicToRequestHandlerChannelMap, topic)
@@ -504,16 +504,16 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap() error {
-	logger.Debug("delete-all-topic-request-channel")
+func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-request-channel")
 	kp.lockTopicRequestHandlerChannelMap.Lock()
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
 	var unsubscribeFailTopics []string
 	for topic := range kp.topicToRequestHandlerChannelMap {
 		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
 			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
 			// Do not return. Continue to try to unsubscribe to other topics.
 		} else {
 			// Only delete from channel map if successfully unsubscribed.
@@ -556,8 +556,8 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap() {
-	logger.Debug("delete-all-transaction-id-channel-map")
+func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
+	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
 	kp.lockTransactionIdToChannelMap.Lock()
 	defer kp.lockTransactionIdToChannelMap.Unlock()
 	for key, value := range kp.transactionIdToChannelMap {
@@ -566,27 +566,27 @@
 	}
 }
 
-func (kp *interContainerProxy) DeleteTopic(topic Topic) error {
+func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
 	// If we have any consumers on that topic we need to close them
-	if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
-		logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
 	}
-	if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
-		logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
 	}
 	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
 
-	return kp.kafkaClient.DeleteTopic(&topic)
+	return kp.kafkaClient.DeleteTopic(ctx, &topic)
 }
 
-func encodeReturnedValue(returnedVal interface{}) (*any.Any, error) {
+func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
 	// Encode the response argument - needs to be a proto message
 	if returnedVal == nil {
 		return nil, nil
 	}
 	protoValue, ok := returnedVal.(proto.Message)
 	if !ok {
-		logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
 		err := errors.New("response-value-not-proto-message")
 		return nil, err
 	}
@@ -595,13 +595,13 @@
 	var marshalledReturnedVal *any.Any
 	var err error
 	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
-		logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
 		return nil, err
 	}
 	return marshalledReturnedVal, nil
 }
 
-func encodeDefaultFailedResponse(request *ic.InterContainerMessage) *ic.InterContainerMessage {
+func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
 	responseHeader := &ic.Header{
 		Id:        request.Header.Id,
 		Type:      ic.MessageType_RESPONSE,
@@ -617,7 +617,7 @@
 	var err error
 	// Error should never happen here
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
 	}
 
 	return &ic.InterContainerMessage{
@@ -629,8 +629,8 @@
 
 //encodeResponse encodes a response to send over kafka and returns an InterContainerMessage message on success
 //or an error on failure
-func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
-	//logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
 	responseHeader := &ic.Header{
 		Id:        request.Header.Id,
 		Type:      ic.MessageType_RESPONSE,
@@ -646,8 +646,8 @@
 
 	// for now we support only 1 returned value - (excluding the error)
 	if len(returnedValues) > 0 {
-		if marshalledReturnedVal, err = encodeReturnedValue(returnedValues[0]); err != nil {
-			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
 		}
 	}
 
@@ -659,7 +659,7 @@
 	// Marshal the response body
 	var marshalledResponseBody *any.Any
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
 		return nil, err
 	}
 
@@ -669,7 +669,7 @@
 	}, nil
 }
 
-func CallFuncByName(myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
 	myClassValue := reflect.ValueOf(myClass)
 	// Capitalize the first letter of funcName to work around the requirement that a method
 	// must be exported (start with a capital letter) to be invoked from a different package
@@ -678,15 +678,16 @@
 	if !m.IsValid() {
 		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
 	}
-	in := make([]reflect.Value, len(params))
+	in := make([]reflect.Value, len(params)+1)
+	in[0] = reflect.ValueOf(ctx)
 	for i, param := range params {
-		in[i] = reflect.ValueOf(param)
+		in[i+1] = reflect.ValueOf(param)
 	}
 	out = m.Call(in)
 	return
 }
 
-func (kp *interContainerProxy) addTransactionId(transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
 	arg := &KVArg{
 		Key:   TransactionKey,
 		Value: &ic.StrType{Val: transactionId},
@@ -695,7 +696,7 @@
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
-		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -705,11 +706,11 @@
 	return append(currentArgs, protoArg)
 }
 
-func (kp *interContainerProxy) addFromTopic(fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
-		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -719,7 +720,7 @@
 	return append(currentArgs, protoArg)
 }
 
-func (kp *interContainerProxy) handleMessage(msg *ic.InterContainerMessage, targetInterface interface{}) {
+func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
 
 	// First extract the header to know whether this is a request - responses are handled by a different handler
 	if msg.Header.Type == ic.MessageType_REQUEST {
@@ -729,21 +730,21 @@
 		// Get the request body
 		requestBody := &ic.InterContainerRequestBody{}
 		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
-			logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
 		} else {
-			logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
 			// let the callee unpack the arguments as it is the only one that knows the real proto type
 			// Augment the requestBody with the message Id as it will be used in scenarios where cores
 			// are set in pairs and competing
-			requestBody.Args = kp.addTransactionId(msg.Header.Id, requestBody.Args)
+			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
 
 			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
 			// needs to send an unsollicited message to the currently requested container
-			requestBody.Args = kp.addFromTopic(msg.Header.FromTopic, requestBody.Args)
+			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
 
-			out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
+			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
 			if err != nil {
-				logger.Warn(err)
+				logger.Warn(ctx, err)
 			}
 		}
 		// Response required?
@@ -763,7 +764,7 @@
 				if out[lastIndex].Interface() != nil { // Error
 					if retError, ok := out[lastIndex].Interface().(error); ok {
 						if retError.Error() == ErrorTransactionNotAcquired.Error() {
-							logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
 							return // Ignore - process is in competing mode and ignored transaction
 						}
 						returnError = &ic.Error{Reason: retError.Error()}
@@ -773,12 +774,12 @@
 						returnedValues = append(returnedValues, returnError)
 					}
 				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
-					logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
 					return // Ignore - should not happen
 				} else { // Non-error case
 					success = true
 					for idx, val := range out {
-						//logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
 						if idx != lastIndex {
 							returnedValues = append(returnedValues, val.Interface())
 						}
@@ -787,9 +788,9 @@
 			}
 
 			var icm *ic.InterContainerMessage
-			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
-				logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
-				icm = encodeDefaultFailedResponse(msg)
+			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
+				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(ctx, msg)
 			}
 			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
 			// by providing a message key.  The key is encoded in the topic name.  If the deviceId is not
 			// present in the topic name, the key is empty and messages may be spread across random partitions.
 			// partitions.
 			replyTopic := &Topic{Name: msg.Header.FromTopic}
 			key := msg.Header.KeyTopic
-			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
 			// TODO: handle error response.
 			go func() {
-				if err := kp.kafkaClient.Send(icm, replyTopic, key); err != nil {
-					logger.Errorw("send-reply-failed", log.Fields{
+				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
+					logger.Errorw(ctx, "send-reply-failed", log.Fields{
 						"topic": replyTopic,
 						"key":   key,
 						"error": err})
@@ -809,26 +810,26 @@
 			}()
 		}
 	} else if msg.Header.Type == ic.MessageType_RESPONSE {
-		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
-		go kp.dispatchResponse(msg)
+		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(ctx, msg)
 	} else {
-		logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
+		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
 	}
 }
 
-func (kp *interContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
 	//	Wait for messages
 	for msg := range ch {
-		//logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
-		go kp.handleMessage(msg, targetInterface)
+		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(context.Background(), msg, targetInterface)
 	}
 }
 
-func (kp *interContainerProxy) dispatchResponse(msg *ic.InterContainerMessage) {
+func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
 	kp.lockTransactionIdToChannelMap.RLock()
 	defer kp.lockTransactionIdToChannelMap.RUnlock()
 	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
-		logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
 		return
 	}
 	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
@@ -838,8 +839,8 @@
 // This method is built to prevent all subscribers from receiving all messages, as is the case with the Subscribe
 // API. There is one response channel waiting for kafka messages before dispatching the message to the
 // corresponding waiting channel
-func (kp *interContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
-	logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
 
 	// Create a specific channel for this consumer.  We cannot use the channel from the kafka client as it will
 	// broadcast any message for this topic to all channels waiting on it.
@@ -850,27 +851,27 @@
 	return ch, nil
 }
 
-func (kp *interContainerProxy) unSubscribeForResponse(trnsId string) error {
-	logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
+	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
 	kp.deleteFromTransactionIdToChannelMap(trnsId)
 	return nil
 }
 
-func (kp *interContainerProxy) EnableLivenessChannel(enable bool) chan bool {
-	return kp.kafkaClient.EnableLivenessChannel(enable)
+func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
 }
 
-func (kp *interContainerProxy) EnableHealthinessChannel(enable bool) chan bool {
-	return kp.kafkaClient.EnableHealthinessChannel(enable)
+func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
 }
 
-func (kp *interContainerProxy) SendLiveness() error {
-	return kp.kafkaClient.SendLiveness()
+func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
+	return kp.kafkaClient.SendLiveness(ctx)
 }
 
 //encodeRequest formats a request to send over kafka and returns an InterContainerMessage message on success
 //or an error on failure
-func encodeRequest(rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
 	requestHeader := &ic.Header{
 		Id:        uuid.New().String(),
 		Type:      ic.MessageType_REQUEST,
@@ -895,12 +896,12 @@
 		// ascertain the value interface type is a proto.Message
 		protoValue, ok := arg.Value.(proto.Message)
 		if !ok {
-			logger.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
 			err := errors.New("argument-value-not-proto-message")
 			return nil, err
 		}
 		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
-			logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
+			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
 			return nil, err
 		}
 		protoArg := &ic.Argument{
@@ -913,7 +914,7 @@
 	var marshalledData *any.Any
 	var err error
 	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
-		logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
 		return nil, err
 	}
 	request := &ic.InterContainerMessage{
@@ -923,14 +924,14 @@
 	return request, nil
 }
 
-func decodeResponse(response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
 	//	Extract the message body
 	responseBody := ic.InterContainerResponseBody{}
 	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
-		logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
 		return nil, err
 	}
-	//logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
 
 	return &responseBody, nil
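
InvokeAsyncRPC already took a context for its timeout and cancellation branches; this change threads that same ctx into the encode, subscribe and send helpers so their logs line up with the request. A caller sketch, with the rpc name and topic names as placeholders:

package rpcclient

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

// getDevice issues an async RPC and waits for the single response.
func getDevice(ctx context.Context, kip kafka.InterContainerProxy, deviceID string) *kafka.RpcResponse {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	toTopic := kafka.Topic{Name: "rw_core"} // placeholder request topic
	replyTo := kafka.Topic{Name: deviceID}  // per-device reply topic
	ch := kip.InvokeAsyncRPC(ctx, "GetDevice", &toTopic, &replyTo, true, deviceID)

	// Either a response arrives on ch, or the ctx.Done() branch in
	// InvokeAsyncRPC delivers an RpcTimeout response when the deadline hits.
	return <-ch
}
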
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
index 581cf49..87c7ce4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
@@ -231,8 +231,8 @@
 	return client
 }
 
-func (sc *SaramaClient) Start() error {
-	logger.Info("Starting-kafka-sarama-client")
+func (sc *SaramaClient) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-kafka-sarama-client")
 
 	// Create the Done channel
 	sc.doneCh = make(chan int, 1)
@@ -242,26 +242,26 @@
 	// Add a cleanup in case of failure to startup
 	defer func() {
 		if err != nil {
-			sc.Stop()
+			sc.Stop(ctx)
 		}
 	}()
 
 	// Create the Cluster Admin
-	if err = sc.createClusterAdmin(); err != nil {
-		logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
+	if err = sc.createClusterAdmin(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-cluster-admin", log.Fields{"error": err})
 		return err
 	}
 
 	// Create the Publisher
-	if err := sc.createPublisher(); err != nil {
-		logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
+	if err := sc.createPublisher(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-publisher", log.Fields{"error": err})
 		return err
 	}
 
 	if sc.consumerType == DefaultConsumerType {
 		// Create the master consumers
-		if err := sc.createConsumer(); err != nil {
-			logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
+		if err := sc.createConsumer(ctx); err != nil {
+			logger.Errorw(ctx, "Cannot-create-kafka-consumers", log.Fields{"error": err})
 			return err
 		}
 	}
@@ -269,15 +269,15 @@
 	// Create the topic to consumers/channel map
 	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
 
-	logger.Info("kafka-sarama-client-started")
+	logger.Info(ctx, "kafka-sarama-client-started")
 
 	sc.started = true
 
 	return nil
 }
 
-func (sc *SaramaClient) Stop() {
-	logger.Info("stopping-sarama-client")
+func (sc *SaramaClient) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-sarama-client")
 
 	sc.started = false
 
@@ -286,38 +286,38 @@
 
 	if sc.producer != nil {
 		if err := sc.producer.Close(); err != nil {
-			logger.Errorw("closing-producer-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-producer-failed", log.Fields{"error": err})
 		}
 	}
 
 	if sc.consumer != nil {
 		if err := sc.consumer.Close(); err != nil {
-			logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-partition-consumer-failed", log.Fields{"error": err})
 		}
 	}
 
 	for key, val := range sc.groupConsumers {
-		logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
+		logger.Debugw(ctx, "closing-group-consumer", log.Fields{"topic": key})
 		if err := val.Close(); err != nil {
-			logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+			logger.Errorw(ctx, "closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
 		}
 	}
 
 	if sc.cAdmin != nil {
 		if err := sc.cAdmin.Close(); err != nil {
-			logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-cluster-admin-failed", log.Fields{"error": err})
 		}
 	}
 
 	//TODO: Clear the consumers map
 	//sc.clearConsumerChannelMap()
 
-	logger.Info("sarama-client-stopped")
+	logger.Info(ctx, "sarama-client-stopped")
 }
 
 //createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
 // the invoking function must hold the lock
-func (sc *SaramaClient) createTopic(topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) createTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
 	// Set the topic details
 	topicDetail := &sarama.TopicDetail{}
 	topicDetail.NumPartitions = int32(numPartition)
@@ -329,29 +329,29 @@
 	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
 		if err == sarama.ErrTopicAlreadyExists {
 			//	Not an error
-			logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw(ctx, "topic-already-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw("create-topic-failure", log.Fields{"error": err})
+		logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err})
 		return err
 	}
 	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
 	// do so.
-	logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	logger.Debugw(ctx, "topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
 	return nil
 }
 
 //CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
 // ensure no two go routines are performing operations on the same topic
-func (sc *SaramaClient) CreateTopic(topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	return sc.createTopic(topic, numPartition, repFactor)
+	return sc.createTopic(ctx, topic, numPartition, repFactor)
 }
 
 //DeleteTopic removes a topic from the kafka Broker
-func (sc *SaramaClient) DeleteTopic(topic *Topic) error {
+func (sc *SaramaClient) DeleteTopic(ctx context.Context, topic *Topic) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
@@ -359,16 +359,16 @@
 	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
 		if err == sarama.ErrUnknownTopicOrPartition {
 			//	Not an error as does not exist
-			logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw(ctx, "topic-not-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		logger.Errorw(ctx, "delete-topic-failed", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 
 	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
-	if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
-		logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+	if err := sc.clearTopicFromConsumerChannelMap(ctx, *topic); err != nil {
+		logger.Errorw(ctx, "failure-clearing-channels", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 	return nil
@@ -376,18 +376,18 @@
 
 // Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
 // messages from that topic
-func (sc *SaramaClient) Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "subscribe", log.Fields{"topic": topic.Name})
 
 	// If a consumer already exists for that topic then reuse it
 	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
-		logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
+		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
 		// Create a channel specific to that consumer and add it to the consumer channel map
 		ch := make(chan *ic.InterContainerMessage)
-		sc.addChannelToConsumerChannelMap(topic, ch)
+		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
 		return ch, nil
 	}
 
@@ -398,13 +398,13 @@
 	// Use the consumerType option to figure out the type of consumer to launch
 	if sc.consumerType == PartitionConsumer {
 		if sc.autoCreateTopic {
-			if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-				logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+			if err = sc.createTopic(ctx, topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
 				return nil, err
 			}
 		}
-		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
-			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(ctx, topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 	} else if sc.consumerType == GroupCustomer {
@@ -412,7 +412,7 @@
 		// does not consume from a precreated topic in some scenarios
 		//if sc.autoCreateTopic {
 		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-		//		logger.Errorw("create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		logger.Errorw(ctx, "create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
 		//		return nil, err
 		//	}
 		//}
@@ -425,13 +425,13 @@
 			// Need to use a unique group Id per topic
 			groupId = sc.consumerGroupPrefix + topic.Name
 		}
-		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
-			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(ctx, topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 			return nil, err
 		}
 
 	} else {
-		logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		logger.Warnw(ctx, "unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
 		return nil, errors.New("unknown-consumer-type")
 	}
 
@@ -439,37 +439,37 @@
 }
 
 //UnSubscribe unsubscribes a consumer from a given topic
-func (sc *SaramaClient) UnSubscribe(topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
 	var err error
-	if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
-		logger.Errorw("failed-removing-channel", log.Fields{"error": err})
+	if err = sc.removeChannelFromConsumerChannelMap(ctx, *topic, ch); err != nil {
+		logger.Errorw(ctx, "failed-removing-channel", log.Fields{"error": err})
 	}
-	if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
-		logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
+	if err = sc.deleteFromGroupConsumers(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "failed-deleting-group-consumer", log.Fields{"error": err})
 	}
 	return err
 }
 
-func (sc *SaramaClient) SubscribeForMetadata(callback func(fromTopic string, timestamp time.Time)) {
+func (sc *SaramaClient) SubscribeForMetadata(ctx context.Context, callback func(fromTopic string, timestamp time.Time)) {
 	sc.metadataCallback = callback
 }
 
-func (sc *SaramaClient) updateLiveness(alive bool) {
+func (sc *SaramaClient) updateLiveness(ctx context.Context, alive bool) {
 	// Post a consistent stream of liveness data to the channel,
 	// so that in a live state, the core does not timeout and
 	// send a forced liveness message. Production of liveness
 	// events to the channel is rate-limited by livenessChannelInterval.
 	if sc.liveness != nil {
 		if sc.alive != alive {
-			logger.Info("update-liveness-channel-because-change")
+			logger.Info(ctx, "update-liveness-channel-because-change")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
-			logger.Info("update-liveness-channel-because-interval")
+			logger.Info(ctx, "update-liveness-channel-because-interval")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		}
@@ -477,21 +477,21 @@
 
 	// Only emit a log message when the state changes
 	if sc.alive != alive {
-		logger.Info("set-client-alive", log.Fields{"alive": alive})
+		logger.Info(ctx, "set-client-alive", log.Fields{"alive": alive})
 		sc.alive = alive
 	}
 }
 
 // Once unhealthy, we never go back
-func (sc *SaramaClient) setUnhealthy() {
+func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
 	sc.healthy = false
 	if sc.healthiness != nil {
-		logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
 		sc.healthiness <- sc.healthy
 	}
 }
 
-func (sc *SaramaClient) isLivenessError(err error) bool {
+func (sc *SaramaClient) isLivenessError(ctx context.Context, err error) bool {
 	// Sarama producers and consumers encapsulate the error inside
 	// a ProducerError or ConsumerError struct.
 	if prodError, ok := err.(*sarama.ProducerError); ok {
@@ -506,48 +506,48 @@
 
 	switch err.Error() {
 	case context.DeadlineExceeded.Error():
-		logger.Info("is-liveness-error-timeout")
+		logger.Info(ctx, "is-liveness-error-timeout")
 		return true
 	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
-		logger.Info("is-liveness-error-no-brokers")
+		logger.Info(ctx, "is-liveness-error-no-brokers")
 		return true
 	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
-		logger.Info("is-liveness-error-shutting-down")
+		logger.Info(ctx, "is-liveness-error-shutting-down")
 		return true
 	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
-		logger.Info("is-liveness-error-not-available")
+		logger.Info(ctx, "is-liveness-error-not-available")
 		return true
 	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
-		logger.Info("is-liveness-error-circuit-breaker-open")
+		logger.Info(ctx, "is-liveness-error-circuit-breaker-open")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
-		logger.Info("is-liveness-error-connection-refused")
+		logger.Info(ctx, "is-liveness-error-connection-refused")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
-		logger.Info("is-liveness-error-io-timeout")
+		logger.Info(ctx, "is-liveness-error-io-timeout")
 		return true
 	}
 
 	// Other errors shouldn't trigger a loss of liveness
 
-	logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
+	logger.Infow(ctx, "is-liveness-error-ignored", log.Fields{"err": err})
 
 	return false
 }
 
 // Send formats and sends the request onto the kafka messaging bus.
-func (sc *SaramaClient) Send(msg interface{}, topic *Topic, keys ...string) error {
+func (sc *SaramaClient) Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error {
 
 	// Assert message is a proto message
 	var protoMsg proto.Message
 	var ok bool
 	// ascertain the value interface type is a proto.Message
 	if protoMsg, ok = msg.(proto.Message); !ok {
-		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
+		logger.Warnw(ctx, "message-not-proto-message", log.Fields{"msg": msg})
 		return fmt.Errorf("not-a-proto-msg-%s", msg)
 	}
 
@@ -555,7 +555,7 @@
 	var err error
 	//	Create the Sarama producer message
 	if marshalled, err = proto.Marshal(protoMsg); err != nil {
-		logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		logger.Errorw(ctx, "marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
 		return err
 	}
 	key := ""
@@ -574,12 +574,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(true)
+		logger.Debugw(ctx, "message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw("error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(notOk) {
-			sc.updateLiveness(false)
+		logger.Debugw(ctx, "error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
 		}
 		return notOk
 	}
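From the caller's side the producer path is now fully context-first. A hedged sketch of a publish helper over the revised Send; the topic name and partition key are illustrative assumptions.

package example

import (
	"context"

	"github.com/golang/protobuf/proto"
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

// publishEvent is a sketch only: it forwards a proto message through Send,
// whose producer failures already feed the liveness state shown above.
func publishEvent(ctx context.Context, sc *kafka.SaramaClient, event proto.Message, deviceID string) error {
	topic := kafka.Topic{Name: "voltha.events"} // illustrative topic
	return sc.Send(ctx, event, &topic, deviceID)
}
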
@@ -591,11 +591,11 @@
 // or not the channel is still live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
-	logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.liveness == nil {
-			logger.Info("kafka-create-liveness-channel")
+			logger.Info(ctx, "kafka-create-liveness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -615,11 +615,11 @@
 // Enable the Healthiness monitor channel. This channel will report "false"
 // if the kafka consumers die, or some other problem occurs which is
 // catastrophic that would require re-creating the client.
-func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
-	logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.healthiness == nil {
-			logger.Info("kafka-create-healthiness-channel")
+			logger.Info(ctx, "kafka-create-healthiness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -638,7 +638,7 @@
 
 // send an empty message on the liveness channel to check whether connectivity has
 // been restored.
-func (sc *SaramaClient) SendLiveness() error {
+func (sc *SaramaClient) SendLiveness(ctx context.Context) error {
 	if !sc.started {
 		return fmt.Errorf("SendLiveness() called while not started")
 	}
@@ -654,12 +654,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(true)
+		logger.Debugw(ctx, "liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(notOk) {
-			sc.updateLiveness(false)
+		logger.Debugw(ctx, "liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
 		}
 		return notOk
 	}
@@ -686,7 +686,7 @@
 	return sarama.OffsetNewest
 }
 
-func (sc *SaramaClient) createClusterAdmin() error {
+func (sc *SaramaClient) createClusterAdmin(ctx context.Context) error {
 	config := sarama.NewConfig()
 	config.Version = sarama.V1_0_0_0
 
@@ -694,7 +694,7 @@
 	var cAdmin sarama.ClusterAdmin
 	var err error
 	if cAdmin, err = sarama.NewClusterAdmin([]string{sc.KafkaAddress}, config); err != nil {
-		logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
+		logger.Errorw(ctx, "cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
 		return err
 	}
 	sc.cAdmin = cAdmin
@@ -739,24 +739,24 @@
 	return nil
 }
 
-func (sc *SaramaClient) addChannelToConsumerChannelMap(topic *Topic, ch chan *ic.InterContainerMessage) {
+func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		consumerCh.channels = append(consumerCh.channels, ch)
 		return
 	}
-	logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw(ctx, "consumers-channel-not-exist", log.Fields{"topic": topic.Name})
 }
 
 //closeConsumers closes a list of sarama consumers.  The consumers can either be partition consumers or group consumers
-func closeConsumers(consumers []interface{}) error {
+func closeConsumers(ctx context.Context, consumers []interface{}) error {
 	var err error
 	for _, consumer := range consumers {
 		//	Is it a partition consumer?
 		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			if errTemp := partionConsumer.Close(); errTemp != nil {
-				logger.Debugw("partition!!!", log.Fields{"err": errTemp})
+				logger.Debugw(ctx, "partition!!!", log.Fields{"err": errTemp})
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur on race condition
 					err = nil
@@ -778,35 +778,35 @@
 	return err
 }
 
-func (sc *SaramaClient) removeChannelFromConsumerChannelMap(topic Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		// Channel will be closed in the removeChannel method
-		consumerCh.channels = removeChannel(consumerCh.channels, ch)
+		consumerCh.channels = removeChannel(ctx, consumerCh.channels, ch)
 		// If there are no more channels then we can close the consumer itself
 		if len(consumerCh.channels) == 0 {
-			logger.Debugw("closing-consumers", log.Fields{"topic": topic})
-			err := closeConsumers(consumerCh.consumers)
+			logger.Debugw(ctx, "closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(ctx, consumerCh.consumers)
 			//err := consumerCh.consumers.Close()
 			delete(sc.topicToConsumerChannelMap, topic.Name)
 			return err
 		}
 		return nil
 	}
-	logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return errors.New("topic-does-not-exist")
 }
 
-func (sc *SaramaClient) clearTopicFromConsumerChannelMap(topic Topic) error {
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(ctx context.Context, topic Topic) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		for _, ch := range consumerCh.channels {
 			// Channel will be closed in the removeChannel method
-			removeChannel(consumerCh.channels, ch)
+			removeChannel(ctx, consumerCh.channels, ch)
 		}
-		err := closeConsumers(consumerCh.consumers)
+		err := closeConsumers(ctx, consumerCh.consumers)
 		//if err == sarama.ErrUnknownTopicOrPartition {
 		//	// Not an error
 		//	err = nil
@@ -815,12 +815,12 @@
 		delete(sc.topicToConsumerChannelMap, topic.Name)
 		return err
 	}
-	logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return nil
 }
 
 //createPublisher creates the publisher which is used to send a message onto kafka
-func (sc *SaramaClient) createPublisher() error {
+func (sc *SaramaClient) createPublisher(ctx context.Context) error {
 	// This Creates the publisher
 	config := sarama.NewConfig()
 	config.Producer.Partitioner = sarama.NewRandomPartitioner
@@ -835,16 +835,16 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
-		logger.Errorw("error-starting-publisher", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-starting-publisher", log.Fields{"error": err})
 		return err
 	} else {
 		sc.producer = producer
 	}
-	logger.Info("Kafka-publisher-created")
+	logger.Info(ctx, "Kafka-publisher-created")
 	return nil
 }
 
-func (sc *SaramaClient) createConsumer() error {
+func (sc *SaramaClient) createConsumer(ctx context.Context) error {
 	config := sarama.NewConfig()
 	config.Consumer.Return.Errors = true
 	config.Consumer.Fetch.Min = 1
@@ -855,17 +855,17 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
-		logger.Errorw("error-starting-consumers", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-starting-consumers", log.Fields{"error": err})
 		return err
 	} else {
 		sc.consumer = consumer
 	}
-	logger.Info("Kafka-consumers-created")
+	logger.Info(ctx, "Kafka-consumers-created")
 	return nil
 }
 
 // createGroupConsumer creates a consumer group
-func (sc *SaramaClient) createGroupConsumer(topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
 	config := scc.NewConfig()
 	config.ClientID = uuid.New().String()
 	config.Group.Mode = scc.ConsumerModeMultiplex
@@ -883,10 +883,10 @@
 	var err error
 
 	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
 	//sc.groupConsumers[topic.Name] = consumer
 	sc.addToGroupConsumers(topic.Name, consumer)
@@ -911,104 +911,104 @@
 	}
 }
 
-func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
-	logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeFromAPartition(ctx context.Context, topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(err) {
-					sc.updateLiveness(false)
-					logger.Warnw("partition-consumers-error", log.Fields{"error": err})
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+					logger.Warnw(ctx, "partition-consumers-error", log.Fields{"error": err})
 				}
 			} else {
 				// Channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
-			//logger.Debugw("message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			//logger.Debugw(ctx, "message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
 			if !ok {
 				// channel is closed
 				break startloop
 			}
 			msgBody := msg.Value
-			sc.updateLiveness(true)
-			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw("partition-invalid-message", log.Fields{"error": err})
+				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 		case <-sc.doneCh:
-			logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy()
+	logger.Infow(ctx, "partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
-	logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(err) {
-					sc.updateLiveness(false)
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
 				}
-				logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
 			} else {
-				logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
 				// channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
 			if !ok {
-				logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
 				// Channel closed
 				break startloop
 			}
-			sc.updateLiveness(true)
-			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			msgBody := msg.Value
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw("invalid-message", log.Fields{"error": err})
+				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 			consumer.MarkOffset(msg, "")
 		case ntf := <-consumer.Notifications():
-			logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
+			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
 		case <-sc.doneCh:
-			logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy()
+	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) startConsumers(topic *Topic) error {
-	logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
+	logger.Debugw(ctx, "starting-consumers", log.Fields{"topic": topic.Name})
 	var consumerCh *consumerChannels
 	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
-		logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
+		logger.Errorw(ctx, "consumers-not-exist", log.Fields{"topic": topic.Name})
 		return errors.New("consumers-not-exist")
 	}
 	// For each consumer listening for that topic, start a consumption loop
 	for _, consumer := range consumerCh.consumers {
 		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
-			go sc.consumeFromAPartition(topic, pConsumer, consumerCh)
+			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
 		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
-			go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
+			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
 		} else {
-			logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
+			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
 			return errors.New("invalid-consumer")
 		}
 	}
@@ -1017,12 +1017,12 @@
 
 // setupPartitionConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupPartitionConsumerChannel(topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	var pConsumers []sarama.PartitionConsumer
 	var err error
 
-	if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
-		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumers, err = sc.createPartitionConsumers(ctx, topic, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1044,8 +1044,8 @@
 
 	//Start a consumer to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(topic); err != nil {
-			logger.Errorw("start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1056,12 +1056,12 @@
 
 // setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupGroupConsumerChannel(topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	// TODO: Replace this development partition consumer with a group consumer
 	var pConsumer *scc.Consumer
 	var err error
-	if pConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 	// Create the consumer/channel structure, set the consumer, and create a channel on that topic - for now
@@ -1077,8 +1077,8 @@
 
 	//Start a consumer to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(topic); err != nil {
-			logger.Errorw("start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1087,11 +1087,11 @@
 	return consumerListeningChannel, nil
 }
 
-func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
-	logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) createPartitionConsumers(ctx context.Context, topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw(ctx, "creating-partition-consumers", log.Fields{"topic": topic.Name})
 	partitionList, err := sc.consumer.Partitions(topic.Name)
 	if err != nil {
-		logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Warnw(ctx, "get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1099,7 +1099,7 @@
 	for _, partition := range partitionList {
 		var pConsumer sarama.PartitionConsumer
 		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
-			logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			logger.Warnw(ctx, "consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 		pConsumers = append(pConsumers, pConsumer)
@@ -1107,14 +1107,14 @@
 	return pConsumers, nil
 }
 
-func removeChannel(channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
 	var i int
 	var channel chan *ic.InterContainerMessage
 	for i, channel = range channels {
 		if channel == ch {
 			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
 			close(channel)
-			logger.Debug("channel-closed")
+			logger.Debug(ctx, "channel-closed")
 			return channels[:len(channels)-1]
 		}
 	}
@@ -1129,14 +1129,14 @@
 	}
 }
 
-func (sc *SaramaClient) deleteFromGroupConsumers(topic string) error {
+func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
 	if _, exist := sc.groupConsumers[topic]; exist {
 		consumer := sc.groupConsumers[topic]
 		delete(sc.groupConsumers, topic)
 		if err := consumer.Close(); err != nil {
-			logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
+			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
 			return err
 		}
 	}
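Taken together, the sarama client's lifecycle is now driven end to end by one context. A minimal sketch, assuming an already-constructed *kafka.SaramaClient and an illustrative topic name; it is not part of this change.

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// consumeOnce is a sketch only: start, subscribe, handle one message or a
// cancellation, then tear everything down with the same ctx.
func consumeOnce(ctx context.Context, sc *kafka.SaramaClient, logger log.CLogger) error {
	if err := sc.Start(ctx); err != nil {
		return err
	}
	defer sc.Stop(ctx)

	topic := kafka.Topic{Name: "example-topic"}
	ch, err := sc.Subscribe(ctx, &topic)
	if err != nil {
		return err
	}
	defer func() {
		if err := sc.UnSubscribe(ctx, &topic, ch); err != nil {
			logger.Warnw(ctx, "unsubscribe-failed", log.Fields{"error": err})
		}
	}()

	select {
	case msg := <-ch:
		logger.Infow(ctx, "message-received", log.Fields{"type": msg.Header.Type})
	case <-ctx.Done():
	}
	return nil
}
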
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
index aab1048..b47b562 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
@@ -75,6 +75,10 @@
 }
 
 func AddPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (Logger, error) {
+	// Get the package name of the calling method and pass it on; otherwise this method would be registered as the caller
+	pkgName, _, _, _ := getCallerInfo()
+
+	pkgNames = append(pkgNames, pkgName)
 	clg, err := RegisterPackage(outputType, level, defaultFields, pkgNames...)
 	if err != nil {
 		return nil, err
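The shim above keeps the classic ctx-less Logger usable: AddPackage resolves the calling package with getCallerInfo and registers it before delegating to RegisterPackage. A sketch of the legacy call path it preserves; the package and field values are illustrative assumptions.

package example

import "github.com/opencord/voltha-lib-go/v3/pkg/log"

// useLegacyLogger is a sketch only: an unmigrated caller keeps the old,
// ctx-less API while still being registered under its own package name.
func useLegacyLogger() {
	legacy, err := log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "legacy"})
	if err != nil {
		panic(err)
	}
	legacy.Infow("legacy-event", log.Fields{"step": 1}) // no ctx argument
}
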
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/common.go
index 0f4339e..113b39c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "ponresourcemanager"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "ponresourcemanager"})
 	if err != nil {
 		panic(err)
 	}
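The resource-manager file that follows threads ctx into KV-client construction as well. A minimal sketch of the resulting etcd call shape; the address and timeout are illustrative assumptions.

package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// newEtcd is a sketch only: the ctx-first constructor used by the changes below.
func newEtcd(ctx context.Context) (kvstore.Client, error) {
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		return nil, err
	}
	return client, nil
}
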
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
index 5c10b5e..baff575 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
@@ -154,23 +154,23 @@
 	Globalorlocal      string
 }
 
-func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
-	logger.Infow("kv-store-type", log.Fields{"store": storeType})
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func SetKVClient(Technology string, Backend string, Addr string, configClient bool) *db.Backend {
+func SetKVClient(ctx context.Context, Technology string, Backend string, Addr string, configClient bool) *db.Backend {
 	// TODO: Make sure a direct call to NewBackend works fine with the backend; currently there is some
 	// issue between the kv store and the backend, and the core is not calling NewBackend directly
-	kvClient, err := newKVClient(Backend, Addr, KVSTORE_RETRY_TIMEOUT)
+	kvClient, err := newKVClient(ctx, Backend, Addr, KVSTORE_RETRY_TIMEOUT)
 	if err != nil {
-		logger.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+		logger.Fatalw(ctx, "Failed to init KV client", log.Fields{"err": err})
 		return nil
 	}
 
@@ -192,27 +192,27 @@
 }
 
 // NewPONResourceManager creates a new PON resource manager.
-func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Address string) (*PONResourceManager, error) {
+func NewPONResourceManager(ctx context.Context, Technology string, DeviceType string, DeviceID string, Backend string, Address string) (*PONResourceManager, error) {
 	var PONMgr PONResourceManager
 	PONMgr.Technology = Technology
 	PONMgr.DeviceType = DeviceType
 	PONMgr.DeviceID = DeviceID
 	PONMgr.Backend = Backend
 	PONMgr.Address = Address
-	PONMgr.KVStore = SetKVClient(Technology, Backend, Address, false)
+	PONMgr.KVStore = SetKVClient(ctx, Technology, Backend, Address, false)
 	if PONMgr.KVStore == nil {
-		logger.Error("KV Client initilization failed")
+		logger.Error(ctx, "KV Client initialization failed")
 		return nil, errors.New("Failed to init KV client")
 	}
 	// init kv client to read from the config path
-	PONMgr.KVStoreForConfig = SetKVClient(Technology, Backend, Address, true)
+	PONMgr.KVStoreForConfig = SetKVClient(ctx, Technology, Backend, Address, true)
 	if PONMgr.KVStoreForConfig == nil {
-		logger.Error("KV Config Client initilization failed")
+		logger.Error(ctx, "KV Config Client initialization failed")
 		return nil, errors.New("Failed to init KV Config client")
 	}
 	// Initialize techprofile for this technology
-	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Address); PONMgr.TechProfileMgr == nil {
-		logger.Error("Techprofile initialization failed")
+	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(ctx, &PONMgr, Backend, Address); PONMgr.TechProfileMgr == nil {
+		logger.Error(ctx, "Techprofile initialization failed")
 		return nil, errors.New("Failed to init tech profile")
 	}
 	PONMgr.PonResourceRanges = make(map[string]interface{})
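Adapters that construct a PONResourceManager now pass the context once at creation and again on each pool operation. A hedged sketch of the resulting call shape; the technology, backend, address, device id, and interface id are illustrative assumptions.

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
	ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
)

// allocateOnuID is a sketch only: build a manager and draw one ONU ID through
// the ctx-first signatures introduced here.
func allocateOnuID(ctx context.Context, logger log.CLogger) error {
	rmgr, err := ponrmgr.NewPONResourceManager(ctx, "xgspon", "olt", "device-1", "etcd", "etcd:2379")
	if err != nil {
		return err
	}
	ids, err := rmgr.GetResourceID(ctx, 0, ponrmgr.ONU_ID, 1)
	if err != nil {
		return err
	}
	logger.Debugw(ctx, "onu-id-allocated", log.Fields{"id": ids[0]})
	return nil
}
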
@@ -240,36 +240,36 @@
 	// Try to initialize the PON Resource Ranges from KV store based on the
 	// OLT model key, if available
 	if PONRMgr.OLTModel == "" {
-		logger.Error("Failed to get OLT model")
+		logger.Error(ctx, "Failed to get OLT model")
 		return false
 	}
 	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
 	//get resource from kv store
 	Result, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err != nil {
-		logger.Debugf("Error in fetching resource %s from KV strore", Path)
+		logger.Debugf(ctx, "Error in fetching resource %s from KV store", Path)
 		return false
 	}
 	if Result == nil {
-		logger.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+		logger.Debug(ctx, "There may be no resources in the KV store in case of fresh bootup, return true")
 		return false
 	}
 	//update internal ranges from kv ranges. If there are missing
 	// values in the KV profile, continue to use the defaults
 	Value, err := ToByte(Result.Value)
 	if err != nil {
-		logger.Error("Failed to convert kvpair to byte string")
+		logger.Error(ctx, "Failed to convert kvpair to byte string")
 		return false
 	}
 	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
-		logger.Error("Failed to Unmarshal json byte")
+		logger.Error(ctx, "Failed to Unmarshal json byte")
 		return false
 	}
-	logger.Debug("Init resource ranges from kvstore success")
+	logger.Debug(ctx, "Init resource ranges from kvstore success")
 	return true
 }
 
-func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+func (PONRMgr *PONResourceManager) UpdateRanges(ctx context.Context, StartIDx string, StartID uint32, EndIDx string, EndID uint32,
 	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
 	/*
 	   Update the ranges for all resource types in the internal maps
@@ -281,7 +281,7 @@
 	   param: shared pool id
 	   param: global resource manager
 	*/
-	logger.Debugf("update ranges for %s, %d", StartIDx, StartID)
+	logger.Debugf(ctx, "update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
 		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
@@ -301,7 +301,8 @@
 	}
 }
 
-func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ctx context.Context,
+	ONUIDStart uint32,
 	ONUIDEnd uint32,
 	ONUIDSharedPoolID uint32,
 	AllocIDStart uint32,
@@ -335,12 +336,12 @@
 	  :param num_of_pon_ports: number of PON ports
 	  :param intf_ids: interfaces serviced by this manager
 	*/
-	PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
-	logger.Debug("Initialize default range values")
+	PONRMgr.UpdateRanges(ctx, ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+	logger.Debug(ctx, "Initialize default range values")
 	var i uint32
 	if IntfIDs == nil {
 		for i = 0; i < NoOfPONPorts; i++ {
@@ -356,7 +357,7 @@
 
 	//Initialize resource pool for all PON ports.
 
-	logger.Debug("Init resource ranges")
+	logger.Debug(ctx, "Init resource ranges")
 
 	var err error
 	for _, Intf := range PONRMgr.IntfIDs {
@@ -367,7 +368,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ONU_ID,
 			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
-			logger.Error("Failed to init ONU ID resource pool")
+			logger.Error(ctx, "Failed to init ONU ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -383,7 +384,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ALLOC_ID,
 			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
-			logger.Error("Failed to init ALLOC ID resource pool ")
+			logger.Error(ctx, "Failed to init ALLOC ID resource pool ")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -398,7 +399,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, GEMPORT_ID,
 			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
-			logger.Error("Failed to init GEMPORT ID resource pool")
+			logger.Error(ctx, "Failed to init GEMPORT ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -414,7 +415,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, FLOW_ID,
 			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
-			logger.Error("Failed to init FLOW ID resource pool")
+			logger.Error(ctx, "Failed to init FLOW ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -428,7 +429,7 @@
 
 	//Clear resource pool for all PON ports.
 
-	logger.Debug("Clear resource ranges")
+	logger.Debug(ctx, "Clear resource ranges")
 
 	for _, Intf := range PONRMgr.IntfIDs {
 		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
@@ -436,7 +437,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ONU_ID); !status {
-			logger.Error("Failed to clear ONU ID resource pool")
+			logger.Error(ctx, "Failed to clear ONU ID resource pool")
 			return errors.New("Failed to clear ONU ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -450,7 +451,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ALLOC_ID); !status {
-			logger.Error("Failed to clear ALLOC ID resource pool ")
+			logger.Error(ctx, "Failed to clear ALLOC ID resource pool ")
 			return errors.New("Failed to clear ALLOC ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -463,7 +464,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, GEMPORT_ID); !status {
-			logger.Error("Failed to clear GEMPORT ID resource pool")
+			logger.Error(ctx, "Failed to clear GEMPORT ID resource pool")
 			return errors.New("Failed to clear GEMPORT ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -477,7 +478,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, FLOW_ID); !status {
-			logger.Error("Failed to clear FLOW ID resource pool")
+			logger.Error(ctx, "Failed to clear FLOW ID resource pool")
 			return errors.New("Failed to clear FLOW ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -504,9 +505,9 @@
 		return SharedResourceMgr.InitResourceIDPool(ctx, Intf, ResourceType, StartID, EndID)
 	}
 
-	Path := PONRMgr.GetPath(Intf, ResourceType)
+	Path := PONRMgr.GetPath(ctx, Intf, ResourceType)
 	if Path == "" {
-		logger.Errorf("Failed to get path for resource type %s", ResourceType)
+		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
 		return fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
 
@@ -514,7 +515,7 @@
 	//checked for its presence if not kv store update happens
 	Res, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (Res != nil) {
-		logger.Debugf("Resource %s already present in store ", Path)
+		logger.Debugf(ctx, "Resource %s already present in store ", Path)
 		return nil
 	} else {
 		var excluded []uint32
@@ -522,23 +523,23 @@
 			//get gem port ids defined in the KV store, if any, and exclude them from the gem port id pool
 			if reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx); defined {
 				excluded = reservedGemPortIds
-				logger.Debugw("Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
+				logger.Debugw(ctx, "Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
 			}
 		}
-		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID, excluded)
+		FormatResult, err := PONRMgr.FormatResource(ctx, Intf, StartID, EndID, excluded)
 		if err != nil {
-			logger.Errorf("Failed to format resource")
+			logger.Errorf(ctx, "Failed to format resource")
 			return err
 		}
 		// Add resource as json in kv store.
 		err = PONRMgr.KVStore.Put(ctx, Path, FormatResult)
 		if err == nil {
-			logger.Debug("Successfuly posted to kv store")
+			logger.Debug(ctx, "Successfully posted to kv store")
 			return err
 		}
 	}
 
-	logger.Debug("Error initializing pool")
+	logger.Debug(ctx, "Error initializing pool")
 
 	return err
 }
@@ -548,7 +549,7 @@
 	// read reserved gem ports from the config path
 	KvPair, err := PONRMgr.KVStoreForConfig.Get(ctx, RESERVED_GEMPORT_IDS_PATH)
 	if err != nil {
-		logger.Errorw("Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
+		logger.Errorw(ctx, "Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if KvPair == nil || KvPair.Value == nil {
@@ -557,17 +558,17 @@
 	}
 	Val, err := kvstore.ToByte(KvPair.Value)
 	if err != nil {
-		logger.Errorw("Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
+		logger.Errorw(ctx, "Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if err = json.Unmarshal(Val, &reservedGemPortIds); err != nil {
-		logger.Errorw("Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
+		logger.Errorw(ctx, "Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	return reservedGemPortIds, true
 }
 
-func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32,
+func (PONRMgr *PONResourceManager) FormatResource(ctx context.Context, IntfID uint32, StartIDx uint32, EndIDx uint32,
 	Excluded []uint32) ([]byte, error) {
 	/*
 	   Format resource as json.
@@ -589,22 +590,22 @@
 	*/
 	var TSData *bitmap.Threadsafe
 	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
-		logger.Error("Failed to create a bitmap")
+		logger.Error(ctx, "Failed to create a bitmap")
 		return nil, errors.New("Failed to create bitmap")
 	}
 	for _, excludedID := range Excluded {
 		if excludedID < StartIDx || excludedID > EndIDx {
-			logger.Warnf("Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
+			logger.Warnf(ctx, "Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
 				StartIDx, EndIDx)
 			continue
 		}
-		PONRMgr.reserveID(TSData, StartIDx, excludedID)
+		PONRMgr.reserveID(ctx, TSData, StartIDx, excludedID)
 	}
 	Resource[POOL] = TSData.Data(false) //we pass false so the TSData lib API does not copy the data before returning it
 
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		logger.Errorf("Failed to marshall resource")
+		logger.Errorf(ctx, "Failed to marshal resource")
 		return nil, err
 	}
 	return Value, err
@@ -624,7 +625,7 @@
 
 	Resource, err := PONRMgr.KVStore.Get(ctx, Path)
 	if (err != nil) || (Resource == nil) {
-		logger.Debugf("Resource  unavailable at %s", Path)
+		logger.Debugf(ctx, "Resource  unavailable at %s", Path)
 		return nil, err
 	}
 
@@ -636,7 +637,7 @@
 	// decode resource fetched from backend store to dictionary
 	err = json.Unmarshal(Value, &Result)
 	if err != nil {
-		logger.Error("Failed to decode resource")
+		logger.Error(ctx, "Failed to decode resource")
 		return Result, err
 	}
 	/*
@@ -646,20 +647,20 @@
 	*/
 	Str, err = ToString(Result[POOL])
 	if err != nil {
-		logger.Error("Failed to conver to kv pair to string")
+		logger.Error(ctx, "Failed to convert kv pair to string")
 		return Result, err
 	}
 	Decode64, _ := base64.StdEncoding.DecodeString(Str)
 	Result[POOL], err = ToByte(Decode64)
 	if err != nil {
-		logger.Error("Failed to convert resource pool to byte")
+		logger.Error(ctx, "Failed to convert resource pool to byte")
 		return Result, err
 	}
 
 	return Result, err
 }
 
-func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
+func (PONRMgr *PONResourceManager) GetPath(ctx context.Context, IntfID uint32, ResourceType string) string {
 	/*
 	   Get path for given resource type.
 	   :param pon_intf_id: OLT PON interface id
@@ -685,7 +686,7 @@
 	} else if ResourceType == FLOW_ID {
 		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
 	} else {
-		logger.Error("Invalid resource pool identifier")
+		logger.Error(ctx, "Invalid resource pool identifier")
 	}
 	return Path
 }
@@ -700,7 +701,7 @@
 	    alloc_id/gemport_id, onu_id or invalid type respectively
 	*/
 	if NumIDs < 1 {
-		logger.Error("Invalid number of resources requested")
+		logger.Error(ctx, "Invalid number of resources requested")
 		return nil, fmt.Errorf("Invalid number of resources requested %d", NumIDs)
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -709,34 +710,34 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
 	}
-	logger.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+	logger.Debugf(ctx, "Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
-	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
 	if Path == "" {
-		logger.Errorf("Failed to get path for resource type %s", ResourceType)
+		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
 		return nil, fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
-	logger.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+	logger.Debugf(ctx, "Get resource for type %s on path %s", ResourceType, Path)
 	var Result []uint32
 	var NextID uint32
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
-		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-			logger.Error("Failed to Generate ID")
+		if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+			logger.Error(ctx, "Failed to Generate ID")
 			return Result, err
 		}
 		Result = append(Result, NextID)
 	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
 		if NumIDs == 1 {
-			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-				logger.Error("Failed to Generate ID")
+			if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+				logger.Error(ctx, "Failed to Generate ID")
 				return Result, err
 			}
 			Result = append(Result, NextID)
 		} else {
 			for NumIDs > 0 {
-				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-					logger.Error("Failed to Generate ID")
+				if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+					logger.Error(ctx, "Failed to Generate ID")
 					return Result, err
 				}
 				Result = append(Result, NextID)
@@ -744,13 +745,13 @@
 			}
 		}
 	} else {
-		logger.Error("get resource failed")
+		logger.Error(ctx, "get resource failed")
 		return Result, err
 	}
 
 	//Update resource in kv store
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return nil, fmt.Errorf("Failed to update resource %s", Path)
 	}
 	return Result, nil
@@ -776,11 +777,11 @@
 	   :return boolean: True if all IDs in given release_content release else False
 	*/
 	if !checkValidResourceType(ResourceType) {
-		logger.Error("Invalid resource type")
+		logger.Error(ctx, "Invalid resource type")
 		return false
 	}
 	if ReleaseContent == nil {
-		logger.Debug("Nothing to release")
+		logger.Debug(ctx, "Nothing to release")
 		return true
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -788,21 +789,21 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
 	}
-	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
 	if Path == "" {
-		logger.Error("Failed to get path")
+		logger.Error(ctx, "Failed to get path")
 		return false
 	}
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if err != nil {
-		logger.Error("Failed to get resource")
+		logger.Error(ctx, "Failed to get resource")
 		return false
 	}
 	for _, Val := range ReleaseContent {
-		PONRMgr.ReleaseID(Resource, Val)
+		PONRMgr.ReleaseID(ctx, Resource, Val)
 	}
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		logger.Errorf("Free resource for %s failed", Path)
+		logger.Errorf(ctx, "Free resource for %s failed", Path)
 		return false
 	}
 	return true
@@ -818,12 +819,12 @@
 	// TODO resource[POOL] = resource[POOL].bin
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 	err = PONRMgr.KVStore.Put(ctx, Path, Value)
 	if err != nil {
-		logger.Error("failed to put data to kv store %s", Path)
+		logger.Error(ctx, "failed to put data to kv store %s", Path)
 		return err
 	}
 	return nil
@@ -840,17 +841,17 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.ClearResourceIDPool(ctx, contIntfID, ResourceType)
 	}
-	Path := PONRMgr.GetPath(contIntfID, ResourceType)
+	Path := PONRMgr.GetPath(ctx, contIntfID, ResourceType)
 	if Path == "" {
-		logger.Error("Failed to get path")
+		logger.Error(ctx, "Failed to get path")
 		return false
 	}
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		logger.Errorf("Failed to delete resource %s", Path)
+		logger.Errorf(ctx, "Failed to delete resource %s", Path)
 		return false
 	}
-	logger.Debugf("Cleared resource %s", Path)
+	logger.Debugf(ctx, "Cleared resource %s", Path)
 	return true
 }
 
@@ -864,7 +865,7 @@
 	var AllocIDs []byte
 	Result := PONRMgr.KVStore.Put(ctx, AllocIDPath, AllocIDs)
 	if Result != nil {
-		logger.Error("Failed to update the KV store")
+		logger.Error(ctx, "Failed to update the KV store")
 		return
 	}
 	// initialize pon_intf_onu_id tuple to gemport_ids map
@@ -872,7 +873,7 @@
 	var GEMPortIDs []byte
 	Result = PONRMgr.KVStore.Put(ctx, GEMPortIDPath, GEMPortIDs)
 	if Result != nil {
-		logger.Error("Failed to update the KV store")
+		logger.Error(ctx, "Failed to update the KV store")
 		return
 	}
 }
@@ -886,14 +887,14 @@
 	var err error
 	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	if err = PONRMgr.KVStore.Delete(ctx, AllocIDPath); err != nil {
-		logger.Errorf("Failed to remove resource %s", AllocIDPath)
+		logger.Errorf(ctx, "Failed to remove resource %s", AllocIDPath)
 		return false
 	}
 	// remove pon_intf_onu_id tuple to gemport_ids map
 	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	err = PONRMgr.KVStore.Delete(ctx, GEMPortIDPath)
 	if err != nil {
-		logger.Errorf("Failed to remove resource %s", GEMPortIDPath)
+		logger.Errorf(ctx, "Failed to remove resource %s", GEMPortIDPath)
 		return false
 	}
 
@@ -902,14 +903,14 @@
 		for _, Flow := range FlowIDs {
 			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
 			if err = PONRMgr.KVStore.Delete(ctx, FlowIDInfoPath); err != nil {
-				logger.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+				logger.Errorf(ctx, "Failed to remove resource %s", FlowIDInfoPath)
 				return false
 			}
 		}
 	}
 
 	if err = PONRMgr.KVStore.Delete(ctx, FlowIDPath); err != nil {
-		logger.Errorf("Failed to remove resource %s", FlowIDPath)
+		logger.Errorf(ctx, "Failed to remove resource %s", FlowIDPath)
 		return false
 	}
 
@@ -930,11 +931,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				logger.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
 				return Data
 			}
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Error("Failed to unmarshal", log.Fields{"error": err})
+				logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
@@ -950,19 +951,19 @@
 	*/
 
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	logger.Debugf("Getting current gemports for %s", Path)
+	logger.Debugf(ctx, "Getting current gemports for %s", Path)
 	var Data []uint32
 	Value, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err == nil {
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
 	} else {
-		logger.Errorf("Failed to get data from kvstore for %s", Path)
+		logger.Errorf(ctx, "Failed to get data from kvstore for %s", Path)
 	}
 	return Data
 }
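
The Get/ToByte/json.Unmarshal sequence above recurs throughout this file because the backend may hand the stored JSON back as either a string or a []byte. A self-contained sketch of that normalization, with the ToByte helper reimplemented inline rather than imported:

package main

import (
	"encoding/json"
	"fmt"
)

// toByte mirrors the shape of the package's ToByte helper: normalize the
// KV value to []byte so json.Unmarshal can decode it.
func toByte(value interface{}) ([]byte, error) {
	switch t := value.(type) {
	case []byte:
		return t, nil
	case string:
		return []byte(t), nil
	default:
		return nil, fmt.Errorf("unexpected-type-%T", t)
	}
}

func main() {
	raw := `[1024,1025]` // the store returns JSON, often as a string
	val, err := toByte(raw)
	if err != nil {
		fmt.Println(err)
		return
	}
	var gemPorts []uint32
	if err := json.Unmarshal(val, &gemPorts); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(gemPorts) // [1024 1025]
}
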
@@ -982,7 +983,7 @@
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Error("Failed to unmarshal")
+				logger.Error(ctx, "Failed to unmarshal")
 				return Data
 			}
 		}
@@ -1006,11 +1007,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				logger.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				logger.Errorw(ctx, "Failed to convert flowinfo into byte array", log.Fields{"error": err})
 				return err
 			}
 			if err = json.Unmarshal(Val, Data); err != nil {
-				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -1027,7 +1028,7 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		logger.Errorf("Falied to remove resource %s", Path)
+		logger.Errorf(ctx, "Falied to remove resource %s", Path)
 		return false
 	}
 	return true
@@ -1044,12 +1045,12 @@
 	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
 	Value, err = json.Marshal(AllocIDs)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1065,15 +1066,15 @@
 	var Value []byte
 	var err error
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	logger.Debugf("Updating gemport ids for %s", Path)
+	logger.Debugf(ctx, "Updating gemport ids for %s", Path)
 	Value, err = json.Marshal(GEMPortIDs)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1124,12 +1125,12 @@
 	}
 	Value, err = json.Marshal(FlowIDs)
 	if err != nil {
-		logger.Error("Failed to Marshal")
+		logger.Error(ctx, "Failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1148,18 +1149,18 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 	Value, err = json.Marshal(FlowData)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return err
 	}
 	return err
 }
 
-func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
+func (PONRMgr *PONResourceManager) GenerateNextID(ctx context.Context, Resource map[string]interface{}) (uint32, error) {
 	/*
 	   Generate unique id having OFFSET as start
 	   :param resource: resource used to generate ID
@@ -1167,12 +1168,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		logger.Error("Failed to convert resource to byte array")
+		logger.Error(ctx, "Failed to convert resource to byte array")
 		return 0, err
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		logger.Error("Failed to get data from byte array")
+		logger.Error(ctx, "Failed to get data from byte array")
 		return 0, errors.New("Failed to get data from byte array")
 	}
 
@@ -1186,11 +1187,11 @@
 	Data.Set(Idx, true)
 	res := uint32(Resource[START_IDX].(float64))
 	Resource[POOL] = Data.Data(false)
-	logger.Debugf("Generated ID for %d", (uint32(Idx) + res))
+	logger.Debugf(ctx, "Generated ID for %d", (uint32(Idx) + res))
 	return (uint32(Idx) + res), err
 }
 
-func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
+func (PONRMgr *PONResourceManager) ReleaseID(ctx context.Context, Resource map[string]interface{}, Id uint32) bool {
 	/*
 	   Release unique id having OFFSET as start index.
 	   :param resource: resource used to release ID
@@ -1198,12 +1199,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		logger.Error("Failed to convert resource to byte array")
+		logger.Error(ctx, "Failed to convert resource to byte array")
 		return false
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		logger.Error("Failed to get resource pool")
+		logger.Error(ctx, "Failed to get resource pool")
 		return false
 	}
 	Idx := Id - uint32(Resource[START_IDX].(float64))
@@ -1217,10 +1218,10 @@
 :param Resource: resource used to reserve ID
 :param Id: ID to be reserved
 */
-func (PONRMgr *PONResourceManager) reserveID(TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
+func (PONRMgr *PONResourceManager) reserveID(ctx context.Context, TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
 	Data := bitmap.TSFromData(TSData.Data(false), false)
 	if Data == nil {
-		logger.Error("Failed to get resource pool")
+		logger.Error(ctx, "Failed to get resource pool")
 		return false
 	}
 	Idx := Id - StartIndex
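
GenerateNextID, ReleaseID and reserveID share one piece of arithmetic: pool bit i stands for ID start_idx+i, so IDs are never stored directly. A stdlib sketch of just that index math, leaving the actual bit handling to the threadsafe bitmap library the real code uses:

package main

import "fmt"

func main() {
	const startIdx = 1
	pool := make([]bool, 16) // covers IDs startIdx .. startIdx+15

	// reserveID(5): translate the ID into a bit index, then set it.
	pool[5-startIdx] = true

	// GenerateNextID: first clear bit, translated back into an ID.
	for i, used := range pool {
		if !used {
			fmt.Println("next free ID:", uint32(i)+startIdx) // prints 1
			break
		}
	}

	// ReleaseID(5): the same translation, clearing the bit.
	pool[5-startIdx] = false
}
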
@@ -1277,12 +1278,12 @@
 	Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
 	Value, err = json.Marshal(onuGemData)
 	if err != nil {
-		logger.Error("failed to Marshal")
+		logger.Error(ctx, "failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf("Failed to update resource %s", Path)
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1299,22 +1300,22 @@
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
 	value, err := PONRMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		logger.Errorw("Failed to get from kv store", log.Fields{"path": path})
+		logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
 		return err
 	} else if value == nil {
-		logger.Debug("No onuinfo for path", log.Fields{"path": path})
+		logger.Debug(ctx, "No onuinfo for path", log.Fields{"path": path})
 		return nil // returning nil as this could happen if there are no onus for the interface yet
 	}
 	if Val, err = kvstore.ToByte(value.Value); err != nil {
-		logger.Error("Failed to convert to byte array")
+		logger.Error(ctx, "Failed to convert to byte array")
 		return err
 	}
 
 	if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
-		logger.Error("Failed to unmarshall")
+		logger.Error(ctx, "Failed to unmarshall")
 		return err
 	}
-	logger.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
+	logger.Debugw(ctx, "found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
 	return err
 }
 
@@ -1326,7 +1327,7 @@
 
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
 	if err := PONRMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorf("Falied to remove resource %s", path)
+		logger.Errorf(ctx, "Falied to remove resource %s", path)
 		return err
 	}
 	return nil
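
Taken together, the resource-manager changes mean a caller-supplied context now reaches every nested helper and logger. A hedged caller sketch; the function name and import alias are illustrative, and the package path is assumed from the library layout:

package example

import (
	"context"

	ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
)

// freeOnuID shows the threading pattern: the ctx passed here now rides
// through GetPath, ReleaseID and the loggers underneath them.
func freeOnuID(ctx context.Context, mgr *ponrmgr.PONResourceManager, intfID, onuID uint32) bool {
	return mgr.FreeResourceID(ctx, intfID, ponrmgr.ONU_ID, []uint32{onuID})
}
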
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
index 211419d..14857ab 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
index e89d5bc..732d6df 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
@@ -109,7 +109,7 @@
 }
 
 // RegisterService register one or more service names with the probe, status will be track against service name
-func (p *Probe) RegisterService(names ...string) {
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -118,7 +118,7 @@
 	for _, name := range names {
 		if _, ok := p.status[name]; !ok {
 			p.status[name] = ServiceStatusUnknown
-			logger.Debugw("probe-service-registered", log.Fields{"service-name": name})
+			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
 		}
 	}
 
@@ -136,7 +136,7 @@
 }
 
 // UpdateStatus utility function to send a service update to the probe
-func (p *Probe) UpdateStatus(name string, status ServiceStatus) {
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -161,7 +161,7 @@
 	} else {
 		p.isHealthy = defaultHealthFunc(p.status)
 	}
-	logger.Debugw("probe-service-status-updated",
+	logger.Debugw(ctx, "probe-service-status-updated",
 		log.Fields{
 			"service-name": name,
 			"status":       status.String(),
@@ -204,7 +204,7 @@
 func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
 	p := GetProbeFromContext(ctx)
 	if p != nil {
-		p.UpdateStatus(name, status)
+		p.UpdateStatus(ctx, name, status)
 	}
 }
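
A minimal usage sketch of the reworked probe API; the service names and listen address are illustrative:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/probe"
)

// startProbe registers services, marks one running, and serves the health
// endpoints, passing ctx everywhere the new signatures require it.
func startProbe(ctx context.Context) *probe.Probe {
	p := &probe.Probe{}
	p.RegisterService(ctx, "kv-store", "message-bus")
	p.UpdateStatus(ctx, "kv-store", probe.ServiceStatusRunning)
	go p.ListenAndServe(ctx, "0.0.0.0:8080")
	return p
}
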
 
@@ -228,25 +228,26 @@
 	}
 }
 func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	ctx := context.Background()
 	p.mutex.RLock()
 	defer p.mutex.RUnlock()
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write([]byte("{")); err != nil {
-		logger.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
 	comma := ""
 	for c, s := range p.status {
 		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
-			logger.Errorw("write-response", log.Fields{"error": err})
+			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
 		}
 		comma = ", "
 	}
 	if _, err := w.Write([]byte("}")); err != nil {
-		logger.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -254,7 +255,7 @@
 }
 
 // ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
-func (p *Probe) ListenAndServe(address string) {
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
 	mux := http.NewServeMux()
 
 	// Returns the result of the readyFunc calculation
@@ -269,7 +270,7 @@
 		Addr:    address,
 		Handler: mux,
 	}
-	logger.Fatal(s.ListenAndServe())
+	logger.Fatal(ctx, s.ListenAndServe())
 }
 
 func (p *Probe) IsReady() bool {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go
index 42818f1..e7cd798 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
index ff37326..a12df1d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
@@ -379,10 +379,10 @@
 	epon   = "EPON"
 )
 
-func (t *TechProfileMgr) SetKVClient() *db.Backend {
-	kvClient, err := newKVClient(t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
+func (t *TechProfileMgr) SetKVClient(ctx context.Context) *db.Backend {
+	kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
 	if err != nil {
-		logger.Errorw("failed-to-create-kv-client",
+		logger.Errorw(ctx, "failed-to-create-kv-client",
 			log.Fields{
 				"type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
 				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
@@ -404,34 +404,34 @@
 	*/
 }
 
-func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
 
-	logger.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+	logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string) (*TechProfileMgr, error) {
+func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string) (*TechProfileMgr, error) {
 	var techprofileObj TechProfileMgr
-	logger.Debug("Initializing techprofile Manager")
+	logger.Debug(ctx, "Initializing techprofile Manager")
 	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress)
-	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx)
 	if techprofileObj.config.KVBackend == nil {
-		logger.Error("Failed to initialize KV backend\n")
+		logger.Error(ctx, "Failed to initialize KV backend\n")
 		return nil, errors.New("KV backend init failed")
 	}
 	techprofileObj.resourceMgr = resourceMgr
-	logger.Debug("Initializing techprofile object instance success")
+	logger.Debug(ctx, "Initializing techprofile object instance success")
 	return &techprofileObj, nil
 }
 
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
-	logger.Debugw("get-tp-instance-kv-path", log.Fields{
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
+	logger.Debugw(ctx, "get-tp-instance-kv-path", log.Fields{
 		"uniPortName": uniPortName,
 		"tpId":        techProfiletblID,
 	})
@@ -460,12 +460,12 @@
 
 	kvResult, _ = t.config.KVBackend.Get(ctx, path)
 	if kvResult == nil {
-		log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+		logger.Infow(ctx, "tp-instance-not-found-on-kv", log.Fields{"key": path})
 		return nil, nil
 	} else {
 		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
 			if err = json.Unmarshal(value, resPtr); err != nil {
-				log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
 				return nil, errors.New("error-unmarshal-kv-result")
 			} else {
 				return resPtr, nil
@@ -476,29 +476,29 @@
 }
 
 func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
 
 func (t *TechProfileMgr) addEponProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *EponProfile) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
@@ -506,21 +506,21 @@
 func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
 	var kvtechprofile DefaultTechProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
 	kvresult, err := t.config.KVBackend.Get(ctx, key)
 	if err != nil {
-		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -530,21 +530,21 @@
 func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultEponProfile {
 	var kvtechprofile DefaultEponProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
 	kvresult, err := t.config.KVBackend.Get(ctx, key)
 	if err != nil {
-		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -555,14 +555,14 @@
 	var tpInstance *TechProfile
 	var tpEponInstance *EponProfile
 
-	logger.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+	logger.Infow(ctx, "creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
 
 	// Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
 	if !uniPortNameFormat.Match([]byte(uniPortName)) {
-		logger.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
 		return nil, errors.New("uni-port-name-not-confirming-to-format")
 	}
-	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	tpInstancePath := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
 	// For example:
 	// tpInstPath like "XGS-PON/64/uni_port_name"
 	// is broken into ["XGS-PON" "64" ...]
@@ -570,74 +570,74 @@
 	if pathSlice[0] == epon {
 		tp := t.getEponTPFromKVStore(ctx, techProfiletblID)
 		if tp != nil {
-			if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
-				logger.Error("invalid-instance-ctrl-attr--using-default-tp")
+			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
 				tp = t.getDefaultEponProfile()
 			} else {
-				logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 			}
 		} else {
-			logger.Info("tp-not-found-on-kv--creating-default-tp")
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
 			tp = t.getDefaultEponProfile()
 		}
 
 		if tpEponInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpEponInstance == nil {
-			logger.Error("tp-intance-allocation-failed")
+			logger.Error(ctx, "tp-intance-allocation-failed")
 			return nil, errors.New("tp-intance-allocation-failed")
 		}
 		if err := t.addEponProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpEponInstance); err != nil {
-			logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 			return nil, errors.New("error-adding-tp-to-kv-store")
 		}
-		logger.Infow("tp-added-to-kv-store-successfully",
+		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
 			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 		return tpEponInstance, nil
 	} else {
 		tp := t.getTPFromKVStore(ctx, techProfiletblID)
 		if tp != nil {
-			if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
-				logger.Error("invalid-instance-ctrl-attr--using-default-tp")
-				tp = t.getDefaultTechProfile()
+			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
+				tp = t.getDefaultTechProfile(ctx)
 			} else {
-				logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 			}
 		} else {
-			logger.Info("tp-not-found-on-kv--creating-default-tp")
-			tp = t.getDefaultTechProfile()
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+			tp = t.getDefaultTechProfile(ctx)
 		}
 
 		if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
-			logger.Error("tp-intance-allocation-failed")
+			logger.Error(ctx, "tp-intance-allocation-failed")
 			return nil, errors.New("tp-intance-allocation-failed")
 		}
 		if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
-			logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 			return nil, errors.New("error-adding-tp-to-kv-store")
 		}
-		logger.Infow("tp-added-to-kv-store-successfully",
+		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
 			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 		return tpInstance, nil
 	}
 }
 
 func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
 	return t.config.KVBackend.Delete(ctx, path)
 }
 
-func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
+func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl InstanceControl) error {
 	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
-		logger.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
 		return errors.New("invalid-onu-instance-ctl-attr")
 	}
 
 	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
-		logger.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
 		return errors.New("invalid-uni-instance-ctl-attr")
 	}
 
 	if instCtl.Uni == "multi-instance" {
-		logger.Error("uni-multi-instance-tp-not-supported")
+		logger.Error(ctx, "uni-multi-instance-tp-not-supported")
 		return errors.New("uni-multi-instance-tp-not-supported")
 	}
 
@@ -654,19 +654,19 @@
 	var gemPorts []uint32
 	var err error
 
-	logger.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+	logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
 
 	if tp.InstanceCtrl.Onu == "multi-instance" {
 		t.AllocIDMgmtLock.Lock()
 		tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
 		t.AllocIDMgmtLock.Unlock()
 		if err != nil {
-			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		}
 	} else { // "single-instance"
 		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
-			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		} else if tpInst == nil {
 			// No "single-instance" tp found on one any uni port for the given TP ID
@@ -675,7 +675,7 @@
 			tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
 			t.AllocIDMgmtLock.Unlock()
 			if err != nil {
-				logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 				return nil
 			}
 		} else {
@@ -683,15 +683,15 @@
 			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
 		}
 	}
-	logger.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	t.GemPortIDMgmtLock.Lock()
 	gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
 	t.GemPortIDMgmtLock.Unlock()
 	if err != nil {
-		logger.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
 	}
-	logger.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
@@ -705,7 +705,7 @@
 				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 	}
 
-	logger.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
 	//put multicast and unicast downstream GEM port attributes in different lists first
 	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
 		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
@@ -879,10 +879,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -903,10 +903,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -914,13 +914,12 @@
 	return nil, nil
 }
 
-func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
-
+func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *DefaultTechProfile {
 	var usGemPortAttributeList []GemPortAttribute
 	var dsGemPortAttributeList []GemPortAttribute
 
 	for _, pbit := range t.config.DefaultPbits {
-		logger.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+		logger.Debugw(ctx, "Creating GEM port", log.Fields{"pbit": pbit})
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			GemPortAttribute{
 				MaxQueueSize:     defaultMaxQueueSize,
@@ -1041,7 +1040,7 @@
 		DownstreamQueueAttributeList: dsQueueAttributeList}
 }
 
-func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+func (t *TechProfileMgr) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
 	var result int32 = -1
 
 	if paramType == "direction" {
@@ -1059,7 +1058,7 @@
 	} else if paramType == "sched_policy" {
 		for key, val := range tp_pb.SchedulingPolicy_value {
 			if key == paramKey {
-				logger.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
+				logger.Debugw(ctx, "Got value in proto", log.Fields{"key": key, "value": val})
 				result = val
 			}
 		}
@@ -1070,29 +1069,29 @@
 			}
 		}
 	} else {
-		logger.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		logger.Error(ctx, "Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
 		return -1
 	}
-	logger.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	logger.Debugw(ctx, "Got value in proto", log.Fields{"key": paramKey, "value": result})
 	return result
 }
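
GetprotoBufParamValue is a string-to-enum lookup: the tech-profile JSON stores enum names, and the generated *_value maps translate them to proto ids, with -1 meaning "not found". A sketch, with the voltha-protos import path assumed from the v3 generation:

package main

import (
	"fmt"

	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
)

func main() {
	result := int32(-1) // keep the method's "not found" convention
	if val, ok := tp_pb.Direction_value["UPSTREAM"]; ok {
		result = val
	}
	fmt.Println("direction proto id:", result)
}
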
 
-func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
+func (t *TechProfileMgr) GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		logger.Errorf(ctx, "Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 	}
 
@@ -1104,23 +1103,23 @@
 		SchedPolicy:  policy}, nil
 }
 
-func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+func (t *TechProfileMgr) GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
 
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
+	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		logger.Errorf(ctx, "Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 	}
 
@@ -1144,7 +1143,7 @@
 	return tSched
 }
 
-func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+func (tpm *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
 
 	var encryp bool
 	if Dir == tp_pb.Direction_UPSTREAM {
@@ -1158,20 +1157,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.UsScheduler.Direction)),
 				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -1181,7 +1180,7 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	} else if Dir == tp_pb.Direction_DOWNSTREAM {
 		//downstream GEM ports
@@ -1198,20 +1197,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
 				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -1221,11 +1220,11 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	}
 
-	logger.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
+	logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", Dir)
 	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
 }
 
@@ -1235,7 +1234,7 @@
 		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
 }
 
-func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue {
 	var encryp bool
 	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
 	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
@@ -1249,29 +1248,29 @@
 			encryp = false
 		}
 		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
-			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
 			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
 			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 			AesEncryption: encryp,
-			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
 			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
 			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
 		})
 	}
-	logger.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
 	return mcastTrafficQueues
 }
 
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
-	UsScheduler, _ := tpm.GetUsScheduler(tp)
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(ctx context.Context, tp *TechProfile) *tp_pb.TrafficScheduler {
+	UsScheduler, _ := tpm.GetUsScheduler(ctx, tp)
 
 	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
 		AllocId:   tp.UsScheduler.AllocID,
 		Scheduler: UsScheduler}
 }
 
-func (t *TechProfileMgr) GetGemportIDForPbit(tp interface{}, dir tp_pb.Direction, pbit uint32) uint32 {
+func (t *TechProfileMgr) GetGemportIDForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) uint32 {
 	/*
 	  Function to get the Gemport ID mapped to a pbit.
 	*/
@@ -1287,7 +1286,7 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
 							return tp.UpstreamGemPortAttributeList[gemCnt].GemportID
 						}
 					}
@@ -1303,14 +1302,14 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
 							return tp.DownstreamGemPortAttributeList[gemCnt].GemportID
 						}
 					}
 				}
 			}
 		}
-		logger.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+		logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	case *EponProfile:
 		if dir == tp_pb.Direction_UPSTREAM {
 			// upstream GEM ports
@@ -1322,7 +1321,7 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
 							return tp.UpstreamQueueAttributeList[gemCnt].GemportID
 						}
 					}
@@ -1338,16 +1337,16 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
 							return tp.DownstreamQueueAttributeList[gemCnt].GemportID
 						}
 					}
 				}
 			}
 		}
-		logger.Errorw("No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+		logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	default:
-		logger.Errorw("unknown-tech", log.Fields{"tp": tp})
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
 	}
 	return 0
 }
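
For callers of GetGemportIDForPbit the migration is mechanical: add ctx as the first argument. A hypothetical call site (tpMgr, tpInst, and pcp are assumed to exist in the caller's scope, not defined by this patch):

gemPort := tpMgr.GetGemportIDForPbit(ctx, tpInst, tp_pb.Direction_UPSTREAM, pcp)
if gemPort == 0 {
	// 0 doubles as the "not found" sentinel (the function ends in return 0),
	// so a lookup miss is indistinguishable from a literal GEM port ID of 0.
}
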
@@ -1368,14 +1367,14 @@
 			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 				if tech == xgspon || tech == gpon {
 					if err = json.Unmarshal(value, &tpTech); err != nil {
-						logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 						continue
 					} else {
 						tpInstancesTech = append(tpInstancesTech, tpTech)
 					}
 				} else if tech == epon {
 					if err = json.Unmarshal(value, &tpEpon); err != nil {
-						logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 						continue
 					} else {
 						tpInstancesEpon = append(tpInstancesEpon, tpEpon)
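
The FindAllTpInstances hunks repeat a decode-or-skip pattern. Reduced to its essentials it looks like the sketch below (kvPairs, kvPath, and the destination slice are assumptions about the surrounding scope); note that the else branches in the patched code are redundant after continue, and the flatter shape reads more easily:

for _, kvPair := range kvPairs {
	value, err := kvstore.ToByte(kvPair.Value)
	if err != nil {
		continue
	}
	var tpTech TechProfile
	if err := json.Unmarshal(value, &tpTech); err != nil {
		// Log with ctx and skip the malformed entry rather than abort the scan.
		logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
		continue
	}
	tpInstancesTech = append(tpInstancesTech, tpTech)
}
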
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
index 8391a5b..0af1d4e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
@@ -24,18 +24,18 @@
 )
 
 type TechProfileIf interface {
-	SetKVClient() *db.Backend
-	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+	SetKVClient(ctx context.Context) *db.Backend
+	GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string
 	GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error)
 	CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error)
 	DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
-	GetprotoBufParamValue(paramType string, paramKey string) int32
-	GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
-	GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32
+	GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
 	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
 		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
-	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
-	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
-	GetGemportIDForPbit(tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32
+	GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue
+	GetGemportIDForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32
 	FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) interface{}
 }
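
Every implementation of TechProfileIf, including test doubles, must grow the ctx parameter at the same time or compilation fails. Two standard Go idioms help here (sketches, assuming TechProfileMgr is intended to satisfy this interface and tpIf/tpInstance exist in the caller's scope):

// Fails to compile the moment TechProfileMgr drifts from the interface.
var _ TechProfileIf = (*TechProfileMgr)(nil)

// Call-site stopgap until the surrounding function carries its own context.
sched, err := tpIf.GetUsScheduler(context.TODO(), tpInstance)
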
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d08179a..0129f03 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -67,7 +67,7 @@
 github.com/mitchellh/go-homedir
 # github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/mapstructure
-# github.com/opencord/voltha-lib-go/v3 v3.1.22
+# github.com/opencord/voltha-lib-go/v3 v3.2.0
 github.com/opencord/voltha-lib-go/v3/pkg/adapters
 github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif
 github.com/opencord/voltha-lib-go/v3/pkg/adapters/common
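
The modules.txt entry only records the dependency bump; it has to stay in lockstep with go.mod and the vendored sources above. Assuming the standard Go modules workflow, the state captured in this diff is typically regenerated with:

	go get github.com/opencord/voltha-lib-go/v3@v3.2.0
	go mod tidy
	go mod vendor

Since modules.txt is written by go mod vendor, it should never be edited by hand.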