[VOL-2694] Use package-specific logger instance in all log statements
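
Each package keeps a single package-level logger that is registered with the log
service in init(), so its level can be changed at run time, and all log statements
in the package call that logger instead of the package-level helpers on the log
module. New common.go files add this registration to the config, grpc,
ponresourcemanager, probe and techprofile packages, matching the ones already
present in adapters/common, db, kvstore, flows, kafka and mocks. A minimal sketch
of the pattern, using a hypothetical package and function name ("example",
"doWork"):

    package example

    import (
    	"github.com/opencord/voltha-lib-go/v3/pkg/log"
    )

    var logger log.Logger

    func init() {
    	// Register this package with the log service so its log level can be
    	// modified at run time. ErrorLevel is the default used across the library.
    	var err error
    	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "example"})
    	if err != nil {
    		panic(err)
    	}
    }

    func doWork(deviceId string) {
    	// Call the package logger rather than the global log helpers.
    	logger.Debugw("doing-work", log.Fields{"deviceId": deviceId})
    }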

Change-Id: Ib33d89bb04750d3f95901f3d80f9d37d336a3e2c
diff --git a/VERSION b/VERSION
index 70f01b6..de2e889 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.0.25-dev
+3.0.25
diff --git a/pkg/adapters/common/common.go b/pkg/adapters/common/common.go
index acf818c..95a036d 100644
--- a/pkg/adapters/common/common.go
+++ b/pkg/adapters/common/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "common"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/adapters/common/common_test.go b/pkg/adapters/common/common_test.go
deleted file mode 100644
index d2d9f0e..0000000
--- a/pkg/adapters/common/common_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Portions copyright 2019-present Open Networking Foundation
- * Original copyright 2019-present Ciena Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the"github.com/stretchr/testify/assert" "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-/*
- * This file has common code that is imported for all test cases, but
- * is not built into production binaries.
- */
-
-import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-)
-
-const (
-	/*
-	 * This sets the LogLevel of the Voltha logger. It's pinned to FatalLevel here, as we
-	 * generally don't want to see logger output, even when running go test in verbose
-	 * mode. Even "Error" level messages are expected to be output by some unit tests.
-	 *
-	 * If you are developing a unit test, and experiencing problems or wish additional
-	 * debugging from Voltha, then changing this constant to log.DebugLevel may be
-	 * useful.
-	 */
-
-	VOLTHA_LOGLEVEL = log.FatalLevel
-)
-
-// Unit test initialization. This init() function will be run once for all unit tests in afrouter
-func init() {
-	// Logger must be configured or bad things happen
-	_, err := log.SetDefaultLogger(log.JSON, VOLTHA_LOGLEVEL, log.Fields{"instanceId": 1})
-	if err != nil {
-		panic(err)
-	}
-
-	_, err = log.AddPackage(log.JSON, VOLTHA_LOGLEVEL, nil)
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/pkg/adapters/common/core_proxy.go b/pkg/adapters/common/core_proxy.go
index 86f186d..9582f33 100644
--- a/pkg/adapters/common/core_proxy.go
+++ b/pkg/adapters/common/core_proxy.go
@@ -154,7 +154,7 @@
 }
 
 func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
-	log.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "PortsStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -202,7 +202,7 @@
 
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "DeviceStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
diff --git a/pkg/adapters/common/utils.go b/pkg/adapters/common/utils.go
index b782ebe..94e8bd6 100644
--- a/pkg/adapters/common/utils.go
+++ b/pkg/adapters/common/utils.go
@@ -84,7 +84,7 @@
 	case ic.ErrorCode_DEADLINE_EXCEEDED:
 		return codes.DeadlineExceeded
 	default:
-		log.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		logger.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
 		return codes.Internal
 	}
 }
diff --git a/pkg/config/common.go b/pkg/config/common.go
new file mode 100644
index 0000000..37e05fd
--- /dev/null
+++ b/pkg/config/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/config/configmanager.go b/pkg/config/configmanager.go
index 441c488..9f08b0d 100644
--- a/pkg/config/configmanager.go
+++ b/pkg/config/configmanager.go
@@ -24,13 +24,6 @@
 	"strings"
 )
 
-func init() {
-	_, err := log.AddPackage(log.JSON, log.FatalLevel, nil)
-	if err != nil {
-		log.Errorw("unable-to-register-package-to-the-log-map", log.Fields{"error": err})
-	}
-}
-
 const (
 	defaultkvStoreConfigPath = "config"
 	kvStoreDataPathPrefix    = "service/voltha"
@@ -171,7 +164,7 @@
 func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
 	key := c.makeConfigPath()
 
-	log.Debugw("monitoring-for-config-change", log.Fields{"key": key})
+	logger.Debugw("monitoring-for-config-change", log.Fields{"key": key})
 
 	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
 
@@ -188,14 +181,14 @@
 
 	ccKeyPrefix := c.makeConfigPath()
 
-	log.Debugw("processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+	logger.Debugw("processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
 
 	ccPathPrefix := c.cManager.backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
 
 	for watchResp := range c.kvStoreEventChan {
 
 		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
-			log.Warnw("received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			logger.Warnw("received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
 			continue
 		}
 
@@ -215,7 +208,7 @@
 func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
 	key := c.makeConfigPath() + "/" + configKey
 
-	log.Debugw("retrieving-config", log.Fields{"key": key})
+	logger.Debugw("retrieving-config", log.Fields{"key": key})
 
 	if kvpair, err := c.cManager.backend.Get(ctx, key); err != nil {
 		return "", err
@@ -225,7 +218,7 @@
 		}
 
 		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
-		log.Debugw("retrieved-config", log.Fields{"key": key, "value": value})
+		logger.Debugw("retrieved-config", log.Fields{"key": key, "value": value})
 		return value, nil
 	}
 }
@@ -233,7 +226,7 @@
 func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
 	key := c.makeConfigPath()
 
-	log.Debugw("retreiving-list", log.Fields{"key": key})
+	logger.Debugw("retreiving-list", log.Fields{"key": key})
 
 	data, err := c.cManager.backend.List(ctx, key)
 	if err != nil {
@@ -256,7 +249,7 @@
 func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
 	key := c.makeConfigPath() + "/" + configKey
 
-	log.Debugw("saving-config", log.Fields{"key": key, "value": configValue})
+	logger.Debugw("saving-config", log.Fields{"key": key, "value": configValue})
 
 	//save the data for update config
 	if err := c.cManager.backend.Put(ctx, key, configValue); err != nil {
@@ -269,7 +262,7 @@
 	//construct key using makeConfigPath
 	key := c.makeConfigPath() + "/" + configKey
 
-	log.Debugw("deleting-config", log.Fields{"key": key})
+	logger.Debugw("deleting-config", log.Fields{"key": key})
 	//delete the config
 	if err := c.cManager.backend.Delete(ctx, key); err != nil {
 		return err
diff --git a/pkg/config/logcontroller.go b/pkg/config/logcontroller.go
index 65927e6..b929c9d 100644
--- a/pkg/config/logcontroller.go
+++ b/pkg/config/logcontroller.go
@@ -51,7 +51,7 @@
 
 func NewComponentLogController(cm *ConfigManager) (*ComponentLogController, error) {
 
-	log.Debug("creating-new-component-log-controller")
+	logger.Debug("creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
 		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
@@ -80,15 +80,15 @@
 func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
 	cc, err := NewComponentLogController(cm)
 	if err != nil {
-		log.Errorw("unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		logger.Errorw("unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
 		return
 	}
 
 	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
-	log.Debugw("global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+	logger.Debugw("global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
 
 	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
-	log.Debugw("component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+	logger.Debugw("component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
 
 	cc.persistInitialDefaultLogConfigs(ctx)
 
@@ -101,21 +101,21 @@
 
 	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		log.Debugw("failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw("failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
 		if err != nil {
-			log.Errorw("failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+			logger.Errorw("failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
 		}
 	}
 
 	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		log.Debugw("failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw("failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
 		if err != nil {
-			log.Errorw("failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+			logger.Errorw("failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
 		}
 	}
 }
@@ -129,10 +129,10 @@
 	// Load and apply Log Config for first time
 	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
 	if err != nil {
-		log.Warnw("unable-to-load-log-config-at-startup", log.Fields{"error": err})
+		logger.Warnw("unable-to-load-log-config-at-startup", log.Fields{"error": err})
 	} else {
 		if err := c.loadAndApplyLogConfig(initialLogConfig); err != nil {
-			log.Warnw("unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+			logger.Warnw("unable-to-apply-log-config-at-startup", log.Fields{"error": err})
 		}
 	}
 
@@ -148,18 +148,18 @@
 		case configEvent = <-componentConfigEventChan:
 
 		}
-		log.Debugw("processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+		logger.Debugw("processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
 
 		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
 		if err != nil {
-			log.Warnw("unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			logger.Warnw("unable-to-fetch-updated-log-config", log.Fields{"error": err})
 			continue
 		}
 
-		log.Debugw("applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+		logger.Debugw("applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
 
 		if err := c.loadAndApplyLogConfig(updatedLogConfig); err != nil {
-			log.Warnw("unable-to-load-and-apply-log-config", log.Fields{"error": err})
+			logger.Warnw("unable-to-load-and-apply-log-config", log.Fields{"error": err})
 		}
 	}
 
@@ -178,7 +178,7 @@
 	for _, packageName := range log.GetPackageNames() {
 		level, err := log.GetPackageLogLevel(packageName)
 		if err != nil {
-			log.Warnw("unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			logger.Warnw("unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
 			continue
 		}
 
@@ -187,7 +187,7 @@
 		}
 	}
 
-	log.Debugw("retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+	logger.Debugw("retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
 
 	return loglevels
 }
@@ -202,16 +202,16 @@
 	// Handle edge cases when global default loglevel is deleted directly from etcd or set to a invalid value
 	// We should use hard-coded initial default value in such cases
 	if globalDefaultLogLevel == "" {
-		log.Warn("global-default-loglevel-not-found-in-config-store")
+		logger.Warn("global-default-loglevel-not-found-in-config-store")
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
 	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
-		log.Warnw("unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		logger.Warnw("unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
-	log.Debugw("retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+	logger.Debugw("retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
 
 	return globalDefaultLogLevel, nil
 }
@@ -225,7 +225,7 @@
 	effectiveDefaultLogLevel := ""
 	for logConfigKey, logConfigValue := range componentLogConfig {
 		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
-			log.Warnw("unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			logger.Warnw("unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
 			delete(componentLogConfig, logConfigKey)
 		} else {
 			if logConfigKey == defaultLogLevelKey {
@@ -242,7 +242,7 @@
 
 	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
 
-	log.Debugw("retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+	logger.Debugw("retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
 
 	return componentLogConfig, nil
 }
@@ -256,7 +256,7 @@
 func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
 	globalLogLevel, err := c.getGlobalLogConfig(ctx)
 	if err != nil {
-		log.Errorw("unable-to-retrieve-global-log-config", log.Fields{"err": err})
+		logger.Errorw("unable-to-retrieve-global-log-config", log.Fields{"err": err})
 	}
 
 	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
@@ -286,7 +286,7 @@
 		UpdateLogLevels(logConfig)
 		c.logHash = currentLogHash
 	} else {
-		log.Debug("effective-loglevel-config-same-as-currently-active")
+		logger.Debug("effective-loglevel-config-same-as-currently-active")
 	}
 
 	return nil
@@ -313,7 +313,7 @@
 	// Log warnings for all invalid packages for which log config has been set
 	for key, value := range updatedLogLevels {
 		if _, exist := activeLogLevels[key]; !exist {
-			log.Warnw("ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+			logger.Warnw("ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
 		}
 	}
 
@@ -330,11 +330,11 @@
 
 	// If no changed log levels are found, just return. It may happen on configuration of a invalid package
 	if len(changedLogLevels) == 0 {
-		log.Debug("no-change-in-effective-loglevel-config")
+		logger.Debug("no-change-in-effective-loglevel-config")
 		return
 	}
 
-	log.Debugw("applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	logger.Debugw("applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
 	for key, level := range changedLogLevels {
 		if key == defaultLogLevelKey {
 			if l, err := log.StringToLogLevel(level); err == nil {
diff --git a/pkg/db/common.go b/pkg/db/common.go
index a5a79ae..1cf2e1c 100644
--- a/pkg/db/common.go
+++ b/pkg/db/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/db/kvstore/common.go b/pkg/db/kvstore/common.go
index 2d2a6a6..aa7aeb0 100644
--- a/pkg/db/kvstore/common.go
+++ b/pkg/db/kvstore/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/flows/common.go b/pkg/flows/common.go
index b4fe7ec..557de3f 100644
--- a/pkg/flows/common.go
+++ b/pkg/flows/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "flowsUtils"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/grpc/common.go b/pkg/grpc/common.go
new file mode 100644
index 0000000..17eeeaf
--- /dev/null
+++ b/pkg/grpc/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "grpc"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/grpc/server.go b/pkg/grpc/server.go
index 33c9b76..43f2912 100644
--- a/pkg/grpc/server.go
+++ b/pkg/grpc/server.go
@@ -18,7 +18,6 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
@@ -100,19 +99,19 @@
 
 	lis, err := net.Listen("tcp", host)
 	if err != nil {
-		log.Fatalf("failed to listen: %v", err)
+		logger.Fatalf("failed to listen: %v", err)
 	}
 
 	if s.secure && s.GrpcSecurity != nil {
 		creds, err := credentials.NewServerTLSFromFile(s.CertFile, s.KeyFile)
 		if err != nil {
-			log.Fatalf("could not load TLS keys: %s", err)
+			logger.Fatalf("could not load TLS keys: %s", err)
 		}
 		s.gs = grpc.NewServer(grpc.Creds(creds),
 			withServerUnaryInterceptor(s))
 
 	} else {
-		log.Info("starting-insecure-grpc-server")
+		logger.Info("starting-insecure-grpc-server")
 		s.gs = grpc.NewServer(withServerUnaryInterceptor(s))
 	}
 
@@ -122,7 +121,7 @@
 	}
 
 	if err := s.gs.Serve(lis); err != nil {
-		log.Fatalf("failed to serve: %v\n", err)
+		logger.Fatalf("failed to serve: %v\n", err)
 	}
 }
 
@@ -145,7 +144,7 @@
 		handler grpc.UnaryHandler) (interface{}, error) {
 
 		if (s.probe != nil) && (!s.probe.IsReady()) {
-			log.Warnf("Grpc request received while not ready %v", req)
+			logger.Warnf("Grpc request received while not ready %v", req)
 			return nil, status.Error(codes.Unavailable, "system is not ready")
 		}
 
diff --git a/pkg/kafka/common.go b/pkg/kafka/common.go
index cb6acb2..149c150 100644
--- a/pkg/kafka/common.go
+++ b/pkg/kafka/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kafka"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/kafka/kafka_inter_container_library.go b/pkg/kafka/kafka_inter_container_library.go
index 5dbde9c..fc2334d 100644
--- a/pkg/kafka/kafka_inter_container_library.go
+++ b/pkg/kafka/kafka_inter_container_library.go
@@ -194,11 +194,11 @@
 	kp.kafkaClient.Stop()
 	err := kp.deleteAllTopicRequestHandlerChannelMap()
 	if err != nil {
-		log.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+		logger.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
 	}
 	err = kp.deleteAllTopicResponseChannelMap()
 	if err != nil {
-		log.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+		logger.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
 	}
 	kp.deleteAllTransactionIdToChannelMap()
 }
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 47fa3fb..d0169bd 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -499,11 +499,11 @@
 }
 
 func getPackageLevelSugaredLogger() *zp.SugaredLogger {
-	pkgName, fileName, funcName, line := getCallerInfo()
+	pkgName, _, _, _ := getCallerInfo()
 	if _, exist := loggers[pkgName]; exist {
-		return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+		return loggers[pkgName].log
 	}
-	return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+	return defaultLogger.log
 }
 
 func getPackageLevelLogger() Logger {
diff --git a/pkg/mocks/common.go b/pkg/mocks/common.go
index 839ac3e..90612bb 100644
--- a/pkg/mocks/common.go
+++ b/pkg/mocks/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "mocks"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/ponresourcemanager/common.go b/pkg/ponresourcemanager/common.go
new file mode 100644
index 0000000..0f4339e
--- /dev/null
+++ b/pkg/ponresourcemanager/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ponresourcemanager
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "ponresourcemanager"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/ponresourcemanager/ponresourcemanager.go b/pkg/ponresourcemanager/ponresourcemanager.go
index 79fefc5..9854b2b 100755
--- a/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/pkg/ponresourcemanager/ponresourcemanager.go
@@ -156,7 +156,7 @@
 }
 
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
-	log.Infow("kv-store-type", log.Fields{"store": storeType})
+	logger.Infow("kv-store-type", log.Fields{"store": storeType})
 	switch storeType {
 	case "consul":
 		return kvstore.NewConsulClient(address, timeout)
@@ -172,7 +172,7 @@
 	// issue between kv store and backend , core is not calling NewBackend directly
 	kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
 	if err != nil {
-		log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+		logger.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
 		return nil
 	}
 
@@ -205,18 +205,18 @@
 	PONMgr.Port = Port
 	PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port, false)
 	if PONMgr.KVStore == nil {
-		log.Error("KV Client initilization failed")
+		logger.Error("KV Client initilization failed")
 		return nil, errors.New("Failed to init KV client")
 	}
 	// init kv client to read from the config path
 	PONMgr.KVStoreForConfig = SetKVClient(Technology, Backend, Host, Port, true)
 	if PONMgr.KVStoreForConfig == nil {
-		log.Error("KV Config Client initilization failed")
+		logger.Error("KV Config Client initilization failed")
 		return nil, errors.New("Failed to init KV Config client")
 	}
 	// Initialize techprofile for this technology
 	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Host, Port); PONMgr.TechProfileMgr == nil {
-		log.Error("Techprofile initialization failed")
+		logger.Error("Techprofile initialization failed")
 		return nil, errors.New("Failed to init tech profile")
 	}
 	PONMgr.PonResourceRanges = make(map[string]interface{})
@@ -244,32 +244,32 @@
 	// Try to initialize the PON Resource Ranges from KV store based on the
 	// OLT model key, if available
 	if PONRMgr.OLTModel == "" {
-		log.Error("Failed to get OLT model")
+		logger.Error("Failed to get OLT model")
 		return false
 	}
 	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
 	//get resource from kv store
 	Result, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err != nil {
-		log.Debugf("Error in fetching resource %s from KV strore", Path)
+		logger.Debugf("Error in fetching resource %s from KV strore", Path)
 		return false
 	}
 	if Result == nil {
-		log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+		logger.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
 		return false
 	}
 	//update internal ranges from kv ranges. If there are missing
 	// values in the KV profile, continue to use the defaults
 	Value, err := ToByte(Result.Value)
 	if err != nil {
-		log.Error("Failed to convert kvpair to byte string")
+		logger.Error("Failed to convert kvpair to byte string")
 		return false
 	}
 	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
-		log.Error("Failed to Unmarshal json byte")
+		logger.Error("Failed to Unmarshal json byte")
 		return false
 	}
-	log.Debug("Init resource ranges from kvstore success")
+	logger.Debug("Init resource ranges from kvstore success")
 	return true
 }
 
@@ -285,7 +285,7 @@
 	   param: shared pool id
 	   param: global resource manager
 	*/
-	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+	logger.Debugf("update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
 		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
@@ -344,7 +344,7 @@
 	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
 	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
 	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
-	log.Debug("Initialize default range values")
+	logger.Debug("Initialize default range values")
 	var i uint32
 	if IntfIDs == nil {
 		for i = 0; i < NoOfPONPorts; i++ {
@@ -360,7 +360,7 @@
 
 	//Initialize resource pool for all PON ports.
 
-	log.Debug("Init resource ranges")
+	logger.Debug("Init resource ranges")
 
 	var err error
 	for _, Intf := range PONRMgr.IntfIDs {
@@ -371,7 +371,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ONU_ID,
 			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init ONU ID resource pool")
+			logger.Error("Failed to init ONU ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -387,7 +387,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ALLOC_ID,
 			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init ALLOC ID resource pool ")
+			logger.Error("Failed to init ALLOC ID resource pool ")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -402,7 +402,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, GEMPORT_ID,
 			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init GEMPORT ID resource pool")
+			logger.Error("Failed to init GEMPORT ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -418,7 +418,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, FLOW_ID,
 			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init FLOW ID resource pool")
+			logger.Error("Failed to init FLOW ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -432,7 +432,7 @@
 
 	//Clear resource pool for all PON ports.
 
-	log.Debug("Clear resource ranges")
+	logger.Debug("Clear resource ranges")
 
 	for _, Intf := range PONRMgr.IntfIDs {
 		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
@@ -440,7 +440,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ONU_ID); !status {
-			log.Error("Failed to clear ONU ID resource pool")
+			logger.Error("Failed to clear ONU ID resource pool")
 			return errors.New("Failed to clear ONU ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -454,7 +454,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ALLOC_ID); !status {
-			log.Error("Failed to clear ALLOC ID resource pool ")
+			logger.Error("Failed to clear ALLOC ID resource pool ")
 			return errors.New("Failed to clear ALLOC ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -467,7 +467,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, GEMPORT_ID); !status {
-			log.Error("Failed to clear GEMPORT ID resource pool")
+			logger.Error("Failed to clear GEMPORT ID resource pool")
 			return errors.New("Failed to clear GEMPORT ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -481,7 +481,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, FLOW_ID); !status {
-			log.Error("Failed to clear FLOW ID resource pool")
+			logger.Error("Failed to clear FLOW ID resource pool")
 			return errors.New("Failed to clear FLOW ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -510,7 +510,7 @@
 
 	Path := PONRMgr.GetPath(Intf, ResourceType)
 	if Path == "" {
-		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		logger.Errorf("Failed to get path for resource type %s", ResourceType)
 		return fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
 
@@ -518,7 +518,7 @@
 	//checked for its presence if not kv store update happens
 	Res, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (Res != nil) {
-		log.Debugf("Resource %s already present in store ", Path)
+		logger.Debugf("Resource %s already present in store ", Path)
 		return nil
 	} else {
 		var excluded []uint32
@@ -526,23 +526,23 @@
 			//get gem port ids defined in the KV store, if any, and exclude them from the gem port id pool
 			if reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx); defined {
 				excluded = reservedGemPortIds
-				log.Debugw("Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
+				logger.Debugw("Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
 			}
 		}
 		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID, excluded)
 		if err != nil {
-			log.Errorf("Failed to format resource")
+			logger.Errorf("Failed to format resource")
 			return err
 		}
 		// Add resource as json in kv store.
 		err = PONRMgr.KVStore.Put(ctx, Path, FormatResult)
 		if err == nil {
-			log.Debug("Successfuly posted to kv store")
+			logger.Debug("Successfuly posted to kv store")
 			return err
 		}
 	}
 
-	log.Debug("Error initializing pool")
+	logger.Debug("Error initializing pool")
 
 	return err
 }
@@ -552,7 +552,7 @@
 	// read reserved gem ports from the config path
 	KvPair, err := PONRMgr.KVStoreForConfig.Get(ctx, RESERVED_GEMPORT_IDS_PATH)
 	if err != nil {
-		log.Errorw("Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
+		logger.Errorw("Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if KvPair == nil || KvPair.Value == nil {
@@ -561,11 +561,11 @@
 	}
 	Val, err := kvstore.ToByte(KvPair.Value)
 	if err != nil {
-		log.Errorw("Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
+		logger.Errorw("Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if err = json.Unmarshal(Val, &reservedGemPortIds); err != nil {
-		log.Errorw("Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
+		logger.Errorw("Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	return reservedGemPortIds, true
@@ -593,12 +593,12 @@
 	*/
 	var TSData *bitmap.Threadsafe
 	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
-		log.Error("Failed to create a bitmap")
+		logger.Error("Failed to create a bitmap")
 		return nil, errors.New("Failed to create bitmap")
 	}
 	for _, excludedID := range Excluded {
 		if excludedID < StartIDx || excludedID > EndIDx {
-			log.Warnf("Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
+			logger.Warnf("Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
 				StartIDx, EndIDx)
 			continue
 		}
@@ -608,7 +608,7 @@
 
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		log.Errorf("Failed to marshall resource")
+		logger.Errorf("Failed to marshall resource")
 		return nil, err
 	}
 	return Value, err
@@ -628,7 +628,7 @@
 
 	Resource, err := PONRMgr.KVStore.Get(ctx, Path)
 	if (err != nil) || (Resource == nil) {
-		log.Debugf("Resource  unavailable at %s", Path)
+		logger.Debugf("Resource  unavailable at %s", Path)
 		return nil, err
 	}
 
@@ -640,7 +640,7 @@
 	// decode resource fetched from backend store to dictionary
 	err = json.Unmarshal(Value, &Result)
 	if err != nil {
-		log.Error("Failed to decode resource")
+		logger.Error("Failed to decode resource")
 		return Result, err
 	}
 	/*
@@ -650,13 +650,13 @@
 	*/
 	Str, err = ToString(Result[POOL])
 	if err != nil {
-		log.Error("Failed to conver to kv pair to string")
+		logger.Error("Failed to conver to kv pair to string")
 		return Result, err
 	}
 	Decode64, _ := base64.StdEncoding.DecodeString(Str)
 	Result[POOL], err = ToByte(Decode64)
 	if err != nil {
-		log.Error("Failed to convert resource pool to byte")
+		logger.Error("Failed to convert resource pool to byte")
 		return Result, err
 	}
 
@@ -689,7 +689,7 @@
 	} else if ResourceType == FLOW_ID {
 		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
 	} else {
-		log.Error("Invalid resource pool identifier")
+		logger.Error("Invalid resource pool identifier")
 	}
 	return Path
 }
@@ -704,7 +704,7 @@
 	    alloc_id/gemport_id, onu_id or invalid type respectively
 	*/
 	if NumIDs < 1 {
-		log.Error("Invalid number of resources requested")
+		logger.Error("Invalid number of resources requested")
 		return nil, fmt.Errorf("Invalid number of resources requested %d", NumIDs)
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -713,34 +713,34 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
 	}
-	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+	logger.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
 	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
-		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		logger.Errorf("Failed to get path for resource type %s", ResourceType)
 		return nil, fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
-	log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+	logger.Debugf("Get resource for type %s on path %s", ResourceType, Path)
 	var Result []uint32
 	var NextID uint32
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
 		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-			log.Error("Failed to Generate ID")
+			logger.Error("Failed to Generate ID")
 			return Result, err
 		}
 		Result = append(Result, NextID)
 	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
 		if NumIDs == 1 {
 			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-				log.Error("Failed to Generate ID")
+				logger.Error("Failed to Generate ID")
 				return Result, err
 			}
 			Result = append(Result, NextID)
 		} else {
 			for NumIDs > 0 {
 				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-					log.Error("Failed to Generate ID")
+					logger.Error("Failed to Generate ID")
 					return Result, err
 				}
 				Result = append(Result, NextID)
@@ -748,13 +748,13 @@
 			}
 		}
 	} else {
-		log.Error("get resource failed")
+		logger.Error("get resource failed")
 		return Result, err
 	}
 
 	//Update resource in kv store
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return nil, fmt.Errorf("Failed to update resource %s", Path)
 	}
 	return Result, nil
@@ -780,11 +780,11 @@
 	   :return boolean: True if all IDs in given release_content release else False
 	*/
 	if !checkValidResourceType(ResourceType) {
-		log.Error("Invalid resource type")
+		logger.Error("Invalid resource type")
 		return false
 	}
 	if ReleaseContent == nil {
-		log.Debug("Nothing to release")
+		logger.Debug("Nothing to release")
 		return true
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -794,19 +794,19 @@
 	}
 	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
-		log.Error("Failed to get path")
+		logger.Error("Failed to get path")
 		return false
 	}
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if err != nil {
-		log.Error("Failed to get resource")
+		logger.Error("Failed to get resource")
 		return false
 	}
 	for _, Val := range ReleaseContent {
 		PONRMgr.ReleaseID(Resource, Val)
 	}
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		log.Errorf("Free resource for %s failed", Path)
+		logger.Errorf("Free resource for %s failed", Path)
 		return false
 	}
 	return true
@@ -822,12 +822,12 @@
 	// TODO resource[POOL] = resource[POOL].bin
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		log.Error("failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 	err = PONRMgr.KVStore.Put(ctx, Path, Value)
 	if err != nil {
-		log.Error("failed to put data to kv store %s", Path)
+		logger.Error("failed to put data to kv store %s", Path)
 		return err
 	}
 	return nil
@@ -846,15 +846,15 @@
 	}
 	Path := PONRMgr.GetPath(contIntfID, ResourceType)
 	if Path == "" {
-		log.Error("Failed to get path")
+		logger.Error("Failed to get path")
 		return false
 	}
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		log.Errorf("Failed to delete resource %s", Path)
+		logger.Errorf("Failed to delete resource %s", Path)
 		return false
 	}
-	log.Debugf("Cleared resource %s", Path)
+	logger.Debugf("Cleared resource %s", Path)
 	return true
 }
 
@@ -868,7 +868,7 @@
 	var AllocIDs []byte
 	Result := PONRMgr.KVStore.Put(ctx, AllocIDPath, AllocIDs)
 	if Result != nil {
-		log.Error("Failed to update the KV store")
+		logger.Error("Failed to update the KV store")
 		return
 	}
 	// initialize pon_intf_onu_id tuple to gemport_ids map
@@ -876,7 +876,7 @@
 	var GEMPortIDs []byte
 	Result = PONRMgr.KVStore.Put(ctx, GEMPortIDPath, GEMPortIDs)
 	if Result != nil {
-		log.Error("Failed to update the KV store")
+		logger.Error("Failed to update the KV store")
 		return
 	}
 }
@@ -890,14 +890,14 @@
 	var err error
 	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	if err = PONRMgr.KVStore.Delete(ctx, AllocIDPath); err != nil {
-		log.Errorf("Failed to remove resource %s", AllocIDPath)
+		logger.Errorf("Failed to remove resource %s", AllocIDPath)
 		return false
 	}
 	// remove pon_intf_onu_id tuple to gemport_ids map
 	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	err = PONRMgr.KVStore.Delete(ctx, GEMPortIDPath)
 	if err != nil {
-		log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+		logger.Errorf("Failed to remove resource %s", GEMPortIDPath)
 		return false
 	}
 
@@ -906,14 +906,14 @@
 		for _, Flow := range FlowIDs {
 			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
 			if err = PONRMgr.KVStore.Delete(ctx, FlowIDInfoPath); err != nil {
-				log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+				logger.Errorf("Failed to remove resource %s", FlowIDInfoPath)
 				return false
 			}
 		}
 	}
 
 	if err = PONRMgr.KVStore.Delete(ctx, FlowIDPath); err != nil {
-		log.Errorf("Failed to remove resource %s", FlowIDPath)
+		logger.Errorf("Failed to remove resource %s", FlowIDPath)
 		return false
 	}
 
@@ -934,11 +934,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				logger.Errorw("Failed to convert into byte array", log.Fields{"error": err})
 				return Data
 			}
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Error("Failed to unmarshal", log.Fields{"error": err})
+				logger.Error("Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
@@ -954,19 +954,19 @@
 	*/
 
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	log.Debugf("Getting current gemports for %s", Path)
+	logger.Debugf("Getting current gemports for %s", Path)
 	var Data []uint32
 	Value, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err == nil {
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
 	} else {
-		log.Errorf("Failed to get data from kvstore for %s", Path)
+		logger.Errorf("Failed to get data from kvstore for %s", Path)
 	}
 	return Data
 }
@@ -986,7 +986,7 @@
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Error("Failed to unmarshal")
+				logger.Error("Failed to unmarshal")
 				return Data
 			}
 		}
@@ -1010,11 +1010,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				logger.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
 				return err
 			}
 			if err = json.Unmarshal(Val, Data); err != nil {
-				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -1031,7 +1031,7 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		log.Errorf("Falied to remove resource %s", Path)
+		logger.Errorf("Falied to remove resource %s", Path)
 		return false
 	}
 	return true
@@ -1048,12 +1048,12 @@
 	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
 	Value, err = json.Marshal(AllocIDs)
 	if err != nil {
-		log.Error("failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1069,15 +1069,15 @@
 	var Value []byte
 	var err error
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	log.Debugf("Updating gemport ids for %s", Path)
+	logger.Debugf("Updating gemport ids for %s", Path)
 	Value, err = json.Marshal(GEMPortIDs)
 	if err != nil {
-		log.Error("failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1128,12 +1128,12 @@
 	}
 	Value, err = json.Marshal(FlowIDs)
 	if err != nil {
-		log.Error("Failed to Marshal")
+		logger.Error("Failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1152,12 +1152,12 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 	Value, err = json.Marshal(FlowData)
 	if err != nil {
-		log.Error("failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1171,12 +1171,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		log.Error("Failed to convert resource to byte array")
+		logger.Error("Failed to convert resource to byte array")
 		return 0, err
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		log.Error("Failed to get data from byte array")
+		logger.Error("Failed to get data from byte array")
 		return 0, errors.New("Failed to get data from byte array")
 	}
 
@@ -1190,7 +1190,7 @@
 	Data.Set(Idx, true)
 	res := uint32(Resource[START_IDX].(float64))
 	Resource[POOL] = Data.Data(false)
-	log.Debugf("Generated ID for %d", (uint32(Idx) + res))
+	logger.Debugf("Generated ID for %d", (uint32(Idx) + res))
 	return (uint32(Idx) + res), err
 }
 
@@ -1202,12 +1202,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		log.Error("Failed to convert resource to byte array")
+		logger.Error("Failed to convert resource to byte array")
 		return false
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		log.Error("Failed to get resource pool")
+		logger.Error("Failed to get resource pool")
 		return false
 	}
 	Idx := Id - uint32(Resource[START_IDX].(float64))
@@ -1224,7 +1224,7 @@
 func (PONRMgr *PONResourceManager) reserveID(TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
 	Data := bitmap.TSFromData(TSData.Data(false), false)
 	if Data == nil {
-		log.Error("Failed to get resource pool")
+		logger.Error("Failed to get resource pool")
 		return false
 	}
 	Idx := Id - StartIndex
@@ -1281,12 +1281,12 @@
 	Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
 	Value, err = json.Marshal(onuGemData)
 	if err != nil {
-		log.Error("failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1303,22 +1303,22 @@
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
 	value, err := PONRMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		log.Errorw("Failed to get from kv store", log.Fields{"path": path})
+		logger.Errorw("Failed to get from kv store", log.Fields{"path": path})
 		return err
 	} else if value == nil {
-		log.Debug("No onuinfo for path", log.Fields{"path": path})
+		logger.Debug("No onuinfo for path", log.Fields{"path": path})
 		return nil // returning nil as this could happen if there are no onus for the interface yet
 	}
 	if Val, err = kvstore.ToByte(value.Value); err != nil {
-		log.Error("Failed to convert to byte array")
+		logger.Error("Failed to convert to byte array")
 		return err
 	}
 
 	if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
-		log.Error("Failed to unmarshall")
+		logger.Error("Failed to unmarshall")
 		return err
 	}
-	log.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
+	logger.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
 	return err
 }
 
@@ -1330,7 +1330,7 @@
 
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
 	if err := PONRMgr.KVStore.Delete(ctx, path); err != nil {
-		log.Errorf("Falied to remove resource %s", path)
+		logger.Errorf("Falied to remove resource %s", path)
 		return err
 	}
 	return nil
diff --git a/pkg/ponresourcemanager/ponresourcemanager_test.go b/pkg/ponresourcemanager/ponresourcemanager_test.go
index e4d8021..11c0072 100644
--- a/pkg/ponresourcemanager/ponresourcemanager_test.go
+++ b/pkg/ponresourcemanager/ponresourcemanager_test.go
@@ -33,13 +33,6 @@
 	RESERVED_GEM_PORT_ID = uint32(5)
 )
 
-func init() {
-	_, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, nil)
-	if err != nil {
-		panic(err)
-	}
-}
-
 // MockKVClient mocks the AdapterProxy interface.
 type MockResKVClient struct {
 	resourceMap map[string]interface{}
@@ -58,16 +51,16 @@
 
 // Get mock function implementation for KVClient
 func (kvclient *MockResKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	log.Debugw("Get of MockKVClient called", log.Fields{"key": key})
+	logger.Debugw("Get of MockKVClient called", log.Fields{"key": key})
 	if key != "" {
 		if strings.Contains(key, RESERVED_GEMPORT_IDS_PATH) {
-			log.Debug("Getting Key:", RESERVED_GEMPORT_IDS_PATH)
+			logger.Debug("Getting Key:", RESERVED_GEMPORT_IDS_PATH)
 			reservedGemPorts := []uint32{RESERVED_GEM_PORT_ID}
 			str, _ := json.Marshal(reservedGemPorts)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, GEM_POOL_PATH) {
-			log.Debug("Getting Key:", GEM_POOL_PATH)
+			logger.Debug("Getting Key:", GEM_POOL_PATH)
 			resource := kvclient.resourceMap[key]
 			return kvstore.NewKVPair(key, resource, "mock", 3000, 1), nil
 		}
diff --git a/pkg/probe/common.go b/pkg/probe/common.go
new file mode 100644
index 0000000..211419d
--- /dev/null
+++ b/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/probe/probe.go b/pkg/probe/probe.go
index 932c287..e89d5bc 100644
--- a/pkg/probe/probe.go
+++ b/pkg/probe/probe.go
@@ -118,7 +118,7 @@
 	for _, name := range names {
 		if _, ok := p.status[name]; !ok {
 			p.status[name] = ServiceStatusUnknown
-			log.Debugw("probe-service-registered", log.Fields{"service-name": name})
+			logger.Debugw("probe-service-registered", log.Fields{"service-name": name})
 		}
 	}
 
@@ -161,7 +161,7 @@
 	} else {
 		p.isHealthy = defaultHealthFunc(p.status)
 	}
-	log.Debugw("probe-service-status-updated",
+	logger.Debugw("probe-service-status-updated",
 		log.Fields{
 			"service-name": name,
 			"status":       status.String(),
@@ -232,21 +232,21 @@
 	defer p.mutex.RUnlock()
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write([]byte("{")); err != nil {
-		log.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw("write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
 	comma := ""
 	for c, s := range p.status {
 		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
-			log.Errorw("write-response", log.Fields{"error": err})
+			logger.Errorw("write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
 		}
 		comma = ", "
 	}
 	if _, err := w.Write([]byte("}")); err != nil {
-		log.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw("write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -269,7 +269,7 @@
 		Addr:    address,
 		Handler: mux,
 	}
-	log.Fatal(s.ListenAndServe())
+	logger.Fatal(s.ListenAndServe())
 }
 
 func (p *Probe) IsReady() bool {
diff --git a/pkg/probe/probe_test.go b/pkg/probe/probe_test.go
index 93f9a03..77cd6a8 100644
--- a/pkg/probe/probe_test.go
+++ b/pkg/probe/probe_test.go
@@ -18,22 +18,14 @@
 import (
 	"context"
 	"encoding/json"
+	"github.com/stretchr/testify/assert"
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"testing"
 	"time"
-
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/stretchr/testify/assert"
 )
 
-func init() {
-	if _, err := log.AddPackage(log.JSON, log.WarnLevel, nil); err != nil {
-		log.Fatalw("adding-log-package", log.Fields{"error": err})
-	}
-}
-
 func TestServiceStatusString(t *testing.T) {
 	assert.Equal(t, "Unknown", ServiceStatusUnknown.String(), "ServiceStatusUnknown")
 	assert.Equal(t, "Preparing", ServiceStatusPreparing.String(), "ServiceStatusPreparing")
diff --git a/pkg/techprofile/common.go b/pkg/techprofile/common.go
new file mode 100644
index 0000000..42818f1
--- /dev/null
+++ b/pkg/techprofile/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
+	if err != nil {
+		panic(err)
+	}
+}
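
pkg/probe and pkg/techprofile now carry identical bootstrap files, so any package adopting this convention needs only a common.go of the same shape. A minimal template follows; `mypkg` is a placeholder for the real package name.

```go
package mypkg // placeholder: use the real package name

import (
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// logger is the package-specific logger; all log statements in this package use it.
var logger log.Logger

func init() {
	// Set up this package so that its log level can be modified at run time.
	var err error
	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
	if err != nil {
		panic(err)
	}
}
```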
diff --git a/pkg/techprofile/tech_profile.go b/pkg/techprofile/tech_profile.go
index ba8855f..2c6e67b 100644
--- a/pkg/techprofile/tech_profile.go
+++ b/pkg/techprofile/tech_profile.go
@@ -245,7 +245,7 @@
 	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
 	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
 	if err != nil {
-		log.Errorw("failed-to-create-kv-client",
+		logger.Errorw("failed-to-create-kv-client",
 			log.Fields{
 				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
 				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
@@ -270,7 +270,7 @@
 
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
 
-	log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+	logger.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
 	switch storeType {
 	case "consul":
 		return kvstore.NewConsulClient(address, timeout)
@@ -282,15 +282,15 @@
 
 func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) {
 	var techprofileObj TechProfileMgr
-	log.Debug("Initializing techprofile Manager")
+	logger.Debug("Initializing techprofile Manager")
 	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort)
 	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
 	if techprofileObj.config.KVBackend == nil {
-		log.Error("Failed to initialize KV backend\n")
+		logger.Error("Failed to initialize KV backend\n")
 		return nil, errors.New("KV backend init failed")
 	}
 	techprofileObj.resourceMgr = resourceMgr
-	log.Debug("Initializing techprofile object instance success")
+	logger.Debug("Initializing techprofile object instance success")
 	return &techprofileObj, nil
 }
 
@@ -306,12 +306,12 @@
 
 	kvResult, _ = t.config.KVBackend.Get(ctx, path)
 	if kvResult == nil {
-		log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+		logger.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
 		return nil, nil
 	} else {
 		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
 			if err = json.Unmarshal(value, resPtr); err != nil {
-				log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+				logger.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
 				return nil, errors.New("error-unmarshal-kv-result")
 			} else {
 				return resPtr, nil
@@ -323,35 +323,35 @@
 
 func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
 	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
 func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
 	var kvtechprofile DefaultTechProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
 	kvresult, err := t.config.KVBackend.Get(ctx, key)
 	if err != nil {
-		log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				log.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -360,36 +360,36 @@
 
 func (t *TechProfileMgr) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error) {
 	var tpInstance *TechProfile
-	log.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+	logger.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
 
 	// Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
 	if !uniPortNameFormat.Match([]byte(uniPortName)) {
-		log.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		logger.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
 		return nil, errors.New("uni-port-name-not-confirming-to-format")
 	}
 
 	tp := t.getTPFromKVStore(ctx, techProfiletblID)
 	if tp != nil {
 		if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
-			log.Error("invalid-instance-ctrl-attr--using-default-tp")
+			logger.Error("invalid-instance-ctrl-attr--using-default-tp")
 			tp = t.getDefaultTechProfile()
 		} else {
-			log.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+			logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 		}
 	} else {
-		log.Info("tp-not-found-on-kv--creating-default-tp")
+		logger.Info("tp-not-found-on-kv--creating-default-tp")
 		tp = t.getDefaultTechProfile()
 	}
 	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
 	if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
-		log.Error("tp-intance-allocation-failed")
+		logger.Error("tp-intance-allocation-failed")
 		return nil, errors.New("tp-intance-allocation-failed")
 	}
 	if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
-		log.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+		logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 		return nil, errors.New("error-adding-tp-to-kv-store")
 	}
-	log.Infow("tp-added-to-kv-store-successfully",
+	logger.Infow("tp-added-to-kv-store-successfully",
 		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 	return tpInstance, nil
 }
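
To place CreateTechProfInstance in context, a hedged usage sketch is shown below. The constructor and method signatures are taken from the hunks above; the KV store address, TP table ID 64, and the UNI port name are illustrative values, and the helper itself is hypothetical.

```go
package techprofile

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// provisionExample is a hypothetical illustration of the flow above: build the
// manager, then create a tech profile instance for one UNI port.
func provisionExample(ctx context.Context, resourceMgr iPonResourceMgr) (*TechProfile, error) {
	// "consul" matches the newKVClient switch above; host and port are placeholders.
	tpMgr, err := NewTechProfile(resourceMgr, "consul", "127.0.0.1", 8500)
	if err != nil {
		logger.Errorw("tech-profile-manager-init-failed", log.Fields{"error": err})
		return nil, err
	}
	// TP table ID 64 is a placeholder; the UNI name must match the
	// pon-{...}/onu-{...}/uni-{...} format checked in CreateTechProfInstance.
	return tpMgr.CreateTechProfInstance(ctx, 64, "pon-0/onu-1/uni-0", 0)
}
```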
@@ -401,17 +401,17 @@
 
 func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
 	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
-		log.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		logger.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
 		return errors.New("invalid-onu-instance-ctl-attr")
 	}
 
 	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
-		log.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		logger.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
 		return errors.New("invalid-uni-instance-ctl-attr")
 	}
 
 	if instCtl.Uni == "multi-instance" {
-		log.Error("uni-multi-instance-tp-not-supported")
+		logger.Error("uni-multi-instance-tp-not-supported")
 		return errors.New("uni-multi-instance-tp-not-supported")
 	}
 
@@ -428,22 +428,22 @@
 	var gemPorts []uint32
 	var err error
 
-	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+	logger.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
 
 	if tp.InstanceCtrl.Onu == "multi-instance" {
 		if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		}
 	} else { // "single-instance"
 		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
-			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		} else if tpInst == nil {
 			// No "single-instance" tp found on one any uni port for the given TP ID
 			// Allocate a new TcontID or AllocID
 			if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-				log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 				return nil
 			}
 		} else {
@@ -451,12 +451,12 @@
 			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
 		}
 	}
-	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	logger.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
-		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		logger.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
 	}
-	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	logger.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
@@ -470,7 +470,7 @@
 				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 	}
 
-	log.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	logger.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
 	//put multicast and unicast downstream GEM port attributes in different lists first
 	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
 		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
@@ -559,10 +559,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				log.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				log.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -576,7 +576,7 @@
 	var dsGemPortAttributeList []GemPortAttribute
 
 	for _, pbit := range t.config.DefaultPbits {
-		log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+		logger.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			GemPortAttribute{
 				MaxQueueSize:     defaultMaxQueueSize,
@@ -651,7 +651,7 @@
 	} else if paramType == "sched_policy" {
 		for key, val := range tp_pb.SchedulingPolicy_value {
 			if key == paramKey {
-				log.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
+				logger.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
 				result = val
 			}
 		}
@@ -662,29 +662,29 @@
 			}
 		}
 	} else {
-		log.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		logger.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
 		return -1
 	}
-	log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	logger.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
 	return result
 }
 
 func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
 	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
-		log.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		logger.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 	}
 
 	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
-		log.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		logger.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 	}
 
 	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
-		log.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		logger.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 	}
 
@@ -700,19 +700,19 @@
 
 	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
-		log.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		logger.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 	}
 
 	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
-		log.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		logger.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 	}
 
 	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
-		log.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		logger.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 	}
 
@@ -752,13 +752,13 @@
 
 			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				log.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
 			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				log.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
@@ -773,7 +773,7 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	} else if Dir == tp_pb.Direction_DOWNSTREAM {
 		//downstream GEM ports
@@ -792,13 +792,13 @@
 
 			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				log.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
 			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				log.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
@@ -813,11 +813,11 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	}
 
-	log.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
+	logger.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
 	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
 }
 
@@ -851,7 +851,7 @@
 			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
 		})
 	}
-	log.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	logger.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
 	return mcastTrafficQueues
 }
 
@@ -875,7 +875,7 @@
 			for ICount := 2; ICount < NumPbitMaps; ICount++ {
 				if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
 					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
-						log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
+						logger.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
 						return tp.UpstreamGemPortAttributeList[Count].GemportID
 					}
 				}
@@ -889,14 +889,14 @@
 			for ICount := 2; ICount < NumPbitMaps; ICount++ {
 				if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
 					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
-						log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
+						logger.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
 						return tp.DownstreamGemPortAttributeList[Count].GemportID
 					}
 				}
 			}
 		}
 	}
-	log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	logger.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	return 0
 }
 
@@ -910,7 +910,7 @@
 		for kvPath, kvPair := range kvPairs {
 			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 				if err = json.Unmarshal(value, &tp); err != nil {
-					log.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+					logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 					continue
 				} else {
 					tpInstances = append(tpInstances, tp)
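
Several of the hunks above repeat the same fetch-and-decode sequence against the KV backend: Get, kvstore.ToByte, json.Unmarshal, with the package logger reporting failures. A hypothetical helper capturing that pattern is sketched below; the *db.Backend type and its import path are assumptions inferred from how t.config.KVBackend is used here, and the helper does not exist in the codebase.

```go
package techprofile

import (
	"context"
	"encoding/json"
	"errors"

	"github.com/opencord/voltha-lib-go/v3/pkg/db"
	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// getAndUnmarshal is a hypothetical helper illustrating the fetch/decode pattern
// used by getTPFromKVStore and GetTPInstanceFromKVStore above.
func getAndUnmarshal(ctx context.Context, backend *db.Backend, key string, out interface{}) error {
	kvResult, err := backend.Get(ctx, key)
	if err != nil {
		logger.Errorw("error-fetching-from-kv-store", log.Fields{"key": key, "error": err})
		return err
	}
	if kvResult == nil {
		logger.Infow("kv-entry-not-found", log.Fields{"key": key})
		return errors.New("kv-entry-not-found")
	}
	// The backend returns the value in string form; convert it before unmarshaling.
	value, err := kvstore.ToByte(kvResult.Value)
	if err != nil {
		logger.Errorw("error-converting-kv-value", log.Fields{"key": key, "error": err})
		return err
	}
	if err := json.Unmarshal(value, out); err != nil {
		logger.Errorw("error-unmarshal-kv-result", log.Fields{"key": key, "value": value})
		return errors.New("error-unmarshal-kv-result")
	}
	return nil
}
```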