[VOL-2694] Use package specific logger instance in all log statements
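
Each package now declares its own logger instance in a common.go file
and registers it with the log package so that the package's log level
can be modified at run time. A minimal sketch of the pattern, as
introduced in db/model/common.go:

    package model

    import "github.com/opencord/voltha-lib-go/v3/pkg/log"

    var logger log.Logger

    func init() {
        // Register this package with the log service so that its log
        // level can be modified at run time.
        var err error
        logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "model"})
        if err != nil {
            panic(err)
        }
    }

Log statements then go through the package logger, e.g.
logger.Debugw(...) instead of log.Debugw(...). References to
log.Fields are left untouched, since Fields is a type exported by the
log package rather than a call on the default logger.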

Change-Id: Icf1cb5ade42e42179aed7731b767af2f52481e3d
diff --git a/db/model/base_test.go b/db/model/base_test.go
index 91fa89f..94bb185 100644
--- a/db/model/base_test.go
+++ b/db/model/base_test.go
@@ -20,17 +20,16 @@
 	"runtime/debug"
 	"sync"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 var callbackMutex sync.Mutex
 
 func commonChanCallback(ctx context.Context, args ...interface{}) interface{} {
-	log.Infof("Running common callback - arg count: %d", len(args))
+	logger.Infof("Running common callback - arg count: %d", len(args))
 
 	//for i := 0; i < len(args); i++ {
-	//	log.Infof("ARG %d : %+v", i, args[i])
+	//	logger.Infof("ARG %d : %+v", i, args[i])
 	//}
 
 	callbackMutex.Lock()
@@ -40,7 +39,7 @@
 
 	// Inform the caller that the callback was executed
 	if *execDoneChan != nil {
-		log.Infof("Sending completion indication - stack:%s", string(debug.Stack()))
+		logger.Infof("Sending completion indication - stack:%s", string(debug.Stack()))
 		close(*execDoneChan)
 		*execDoneChan = nil
 	}
@@ -49,16 +48,16 @@
 }
 
 func commonCallback2(ctx context.Context, args ...interface{}) interface{} {
-	log.Infof("Running common2 callback - arg count: %d %+v", len(args), args)
+	logger.Infof("Running common2 callback - arg count: %d %+v", len(args), args)
 
 	return nil
 }
 
 func commonCallbackFunc(ctx context.Context, args ...interface{}) interface{} {
-	log.Infof("Running common callback - arg count: %d", len(args))
+	logger.Infof("Running common callback - arg count: %d", len(args))
 
 	for i := 0; i < len(args); i++ {
-		log.Infof("ARG %d : %+v", i, args[i])
+		logger.Infof("ARG %d : %+v", i, args[i])
 	}
 	execStatusFunc := args[1].(func(bool))
 
@@ -71,14 +70,14 @@
 func firstCallback(ctx context.Context, args ...interface{}) interface{} {
 	name := args[0]
 	id := args[1]
-	log.Infof("Running first callback - name: %s, id: %s\n", name, id)
+	logger.Infof("Running first callback - name: %s, id: %s\n", name, id)
 	return nil
 }
 
 func secondCallback(ctx context.Context, args ...interface{}) interface{} {
 	name := args[0].(map[string]string)
 	id := args[1]
-	log.Infof("Running second callback - name: %s, id: %f\n", name["name"], id)
+	logger.Infof("Running second callback - name: %s, id: %f\n", name["name"], id)
 	// FIXME: the panic call seem to interfere with the logging mechanism
 	//panic("Generating a panic in second callback")
 	return nil
@@ -87,6 +86,6 @@
 func thirdCallback(ctx context.Context, args ...interface{}) interface{} {
 	name := args[0]
 	id := args[1].(*voltha.Device)
-	log.Infof("Running third callback - name: %+v, id: %s\n", name, id.Id)
+	logger.Infof("Running third callback - name: %+v, id: %s\n", name, id.Id)
 	return nil
 }
diff --git a/db/model/branch.go b/db/model/branch.go
index 675c45d..17d9ece 100644
--- a/db/model/branch.go
+++ b/db/model/branch.go
@@ -90,7 +90,7 @@
 	defer b.mutex.Unlock()
 
 	if b.Latest != nil {
-		log.Debugw("updating-latest-revision", log.Fields{"current": b.Latest.GetHash(), "new": latest.GetHash()})
+		logger.Debugw("updating-latest-revision", log.Fields{"current": b.Latest.GetHash(), "new": latest.GetHash()})
 
 		// Go through list of children names in current revision and new revision
 		// and then compare the resulting outputs to ensure that we have not lost any entries.
@@ -99,20 +99,20 @@
 			var previousNames, latestNames, missingNames []string
 
 			if previousNames = b.retrieveChildrenNames(b.Latest); len(previousNames) > 0 {
-				log.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
+				logger.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
 			}
 
 			if latestNames = b.retrieveChildrenNames(b.Latest); len(latestNames) > 0 {
-				log.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
+				logger.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
 			}
 
 			if missingNames = b.findMissingChildrenNames(previousNames, latestNames); len(missingNames) > 0 {
-				log.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
+				logger.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
 			}
 		}
 
 	} else {
-		log.Debugw("setting-latest-revision", log.Fields{"new": latest.GetHash()})
+		logger.Debugw("setting-latest-revision", log.Fields{"new": latest.GetHash()})
 	}
 
 	b.Latest = latest
diff --git a/db/model/child_type.go b/db/model/child_type.go
index a0e15ef..494c0ef 100644
--- a/db/model/child_type.go
+++ b/db/model/child_type.go
@@ -20,7 +20,6 @@
 	desc "github.com/golang/protobuf/descriptor"
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-protos/v3/go/common"
 	"reflect"
 	"strconv"
@@ -111,7 +110,7 @@
 							return uint64(i)
 						}
 					default:
-						log.Errorf("Key type not implemented - type: %s\n", keyType.(reflect.Type))
+						logger.Errorf("Key type not implemented - type: %s\n", keyType.(reflect.Type))
 					}
 				}
 
diff --git a/db/model/common.go b/db/model/common.go
new file mode 100644
index 0000000..a501c52
--- /dev/null
+++ b/db/model/common.go
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package model Common Logger initialization
+package model
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "model"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/db/model/data_revision.go b/db/model/data_revision.go
index a0d5fc4..0861d76 100644
--- a/db/model/data_revision.go
+++ b/db/model/data_revision.go
@@ -24,7 +24,6 @@
 	"reflect"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 // DataRevision stores the data associated to a revision along with its calculated checksum hash value
@@ -47,7 +46,7 @@
 
 	if IsProtoMessage(data) {
 		if pbdata, err := proto.Marshal(data.(proto.Message)); err != nil {
-			log.Debugf("problem to marshal protobuf data --> err: %s", err.Error())
+			logger.Debugf("problem to marshal protobuf data --> err: %s", err.Error())
 		} else {
 			buffer.Write(pbdata)
 			// To ensure uniqueness in case data is nil, also include data type
@@ -57,7 +56,7 @@
 	} else if reflect.ValueOf(data).IsValid() {
 		dataObj := reflect.New(reflect.TypeOf(data).Elem())
 		if json, err := json.Marshal(dataObj.Interface()); err != nil {
-			log.Debugf("problem to marshal data --> err: %s", err.Error())
+			logger.Debugf("problem to marshal data --> err: %s", err.Error())
 		} else {
 			buffer.Write(json)
 		}
diff --git a/db/model/event_bus.go b/db/model/event_bus.go
index 4295c2d..f3dfcb2 100644
--- a/db/model/event_bus.go
+++ b/db/model/event_bus.go
@@ -20,7 +20,6 @@
 	"encoding/json"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
@@ -57,7 +56,7 @@
 	data := args[2:]
 
 	if _, ok := ignoredCallbacks[eventType]; ok {
-		log.Debugf("ignoring event - type:%s, data:%+v", eventType, data)
+		logger.Debugf("ignoring event - type:%s, data:%+v", eventType, data)
 	}
 	var kind voltha.ConfigEventType_Types
 	switch eventType {
@@ -73,14 +72,14 @@
 	var err error
 	if IsProtoMessage(data) {
 		if msg, err = proto.Marshal(data[0].(proto.Message)); err != nil {
-			log.Debugf("problem marshalling proto data: %+v, err:%s", data[0], err.Error())
+			logger.Debugf("problem marshalling proto data: %+v, err:%s", data[0], err.Error())
 		}
 	} else if data[0] != nil {
 		if msg, err = json.Marshal(data[0]); err != nil {
-			log.Debugf("problem marshalling json data: %+v, err:%s", data[0], err.Error())
+			logger.Debugf("problem marshalling json data: %+v, err:%s", data[0], err.Error())
 		}
 	} else {
-		log.Debugf("no data to advertise : %+v", data[0])
+		logger.Debugf("no data to advertise : %+v", data[0])
 	}
 
 	event := voltha.ConfigEvent{
diff --git a/db/model/event_bus_client.go b/db/model/event_bus_client.go
index e25f246..93a64f9 100644
--- a/db/model/event_bus_client.go
+++ b/db/model/event_bus_client.go
@@ -17,7 +17,6 @@
 package model
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
@@ -32,5 +31,5 @@
 
 // Publish sends a event to the bus
 func (ebc *EventBusClient) Publish(topic string, event voltha.ConfigEvent) {
-	log.Debugf("publishing event:%+v, topic:%s\n", event, topic)
+	logger.Debugf("publishing event:%+v, topic:%s\n", event, topic)
 }
diff --git a/db/model/merge.go b/db/model/merge.go
index 5d46545..01e942b 100644
--- a/db/model/merge.go
+++ b/db/model/merge.go
@@ -93,7 +93,7 @@
 	mergeChildFunc func(Revision) Revision,
 	dryRun bool) (rev Revision, changes []ChangeTuple) {
 
-	log.Debugw("3-way-merge-request", log.Fields{"dryRun": dryRun})
+	logger.Debugw("3-way-merge-request", log.Fields{"dryRun": dryRun})
 
 	var configChanged bool
 	var revsToDiscard []Revision
@@ -102,7 +102,7 @@
 		configChanged = dstRev.GetConfig() != srcRev.GetConfig()
 	} else {
 		if dstRev.GetConfig().Hash != srcRev.GetConfig().Hash {
-			log.Error("config-collision")
+			logger.Error("config-collision")
 		}
 		configChanged = true
 	}
@@ -132,7 +132,7 @@
 		if field.Key == "" {
 			if revisionsAreEqual(dstList, forkList) {
 				if !revisionsAreEqual(srcList, forkList) {
-					log.Error("we should not be here")
+					logger.Error("we should not be here")
 				} else {
 					for _, rev := range srcList {
 						newChildren[fieldName] = append(newChildren[fieldName], mergeChildFunc(rev))
@@ -146,7 +146,7 @@
 				}
 			} else {
 				if !revisionsAreEqual(srcList, forkList) {
-					log.Error("cannot merge - single child node or un-keyed children list has changed")
+					logger.Error("cannot merge - single child node or un-keyed children list has changed")
 				}
 			}
 		} else {
@@ -198,7 +198,7 @@
 						if childDstRev.GetHash() == childSrcRev.GetHash() {
 							mergeChildFunc(childDstRev)
 						} else {
-							log.Error("conflict error - revision has been added is different")
+							logger.Error("conflict error - revision has been added is different")
 						}
 					} else {
 						newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
@@ -208,14 +208,14 @@
 				}
 				for key := range src.ChangedKeys {
 					if _, removed := dst.RemovedKeys[key]; removed {
-						log.Error("conflict error - revision has been removed")
+						logger.Error("conflict error - revision has been removed")
 					} else if _, changed := dst.ChangedKeys[key]; changed {
 						childDstRev := dstList[dst.KeyMap2[key]]
 						childSrcRev := srcList[src.KeyMap2[key]]
 						if childDstRev.GetHash() == childSrcRev.GetHash() {
 							mergeChildFunc(childSrcRev)
 						} else if childDstRev.GetConfig().Hash != childSrcRev.GetConfig().Hash {
-							log.Error("conflict error - revision has been changed and is different")
+							logger.Error("conflict error - revision has been changed and is different")
 						} else {
 							newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
 							newList[dst.KeyMap2[key]] = newRev
@@ -229,7 +229,7 @@
 				// TODO: how do i sort this map in reverse order?
 				for key := range src.RemovedKeys {
 					if _, changed := dst.ChangedKeys[key]; changed {
-						log.Error("conflict error - revision has changed")
+						logger.Error("conflict error - revision has changed")
 					}
 					if _, removed := dst.RemovedKeys[key]; !removed {
 						dstIdx := dst.KeyMap2[key]
diff --git a/db/model/model.go b/db/model/model.go
index f80f957..0453122 100644
--- a/db/model/model.go
+++ b/db/model/model.go
@@ -16,16 +16,6 @@
 
 package model
 
-import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-)
-
-func init() {
-	if _, err := log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"}); err != nil {
-		log.Errorw("Unable to register package to the log map", log.Fields{"error": err})
-	}
-}
-
 type contextKey string
 
 const (
diff --git a/db/model/node.go b/db/model/node.go
index 152bf29..947cfc7 100644
--- a/db/model/node.go
+++ b/db/model/node.go
@@ -99,7 +99,7 @@
 		n.Type = reflect.ValueOf(initialData).Interface()
 	} else {
 		// not implemented error
-		log.Errorf("cannot process initial data - %+v", initialData)
+		logger.Errorf("cannot process initial data - %+v", initialData)
 	}
 
 	return n
@@ -128,7 +128,7 @@
 	// If anything is new, then set the revision as the latest
 	if branch.GetLatest() == nil || revision.GetHash() != branch.GetLatest().GetHash() {
 		if revision.GetName() != "" {
-			log.Debugw("saving-latest-data", log.Fields{"hash": revision.GetHash(), "data": revision.GetData()})
+			logger.Debugw("saving-latest-data", log.Fields{"hash": revision.GetHash(), "data": revision.GetData()})
 			// Tag a timestamp to that revision
 			revision.SetLastUpdate()
 			getRevCache().Set(revision.GetName(), revision)
@@ -144,7 +144,7 @@
 	if changeAnnouncement != nil && branch.Txid == "" {
 		if n.Proxy != nil {
 			for _, change := range changeAnnouncement {
-				log.Debugw("adding-callback",
+				logger.Debugw("adding-callback",
 					log.Fields{
 						"callbacks":    n.GetProxy().getCallbacks(change.Type),
 						"type":         change.Type,
@@ -197,7 +197,7 @@
 						//_, key := GetAttributeValue(v.Interface(), field.Key, 0)
 						//for _, k := range keysSeen {
 						//	if k == key.String() {
-						//		//log.Errorf("duplicate key - %s", k)
+						//		//logger.Errorf("duplicate key - %s", k)
 						//	}
 						//}
 						//keysSeen = append(keysSeen, key.String())
@@ -217,7 +217,7 @@
 				}
 			}
 		} else {
-			log.Errorf("field is invalid - %+v", fieldValue)
+			logger.Errorf("field is invalid - %+v", fieldValue)
 		}
 	}
 
@@ -255,7 +255,7 @@
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
-	log.Debugw("node-list-request", log.Fields{"path": path, "hash": hash, "depth": depth, "deep": deep, "txid": txid})
+	logger.Debugw("node-list-request", log.Fields{"path": path, "hash": hash, "depth": depth, "deep": deep, "txid": txid})
 
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
@@ -279,7 +279,7 @@
 
 	pr, err := rev.LoadFromPersistence(ctx, path, txid, nil)
 	if err != nil {
-		log.Errorf("failed-to-load-from-persistence")
+		logger.Errorf("failed-to-load-from-persistence")
 		return nil, err
 	}
 	if pr != nil {
@@ -296,7 +296,7 @@
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
-	log.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
+	logger.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
 
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
@@ -326,19 +326,19 @@
 		if entry, exists := getRevCache().Get(path); exists && entry.(Revision) != nil {
 			entryAge := time.Since(entry.(Revision).GetLastUpdate()).Nanoseconds() / int64(time.Millisecond)
 			if entryAge < DataRefreshPeriod {
-				log.Debugw("using-cache-entry", log.Fields{
+				logger.Debugw("using-cache-entry", log.Fields{
 					"path": path,
 					"hash": hash,
 					"age":  entryAge,
 				})
 				return proto.Clone(entry.(Revision).GetData().(proto.Message)), nil
 			}
-			log.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
+			logger.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
 		} else if result = n.getPath(ctx, rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
-			log.Debugw("using-rev-tree-entry", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
+			logger.Debugw("using-rev-tree-entry", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
 			return result, nil
 		} else {
-			log.Debugw("not-using-cache-entry", log.Fields{
+			logger.Debugw("not-using-cache-entry", log.Fields{
 				"path": path,
 				"hash": hash, "depth": depth,
 				"reconcile": reconcile,
@@ -346,7 +346,7 @@
 			})
 		}
 	} else {
-		log.Debugw("reconcile-requested", log.Fields{
+		logger.Debugw("reconcile-requested", log.Fields{
 			"path":      path,
 			"hash":      hash,
 			"reconcile": reconcile,
@@ -357,7 +357,7 @@
 	// or we simply failed at getting information from memory
 	if n.Root.KvStore != nil {
 		if pr, err := rev.LoadFromPersistence(ctx, path, txid, nil); err != nil {
-			log.Errorf("failed-to-load-from-persistence")
+			logger.Errorf("failed-to-load-from-persistence")
 			return nil, err
 		} else if len(pr) > 0 {
 			// Did we receive a single or multiple revisions?
@@ -444,7 +444,7 @@
 	var modifiedMsg interface{}
 
 	if n.GetProxy() != nil {
-		log.Debugw("invoking-get-callbacks", log.Fields{"data": msg})
+		logger.Debugw("invoking-get-callbacks", log.Fields{"data": msg})
 		if modifiedMsg = n.GetProxy().InvokeCallbacks(ctx, Get, false, msg); modifiedMsg != nil {
 			msg = modifiedMsg
 		}
@@ -459,7 +459,7 @@
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
-	log.Debugw("node-update-request", log.Fields{"path": path, "strict": strict, "txid": txid})
+	logger.Debugw("node-update-request", log.Fields{"path": path, "strict": strict, "txid": txid})
 
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
@@ -473,7 +473,7 @@
 	}
 
 	if branch.GetLatest() != nil {
-		log.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
+		logger.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
 	}
 	if path == "" {
 		return n.doUpdate(ctx, branch, data, strict)
@@ -499,7 +499,7 @@
 
 	if field.IsContainer {
 		if path == "" {
-			log.Errorf("cannot update a list")
+			logger.Errorf("cannot update a list")
 		} else if field.Key != "" {
 			partition := strings.SplitN(path, "/", 2)
 			key := partition[0]
@@ -516,7 +516,7 @@
 			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
 
 			if childRev == nil {
-				log.Debugw("child-revision-is-nil", log.Fields{"key": keyValue})
+				logger.Debugw("child-revision-is-nil", log.Fields{"key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -532,10 +532,10 @@
 
 			if newChildRev.GetHash() == childRev.GetHash() {
 				if newChildRev != childRev {
-					log.Debug("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
+					logger.Debugf("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
 					newChildRev.ClearHash()
 				}
-				log.Debugw("child-revisions-have-matching-hash", log.Fields{"hash": childRev.GetHash(), "key": keyValue})
+				logger.Debugw("child-revisions-have-matching-hash", log.Fields{"hash": childRev.GetHash(), "key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -545,7 +545,7 @@
 			_keyValueType := fmt.Sprintf("%s", keyValue)
 
 			if _newKeyType != _keyValueType {
-				log.Errorf("cannot change key field")
+				logger.Errorf("cannot change key field")
 			}
 
 			// Prefix the hash value with the data type (e.g. devices, logical_devices, adapters)
@@ -568,7 +568,7 @@
 			return newChildRev
 
 		} else {
-			log.Errorf("cannot index into container with no keys")
+			logger.Errorf("cannot index into container with no keys")
 		}
 	} else {
 		childRev := rev.GetChildren(name)[0]
@@ -590,22 +590,22 @@
 }
 
 func (n *node) doUpdate(ctx context.Context, branch *Branch, data interface{}, strict bool) Revision {
-	log.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
+	logger.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
 
 	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
 		// TODO raise error
-		log.Errorw("types-do-not-match: %+v", log.Fields{"actual": reflect.TypeOf(data), "expected": n.Type})
+		logger.Errorw("types-do-not-match: %+v", log.Fields{"actual": reflect.TypeOf(data), "expected": n.Type})
 		return nil
 	}
 
 	if n.GetProxy() != nil {
-		log.Debug("invoking proxy PreUpdate Callbacks")
+		logger.Debug("invoking proxy PreUpdate Callbacks")
 		n.GetProxy().InvokeCallbacks(ctx, PreUpdate, false, branch.GetLatest(), data)
 	}
 
 	if strict {
 		// TODO: checkAccessViolations(data, Branch.GetLatest.data)
-		log.Warn("access-violations-not-supported")
+		logger.Warn("access-violations-not-supported")
 	}
 
 	// The way the model is used, this function is only invoked upon data change.  Therefore, to also
@@ -622,14 +622,14 @@
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
-	log.Debugw("node-add-request", log.Fields{"path": path, "txid": txid})
+	logger.Debugw("node-add-request", log.Fields{"path": path, "txid": txid})
 
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
 	}
 	if path == "" {
 		// TODO raise error
-		log.Errorf("cannot add for non-container mode")
+		logger.Errorf("cannot add for non-container mode")
 		return nil
 	}
 
@@ -659,7 +659,7 @@
 		if path == "" {
 			if field.Key != "" {
 				if n.GetProxy() != nil {
-					log.Debug("invoking proxy PreAdd Callbacks")
+					logger.Debug("invoking proxy PreAdd Callbacks")
 					n.GetProxy().InvokeCallbacks(ctx, PreAdd, false, data)
 				}
 
@@ -670,7 +670,7 @@
 
 				if _, exists := n.findRevByKey(children, field.Key, key.String()); exists != nil {
 					// TODO raise error
-					log.Warnw("duplicate-key-found", log.Fields{"key": key.String()})
+					logger.Warnw("duplicate-key-found", log.Fields{"key": key.String()})
 					return exists
 				}
 				childRev := n.MakeNode(data, "").Latest()
@@ -690,7 +690,7 @@
 
 				return childRev
 			}
-			log.Errorf("cannot add to non-keyed container")
+			logger.Errorf("cannot add to non-keyed container")
 
 		} else if field.Key != "" {
 			partition := strings.SplitN(path, "/", 2)
@@ -733,10 +733,10 @@
 
 			return newChildRev
 		} else {
-			log.Errorf("cannot add to non-keyed container")
+			logger.Errorf("cannot add to non-keyed container")
 		}
 	} else {
-		log.Errorf("cannot add to non-container field")
+		logger.Errorf("cannot add to non-container field")
 	}
 
 	return nil
@@ -747,14 +747,14 @@
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
-	log.Debugw("node-remove-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
+	logger.Debugw("node-remove-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
 
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
 	}
 	if path == "" {
 		// TODO raise error
-		log.Errorf("cannot remove for non-container mode")
+		logger.Errorf("cannot remove for non-container mode")
 	}
 	var branch *Branch
 	if txid == "" {
@@ -779,7 +779,7 @@
 
 	if field.IsContainer {
 		if path == "" {
-			log.Errorw("cannot-remove-without-key", log.Fields{"name": name, "key": path})
+			logger.Errorw("cannot-remove-without-key", log.Fields{"name": name, "key": path})
 		} else if field.Key != "" {
 			partition := strings.SplitN(path, "/", 2)
 			key := partition[0]
@@ -841,12 +841,12 @@
 
 				return rev
 			}
-			log.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
+			logger.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
 		}
-		log.Errorw("cannot-add-to-non-keyed-container", log.Fields{"name": name, "path": path, "fieldKey": field.Key})
+		logger.Errorw("cannot-add-to-non-keyed-container", log.Fields{"name": name, "path": path, "fieldKey": field.Key})
 
 	} else {
-		log.Errorw("cannot-add-to-non-container-field", log.Fields{"name": name, "path": path})
+		logger.Errorw("cannot-add-to-non-container-field", log.Fields{"name": name, "path": path})
 	}
 
 	return nil
@@ -912,7 +912,7 @@
 }
 
 func (n *node) createProxy(ctx context.Context, path string, fullPath string, parentNode *node, exclusive bool) (*Proxy, error) {
-	log.Debugw("node-create-proxy", log.Fields{
+	logger.Debugw("node-create-proxy", log.Fields{
 		"node-type":        reflect.ValueOf(n.Type).Type(),
 		"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 		"path":             path,
@@ -942,14 +942,14 @@
 
 	if field != nil {
 		if field.IsContainer {
-			log.Debugw("container-field", log.Fields{
+			logger.Debugw("container-field", log.Fields{
 				"node-type":        reflect.ValueOf(n.Type).Type(),
 				"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 				"path":             path,
 				"name":             name,
 			})
 			if path == "" {
-				log.Debugw("folder-proxy", log.Fields{
+				logger.Debugw("folder-proxy", log.Fields{
 					"node-type":        reflect.ValueOf(n.Type).Type(),
 					"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 					"fullPath":         fullPath,
@@ -958,7 +958,7 @@
 				newNode := n.MakeNode(reflect.New(field.ClassType.Elem()).Interface(), "")
 				return newNode.makeProxy(path, fullPath, parentNode, exclusive), nil
 			} else if field.Key != "" {
-				log.Debugw("key-proxy", log.Fields{
+				logger.Debugw("key-proxy", log.Fields{
 					"node-type":        reflect.ValueOf(n.Type).Type(),
 					"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 					"fullPath":         fullPath,
@@ -977,17 +977,17 @@
 
 				var childRev Revision
 				if _, childRev = n.findRevByKey(children, field.Key, keyValue); childRev != nil {
-					log.Debugw("found-revision-matching-key-in-memory", log.Fields{
+					logger.Debugw("found-revision-matching-key-in-memory", log.Fields{
 						"node-type":        reflect.ValueOf(n.Type).Type(),
 						"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 						"fullPath":         fullPath,
 						"name":             name,
 					})
 				} else if revs, err := n.GetBranch(NONE).GetLatest().LoadFromPersistence(ctx, fullPath, "", nil); err != nil {
-					log.Errorf("failed-to-load-from-persistence")
+					logger.Errorf("failed-to-load-from-persistence")
 					return nil, err
 				} else if len(revs) > 0 {
-					log.Debugw("found-revision-matching-key-in-db", log.Fields{
+					logger.Debugw("found-revision-matching-key-in-db", log.Fields{
 						"node-type":        reflect.ValueOf(n.Type).Type(),
 						"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 						"fullPath":         fullPath,
@@ -995,7 +995,7 @@
 					})
 					childRev = revs[0]
 				} else {
-					log.Debugw("no-revision-matching-key", log.Fields{
+					logger.Debugw("no-revision-matching-key", log.Fields{
 						"node-type":        reflect.ValueOf(n.Type).Type(),
 						"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 						"fullPath":         fullPath,
@@ -1007,7 +1007,7 @@
 					return childNode.createProxy(ctx, path, fullPath, n, exclusive)
 				}
 			} else {
-				log.Errorw("cannot-access-index-of-empty-container", log.Fields{
+				logger.Errorw("cannot-access-index-of-empty-container", log.Fields{
 					"node-type":        reflect.ValueOf(n.Type).Type(),
 					"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 					"path":             path,
@@ -1015,7 +1015,7 @@
 				})
 			}
 		} else {
-			log.Debugw("non-container-field", log.Fields{
+			logger.Debugw("non-container-field", log.Fields{
 				"node-type":        reflect.ValueOf(n.Type).Type(),
 				"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 				"path":             path,
@@ -1026,7 +1026,7 @@
 			return childNode.createProxy(ctx, path, fullPath, n, exclusive)
 		}
 	} else {
-		log.Debugw("field-object-is-nil", log.Fields{
+		logger.Debugw("field-object-is-nil", log.Fields{
 			"node-type":        reflect.ValueOf(n.Type).Type(),
 			"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 			"fullPath":         fullPath,
@@ -1034,7 +1034,7 @@
 		})
 	}
 
-	log.Warnw("cannot-create-proxy", log.Fields{
+	logger.Warnw("cannot-create-proxy", log.Fields{
 		"node-type":        reflect.ValueOf(n.Type).Type(),
 		"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 		"path":             path,
@@ -1045,7 +1045,7 @@
 }
 
 func (n *node) makeProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
-	log.Debugw("node-make-proxy", log.Fields{
+	logger.Debugw("node-make-proxy", log.Fields{
 		"node-type":        reflect.ValueOf(n.Type).Type(),
 		"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 		"path":             path,
@@ -1063,7 +1063,7 @@
 	}
 
 	if n.Proxy == nil {
-		log.Debugw("constructing-new-proxy", log.Fields{
+		logger.Debugw("constructing-new-proxy", log.Fields{
 			"node-type":        reflect.ValueOf(n.Type).Type(),
 			"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
 			"path":             path,
@@ -1071,14 +1071,14 @@
 		})
 		n.Proxy = NewProxy(r, n, parentNode, path, fullPath, exclusive)
 	} else {
-		log.Debugw("node-has-existing-proxy", log.Fields{
+		logger.Debugw("node-has-existing-proxy", log.Fields{
 			"node-type":        reflect.ValueOf(n.GetProxy().Node.Type).Type(),
 			"parent-node-type": reflect.ValueOf(n.GetProxy().ParentNode.Type).Type(),
 			"path":             n.GetProxy().Path,
 			"fullPath":         n.GetProxy().FullPath,
 		})
 		if n.GetProxy().Exclusive {
-			log.Error("node is already owned exclusively")
+			logger.Error("node is already owned exclusively")
 		}
 	}
 
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index a8073c7..3bc888d 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -305,7 +305,7 @@
 	npr.mutex.Lock()
 	defer npr.mutex.Unlock()
 
-	log.Debugw("update-data", log.Fields{"hash": npr.GetHash(), "current": npr.Config.Data, "provided": data})
+	logger.Debugw("update-data", log.Fields{"hash": npr.GetHash(), "current": npr.Config.Data, "provided": data})
 
 	// Construct a new revision based on the current one
 	newRev := NonPersistedRevision{}
@@ -323,7 +323,7 @@
 
 	newRev.Finalize(ctx, false)
 
-	log.Debugw("update-data-complete", log.Fields{"updated": newRev.Config.Data, "provided": data})
+	logger.Debugw("update-data-complete", log.Fields{"updated": newRev.Config.Data, "provided": data})
 
 	return &newRev
 }
@@ -370,7 +370,7 @@
 			}
 		}
 
-		log.Debugw("existing-children-names", log.Fields{"hash": npr.GetHash(), "names": existingNames})
+		logger.Debugw("existing-children-names", log.Fields{"hash": npr.GetHash(), "names": existingNames})
 
 		// Merge existing and new children
 		for _, newChild := range children {
@@ -383,7 +383,7 @@
 				newChild.getNode().SetRoot(existingChildren[nameIndex].getNode().GetRoot())
 				updatedChildren = append(updatedChildren, newChild)
 			} else {
-				log.Debugw("adding-unknown-child", log.Fields{
+				logger.Debugw("adding-unknown-child", log.Fields{
 					"hash": newChild.GetHash(),
 					"data": newChild.GetData(),
 				})
@@ -401,7 +401,7 @@
 			updatedNames[updatedChild.GetName()] = i
 		}
 
-		log.Debugw("updated-children-names", log.Fields{"hash": npr.GetHash(), "names": updatedNames})
+		logger.Debugw("updated-children-names", log.Fields{"hash": npr.GetHash(), "names": updatedNames})
 
 	} else {
 		// There are no children available, just save the provided ones
@@ -436,7 +436,7 @@
 
 // Drop is used to indicate when a revision is no longer required
 func (npr *NonPersistedRevision) Drop(txid string, includeConfig bool) {
-	log.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "name": npr.GetName()})
+	logger.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "name": npr.GetName()})
 }
 
 // ChildDrop will remove a child entry matching the provided parameters from the current revision
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index 15e438c..822b8b2 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -74,20 +74,20 @@
 		return
 	}
 
-	log.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
+	logger.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 
 	// clone the revision data to avoid any race conditions with processes
 	// accessing the same data
 	cloned := proto.Clone(pr.GetConfig().Data.(proto.Message))
 
 	if blob, err := proto.Marshal(cloned); err != nil {
-		log.Errorw("problem-to-marshal", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
+		logger.Errorw("problem-to-marshal", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 	} else {
 		if pr.Compress {
 			var b bytes.Buffer
 			w := gzip.NewWriter(&b)
 			if _, err := w.Write(blob); err != nil {
-				log.Errorw("Unable to write a compressed form of p to the underlying io.Writer.", log.Fields{"error": err})
+				logger.Errorw("Unable to write the compressed blob to the underlying io.Writer.", log.Fields{"error": err})
 			}
 			w.Close()
 			blob = b.Bytes()
@@ -95,9 +95,9 @@
 
 		getRevCache().Set(pr.GetName(), pr)
 		if err := pr.kvStore.Put(ctx, pr.GetName(), blob); err != nil {
-			log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			logger.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 		} else {
-			log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data, "version": pr.getVersion()})
+			logger.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data, "version": pr.getVersion()})
 			pr.isStored = true
 		}
 	}
@@ -105,7 +105,7 @@
 
 // UpdateData modifies the information in the data model and saves it in the persistent storage
 func (pr *PersistedRevision) UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision {
-	log.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
+	logger.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
 
 	newNPR := pr.Revision.UpdateData(ctx, data, branch)
 
@@ -130,7 +130,7 @@
 
 // UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
 func (pr *PersistedRevision) UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision {
-	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
+	logger.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
 
 	newNPR := pr.Revision.UpdateChildren(ctx, name, children, branch)
 
@@ -154,7 +154,7 @@
 
 // UpdateAllChildren modifies the children for all components of a revision and saves it in the peristent storage
 func (pr *PersistedRevision) UpdateAllChildren(ctx context.Context, children map[string][]Revision, branch *Branch) Revision {
-	log.Debugw("updating-all-persisted-children", log.Fields{"hash": pr.GetHash()})
+	logger.Debugw("updating-all-persisted-children", log.Fields{"hash": pr.GetHash()})
 
 	newNPR := pr.Revision.UpdateAllChildren(ctx, children, branch)
 
@@ -185,21 +185,21 @@
 // StorageDrop takes care of eliminating a revision hash that is no longer needed
 // and its associated config when required
 func (pr *PersistedRevision) StorageDrop(ctx context.Context, txid string, includeConfig bool) {
-	log.Debugw("dropping-revision", log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "key": pr.GetName(), "isStored": pr.isStored})
+	logger.Debugw("dropping-revision", log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "key": pr.GetName(), "isStored": pr.isStored})
 
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
 	if pr.kvStore != nil && txid == "" {
 		if err := pr.kvStore.Delete(ctx, pr.GetName()); err != nil {
-			log.Errorw("failed-to-remove-revision", log.Fields{"hash": pr.GetHash(), "error": err.Error()})
+			logger.Errorw("failed-to-remove-revision", log.Fields{"hash": pr.GetHash(), "error": err.Error()})
 		} else {
 			pr.isStored = false
 		}
 	} else {
 		if includeConfig {
-			log.Debugw("attempted-to-remove-transacted-revision-config", log.Fields{"hash": pr.GetConfig().Hash, "txid": txid})
+			logger.Debugw("attempted-to-remove-transacted-revision-config", log.Fields{"hash": pr.GetConfig().Hash, "txid": txid})
 		}
-		log.Debugw("attempted-to-remove-transacted-revision", log.Fields{"hash": pr.GetHash(), "txid": txid})
+		logger.Debugw("attempted-to-remove-transacted-revision", log.Fields{"hash": pr.GetHash(), "txid": txid})
 	}
 
 	pr.Revision.Drop(txid, includeConfig)
@@ -221,7 +221,7 @@
 		// Verify if the data differs from what was retrieved from persistence
 		// Also check if we are treating a newer revision of the data or not
 		if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() && childRev.getVersion() < version {
-			log.Debugw("revision-data-is-different", log.Fields{
+			logger.Debugw("revision-data-is-different", log.Fields{
 				"key":               childRev.GetHash(),
 				"name":              childRev.GetName(),
 				"data":              childRev.GetData(),
@@ -266,7 +266,7 @@
 			parent.GetBranch(NONE).Latest.ChildDrop(typeName, childRev.GetHash())
 
 			if updatedChildRev != nil {
-				log.Debugw("verify-persisted-entry--adding-child", log.Fields{
+				logger.Debugw("verify-persisted-entry--adding-child", log.Fields{
 					"key":  updatedChildRev.GetHash(),
 					"name": updatedChildRev.GetName(),
 					"data": updatedChildRev.GetData(),
@@ -274,7 +274,7 @@
 				response = updatedChildRev
 			}
 		} else {
-			log.Debugw("keeping-revision-data", log.Fields{
+			logger.Debugw("keeping-revision-data", log.Fields{
 				"key":                 childRev.GetHash(),
 				"name":                childRev.GetName(),
 				"data":                childRev.GetData(),
@@ -294,7 +294,7 @@
 	} else {
 		// There is no available child with that key value.
 		// Create a new child and update the parent revision.
-		log.Debugw("no-such-revision-entry", log.Fields{
+		logger.Debugw("no-such-revision-entry", log.Fields{
 			"key":     keyValue,
 			"name":    typeName,
 			"data":    data,
@@ -332,7 +332,7 @@
 
 		// Child entry is valid and can be included in the response object
 		if childRev != nil {
-			log.Debugw("adding-revision-to-response", log.Fields{
+			logger.Debugw("adding-revision-to-response", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
 				"data": childRev.GetData(),
@@ -350,7 +350,7 @@
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
 
-	log.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})
+	logger.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})
 
 	var response []Revision
 	var err error
@@ -361,10 +361,10 @@
 
 	if pr.kvStore != nil && path != "" {
 		if len(blobs) == 0 {
-			log.Debugw("retrieve-from-kv", log.Fields{"path": path, "txid": txid})
+			logger.Debugw("retrieve-from-kv", log.Fields{"path": path, "txid": txid})
 
 			if blobs, err = pr.kvStore.List(ctx, path); err != nil {
-				log.Errorw("failed-to-retrieve-data-from-kvstore", log.Fields{"error": err})
+				logger.Errorw("failed-to-retrieve-data-from-kvstore", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -384,7 +384,7 @@
 		field := ChildrenFields(nodeType)[name]
 
 		if field != nil && field.IsContainer {
-			log.Debugw("parsing-data-blobs", log.Fields{
+			logger.Debugw("parsing-data-blobs", log.Fields{
 				"path": path,
 				"name": name,
 				"size": len(blobs),
@@ -396,14 +396,14 @@
 				data := reflect.New(field.ClassType.Elem())
 
 				if err := proto.Unmarshal(output, data.Interface().(proto.Message)); err != nil {
-					log.Errorw("failed-to-unmarshal", log.Fields{
+					logger.Errorw("failed-to-unmarshal", log.Fields{
 						"path":  path,
 						"txid":  txid,
 						"error": err,
 					})
 				} else if path == "" {
 					if field.Key != "" {
-						log.Debugw("no-path-with-container-key", log.Fields{
+						logger.Debugw("no-path-with-container-key", log.Fields{
 							"path": path,
 							"txid": txid,
 							"data": data.Interface(),
@@ -417,7 +417,7 @@
 							response = append(response, entry)
 						}
 					} else {
-						log.Debugw("path-with-no-container-key", log.Fields{
+						logger.Debugw("path-with-no-container-key", log.Fields{
 							"path": path,
 							"txid": txid,
 							"data": data.Interface(),
@@ -425,7 +425,7 @@
 					}
 
 				} else if field.Key != "" {
-					log.Debugw("path-with-container-key", log.Fields{
+					logger.Debugw("path-with-container-key", log.Fields{
 						"path": path,
 						"txid": txid,
 						"data": data.Interface(),
@@ -446,9 +446,9 @@
 				}
 			}
 
-			log.Debugw("no-more-data-blobs", log.Fields{"path": path, "name": name})
+			logger.Debugw("no-more-data-blobs", log.Fields{"path": path, "name": name})
 		} else {
-			log.Debugw("cannot-process-field", log.Fields{
+			logger.Debugw("cannot-process-field", log.Fields{
 				"type": pr.GetBranch().Node.Type,
 				"name": name,
 			})
diff --git a/db/model/profiling.go b/db/model/profiling.go
index 4e6f871..c82afd7 100644
--- a/db/model/profiling.go
+++ b/db/model/profiling.go
@@ -18,8 +18,6 @@
 
 import (
 	"sync"
-
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 // Profiling is used to store performance details collected at runtime
@@ -109,15 +107,15 @@
 	p.Lock()
 	defer p.Unlock()
 
-	log.Infof("[ Profiling Report ]")
-	log.Infof("Database Retrieval : %f", p.DatabaseRetrieveTime)
-	log.Infof("Database Retrieval Count : %d", p.DatabaseRetrieveCount)
-	log.Infof("Avg Database Retrieval : %f", p.DatabaseRetrieveTime/float64(p.DatabaseRetrieveCount))
-	log.Infof("In-Memory Modeling : %f", p.InMemoryModelTime)
-	log.Infof("In-Memory Modeling Count: %d", p.InMemoryModelCount)
-	log.Infof("Avg In-Memory Modeling : %f", p.InMemoryModelTime/float64(p.InMemoryModelCount))
-	log.Infof("In-Memory Locking : %f", p.InMemoryLockTime)
-	log.Infof("In-Memory Locking Count: %d", p.InMemoryLockCount)
-	log.Infof("Avg In-Memory Locking : %f", p.InMemoryLockTime/float64(p.InMemoryLockCount))
+	logger.Infof("[ Profiling Report ]")
+	logger.Infof("Database Retrieval : %f", p.DatabaseRetrieveTime)
+	logger.Infof("Database Retrieval Count : %d", p.DatabaseRetrieveCount)
+	logger.Infof("Avg Database Retrieval : %f", p.DatabaseRetrieveTime/float64(p.DatabaseRetrieveCount))
+	logger.Infof("In-Memory Modeling : %f", p.InMemoryModelTime)
+	logger.Infof("In-Memory Modeling Count: %d", p.InMemoryModelCount)
+	logger.Infof("Avg In-Memory Modeling : %f", p.InMemoryModelTime/float64(p.InMemoryModelCount))
+	logger.Infof("In-Memory Locking : %f", p.InMemoryLockTime)
+	logger.Infof("In-Memory Locking Count: %d", p.InMemoryLockCount)
+	logger.Infof("Avg In-Memory Locking : %f", p.InMemoryLockTime/float64(p.InMemoryLockCount))
 
 }
diff --git a/db/model/profiling_test.go b/db/model/profiling_test.go
index 968aac6..8b515af 100644
--- a/db/model/profiling_test.go
+++ b/db/model/profiling_test.go
@@ -19,7 +19,6 @@
 	"reflect"
 	"testing"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -38,7 +37,7 @@
 	 * same profiling instance.
 	 */
 
-	log.Info("/***** Unit Test Begin: Profiling Report: *****/")
+	logger.Info("/***** Unit Test Begin: Profiling Report: *****/")
 	result.Report()
 
 	GetProfiling().AddToDatabaseRetrieveTime(2.0)
@@ -72,7 +71,7 @@
 	assert.Equal(t, 5.0, GetProfiling().InMemoryLockTime)
 	assert.Equal(t, 2, GetProfiling().InMemoryLockCount)
 
-	log.Info("/***** Unit Test End: Profiling Report: *****/")
+	logger.Info("/***** Unit Test End: Profiling Report: *****/")
 	GetProfiling().Report()
 
 	result.Reset()
diff --git a/db/model/proxy.go b/db/model/proxy.go
index 303bc4e..73ea70d 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -110,7 +110,7 @@
 			return cb
 		}
 	} else {
-		log.Debugw("proxy-is-nil", log.Fields{"callback-type": callbackType.String()})
+		logger.Debugw("proxy-is-nil", log.Fields{"callback-type": callbackType.String()})
 	}
 	return nil
 }
@@ -203,7 +203,7 @@
 	p.SetOperation(ProxyList)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-list", log.Fields{
+	logger.Debugw("proxy-list", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"operation": p.GetOperation(),
@@ -223,7 +223,7 @@
 	p.SetOperation(ProxyGet)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-get", log.Fields{
+	logger.Debugw("proxy-get", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"operation": p.GetOperation(),
@@ -235,7 +235,7 @@
 // Update will modify information in the data model at the specified location with the provided data
 func (p *Proxy) Update(ctx context.Context, path string, data interface{}, strict bool, txid string) (interface{}, error) {
 	if !strings.HasPrefix(path, "/") {
-		log.Errorf("invalid path: %s", path)
+		logger.Errorf("invalid path: %s", path)
 		return nil, fmt.Errorf("invalid path: %s", path)
 	}
 	var fullPath string
@@ -251,7 +251,7 @@
 	p.SetOperation(ProxyUpdate)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-update", log.Fields{
+	logger.Debugw("proxy-update", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"full":      fullPath,
@@ -272,7 +272,7 @@
 // that access control is active while inserting the information.
 func (p *Proxy) AddWithID(ctx context.Context, path string, id string, data interface{}, txid string) (interface{}, error) {
 	if !strings.HasPrefix(path, "/") {
-		log.Errorf("invalid path: %s", path)
+		logger.Errorf("invalid path: %s", path)
 		return nil, fmt.Errorf("invalid path: %s", path)
 	}
 	var fullPath string
@@ -288,7 +288,7 @@
 	p.SetOperation(ProxyAdd)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-add-with-id", log.Fields{
+	logger.Debugw("proxy-add-with-id", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"full":      fullPath,
@@ -307,7 +307,7 @@
 // Add will insert new data at specified location.
 func (p *Proxy) Add(ctx context.Context, path string, data interface{}, txid string) (interface{}, error) {
 	if !strings.HasPrefix(path, "/") {
-		log.Errorf("invalid path: %s", path)
+		logger.Errorf("invalid path: %s", path)
 		return nil, fmt.Errorf("invalid path: %s", path)
 	}
 	var fullPath string
@@ -323,7 +323,7 @@
 	p.SetOperation(ProxyAdd)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-add", log.Fields{
+	logger.Debugw("proxy-add", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"full":      fullPath,
@@ -342,7 +342,7 @@
 // Remove will delete an entry at the specified location
 func (p *Proxy) Remove(ctx context.Context, path string, txid string) (interface{}, error) {
 	if !strings.HasPrefix(path, "/") {
-		log.Errorf("invalid path: %s", path)
+		logger.Errorf("invalid path: %s", path)
 		return nil, fmt.Errorf("invalid path: %s", path)
 	}
 	var fullPath string
@@ -358,7 +358,7 @@
 	p.SetOperation(ProxyRemove)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-remove", log.Fields{
+	logger.Debugw("proxy-remove", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"full":      fullPath,
@@ -377,7 +377,7 @@
 // CreateProxy to interact with specific path directly
 func (p *Proxy) CreateProxy(ctx context.Context, path string, exclusive bool) (*Proxy, error) {
 	if !strings.HasPrefix(path, "/") {
-		log.Errorf("invalid path: %s", path)
+		logger.Errorf("invalid path: %s", path)
 		return nil, fmt.Errorf("invalid path: %s", path)
 	}
 
@@ -394,7 +394,7 @@
 	p.SetOperation(ProxyCreate)
 	defer p.SetOperation(ProxyNone)
 
-	log.Debugw("proxy-create", log.Fields{
+	logger.Debugw("proxy-create", log.Fields{
 		"path":      path,
 		"effective": effectivePath,
 		"full":      fullPath,
@@ -446,7 +446,7 @@
 		p.setCallbacks(callbackType, make(map[string]*CallbackTuple))
 	}
 	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
-	log.Debugf("value of function: %s", funcName)
+	logger.Debugf("value of function: %s", funcName)
 	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]
 
 	p.setCallback(callbackType, funcHash, &CallbackTuple{callback, args})
@@ -455,17 +455,17 @@
 // UnregisterCallback removes references to a callback within a proxy
 func (p *Proxy) UnregisterCallback(callbackType CallbackType, callback CallbackFunction, args ...interface{}) {
 	if p.getCallbacks(callbackType) == nil {
-		log.Errorf("no such callback type - %s", callbackType.String())
+		logger.Errorf("no such callback type - %s", callbackType.String())
 		return
 	}
 
 	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
 	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]
 
-	log.Debugf("value of function: %s", funcName)
+	logger.Debugf("value of function: %s", funcName)
 
 	if p.getCallback(callbackType, funcHash) == nil {
-		log.Errorf("function with hash value: '%s' not registered with callback type: '%s'", funcHash, callbackType)
+		logger.Errorf("function with hash value: '%s' not registered with callback type: '%s'", funcHash, callbackType)
 		return
 	}
 
@@ -477,7 +477,7 @@
 		if r := recover(); r != nil {
 			errStr := fmt.Sprintf("callback error occurred: %+v", r)
 			err = errors.New(errStr)
-			log.Error(errStr)
+			logger.Error(errStr)
 		}
 	}()
 
@@ -499,10 +499,10 @@
 		for _, callback := range callbacks {
 			if result, err = p.invoke(ctx, callback, context); err != nil {
 				if !proceedOnError {
-					log.Info("An error occurred.  Stopping callback invocation")
+					logger.Info("An error occurred.  Stopping callback invocation")
 					break
 				}
-				log.Info("An error occurred.  Invoking next callback")
+				logger.Info("An error occurred.  Invoking next callback")
 			}
 		}
 		p.mutex.Unlock()
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 6fb5a6f..5bfd8d8 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -54,8 +54,6 @@
 )
 
 func init() {
-	//log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
-	//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
 	var err error
 	TestProxyRoot = NewRoot(&voltha.Voltha{}, nil)
 	if TestProxyRootLogicalDevice, err = TestProxyRoot.CreateProxy(context.Background(), "/", false); err != nil {
diff --git a/db/model/root.go b/db/model/root.go
index 3ae5614..c3b932e 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -106,7 +106,7 @@
 		r.DeleteTxBranch(txid)
 	} else {
 		if _, err = r.node.MergeBranch(ctx, txid, false); err != nil {
-			log.Errorw("Unable to integrate the contents of a transaction branch within the latest branch of a given node", log.Fields{"error": err})
+			logger.Errorw("Unable to integrate the contents of a transaction branch within the latest branch of a given node", log.Fields{"error": err})
 		}
 		r.node.GetRoot().ExecuteCallbacks(ctx)
 		r.DeleteTxBranch(txid)
@@ -272,9 +272,9 @@
 		if blob, err := json.Marshal(data); err != nil {
 			// TODO report error
 		} else {
-			log.Debugf("Changing root to : %s", string(blob))
+			logger.Debugf("Changing root to : %s", string(blob))
 			if err := r.KvStore.Put(ctx, "root", blob); err != nil {
-				log.Errorf("failed to properly put value in kvstore - err: %s", err.Error())
+				logger.Errorf("failed to properly put value in kvstore - err: %s", err.Error())
 			}
 		}
 	}
diff --git a/db/model/transaction.go b/db/model/transaction.go
index 7879a89..670525d 100644
--- a/db/model/transaction.go
+++ b/db/model/transaction.go
@@ -19,8 +19,6 @@
 import (
 	"context"
 	"fmt"
-
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 // Transaction -
@@ -39,7 +37,7 @@
 }
 func (t *Transaction) Get(ctx context.Context, path string, depth int, deep bool) (interface{}, error) {
 	if t.txid == "" {
-		log.Errorf("closed transaction")
+		logger.Errorf("closed transaction")
 		return nil, fmt.Errorf("closed transaction")
 	}
 	// TODO: need to review the return values at the different layers!!!!!
@@ -47,21 +45,21 @@
 }
 func (t *Transaction) Update(ctx context.Context, path string, data interface{}, strict bool) (interface{}, error) {
 	if t.txid == "" {
-		log.Errorf("closed transaction")
+		logger.Errorf("closed transaction")
 		return nil, fmt.Errorf("closed transaction")
 	}
 	return t.proxy.Update(ctx, path, data, strict, t.txid)
 }
 func (t *Transaction) Add(ctx context.Context, path string, data interface{}) (interface{}, error) {
 	if t.txid == "" {
-		log.Errorf("closed transaction")
+		logger.Errorf("closed transaction")
 		return nil, fmt.Errorf("closed transaction")
 	}
 	return t.proxy.Add(ctx, path, data, t.txid)
 }
 func (t *Transaction) Remove(ctx context.Context, path string) (interface{}, error) {
 	if t.txid == "" {
-		log.Errorf("closed transaction")
+		logger.Errorf("closed transaction")
 		return nil, fmt.Errorf("closed transaction")
 	}
 	return t.proxy.Remove(ctx, path, t.txid)
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index c66101b..4e1346b 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -73,7 +73,7 @@
 
 	added, err := addTx.Add(context.Background(), "/devices", device)
 	if err != nil {
-		log.Errorf("Failed to add device due to error %v", err)
+		logger.Errorf("Failed to add device due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	if added == nil {
@@ -94,7 +94,7 @@
 	getDevWithPortsTx := TestTransactionRootProxy.OpenTransaction()
 	device1, err := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
 	if err != nil {
-		log.Errorf("Failed to get device with ports due to error %v", err)
+		logger.Errorf("Failed to get device with ports due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	t.Logf("retrieved device with ports: %+v", device1)
@@ -105,7 +105,7 @@
 	getDevTx := TestTransactionRootProxy.OpenTransaction()
 	device2, err := getDevTx.Get(context.Background(), basePath, 0, false)
 	if err != nil {
-		log.Errorf("Failed to open transaction due to error %v", err)
+		logger.Errorf("Failed to open transaction due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	t.Logf("retrieved device: %+v", device2)
@@ -116,7 +116,7 @@
 func TestTransaction_4_UpdateDevice(t *testing.T) {
 	updateTx := TestTransactionRootProxy.OpenTransaction()
 	if retrieved, err := updateTx.Get(context.Background(), "/devices/"+TestTransactionTargetDeviceID, 1, false); err != nil {
-		log.Errorf("Failed to retrieve device info due to error %v", err)
+		logger.Errorf("Failed to retrieve device info due to error %v", err)
 		assert.NotNil(t, err)
 	} else if retrieved == nil {
 		t.Error("Failed to get device")
@@ -136,7 +136,7 @@
 		// FIXME: The makeBranch passed in function is nil or not being executed properly!!!!!
 		afterUpdate, err := updateTx.Update(context.Background(), "/devices/"+TestTransactionTargetDeviceID, retrieved, false)
 		if err != nil {
-			log.Errorf("Failed to update device info due to error %v", err)
+			logger.Errorf("Failed to update device info due to error %v", err)
 			assert.NotNil(t, err)
 		}
 		if afterUpdate == nil {
@@ -157,7 +157,7 @@
 	getDevWithPortsTx := TestTransactionRootProxy.OpenTransaction()
 	device1, err := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
 	if err != nil {
-		log.Errorf("Failed to device with ports info due to error %v", err)
+		logger.Errorf("Failed to get device with ports info due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	t.Logf("retrieved device with ports: %+v", device1)
@@ -168,7 +168,7 @@
 	getDevTx := TestTransactionRootProxy.OpenTransaction()
 	device2, err := getDevTx.Get(context.Background(), basePath, 0, false)
 	if err != nil {
-		log.Errorf("Failed to  get device info due to error %v", err)
+		logger.Errorf("Failed to get device info due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	t.Logf("retrieved device: %+v", device2)
@@ -180,7 +180,7 @@
 	removeTx := TestTransactionRootProxy.OpenTransaction()
 	removed, err := removeTx.Remove(context.Background(), "/devices/"+TestTransactionDeviceID)
 	if err != nil {
-		log.Errorf("Failed to remove device due to error %v", err)
+		logger.Errorf("Failed to remove device due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	if removed == nil {
@@ -200,7 +200,7 @@
 	getDevTx := TestTransactionRootProxy.OpenTransaction()
 	device, err := TestTransactionRootProxy.Get(context.Background(), basePath, 0, false, "")
 	if err != nil {
-		log.Errorf("Failed to get device info post remove due to error %v", err)
+		logger.Errorf("Failed to get device info post remove due to error %v", err)
 		assert.NotNil(t, err)
 	}
 	t.Logf("retrieved device: %+v", device)
diff --git a/db/model/utils.go b/db/model/utils.go
index 769aa78..6d1b2d5 100644
--- a/db/model/utils.go
+++ b/db/model/utils.go
@@ -91,7 +91,6 @@
 			}
 		}
 	default:
-		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
 	}
 
 	return nil
@@ -134,7 +133,6 @@
 			}
 		}
 	default:
-		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
 	}
 
 	return nil
@@ -190,7 +188,6 @@
 			}
 		}
 	default:
-		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
 	}
 
 	return attribName, attribValue
@@ -245,7 +242,6 @@
 
 		}
 	default:
-		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
 	}
 
 	return result