VOL-1775 VOL-1779 VOL-1780: Fix several issues affecting overall stability

- Apply fixes for data races reported by the Go race detector
- Add a Version attribute to the KVPair and Event objects
- Add a context.Context argument to the db/model API
- Carry a request timestamp in the context to help decide whether a
  revision change should be applied (see the first sketch below)
- Replace the proxy access control mechanism with etcd-based path
  reservations (see the second sketch below)

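Illustrative only, not part of the patch: a minimal sketch of how a caller
could stamp a request timestamp into the context so that the model layer can
discard data that is older than the revision already held in memory. The key
matches the RequestTimestamp constant added in db/model/model.go; the
revision lookup below is a hypothetical placeholder.

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // Same key as the RequestTimestamp constant added in db/model/model.go.
    const RequestTimestamp = "request-timestamp"

    func main() {
    	// Caller side: tag the context when the request enters the system.
    	ctx := context.WithValue(context.Background(), RequestTimestamp, time.Now().UnixNano())

    	// Model side (simplified): compare the context timestamp against the
    	// revision's last-update time before applying new data.
    	lastUpdate := time.Now() // stand-in for revision.GetLastUpdate()
    	if ts, ok := ctx.Value(RequestTimestamp).(int64); ok && lastUpdate.UnixNano() > ts {
    		fmt.Println("request data is older than the in-memory revision; ignoring it")
    		return
    	}
    	fmt.Println("applying revision change")
    }
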
Change-Id: If3d142a73b1da0d64fa6a819530f297dbfada2d3
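
Illustrative only, not part of the patch: the reserve/apply/release pattern
that now protects proxy writes in place of the proxy access control (PAC)
singleton. The interface below is a hypothetical stand-in for the kvstore
client, shown only to capture the calling pattern used in this change by
Proxy.Update, Add and Remove.

    package main

    import "fmt"

    // Same TTL (in seconds) as the ReservationTTL constant added in db/model/model.go.
    const ReservationTTL int64 = 180

    // kvReserver is a hypothetical subset of the KV store client interface,
    // used here only for illustration.
    type kvReserver interface {
    	Reserve(key string, value interface{}, ttl int64) (interface{}, error)
    	ReleaseReservation(key string) error
    }

    // updateWithReservation reserves the path before mutating the model and
    // releases the reservation once the update completes.
    func updateWithReservation(kv kvReserver, pathLock string, apply func()) error {
    	if _, err := kv.Reserve(pathLock+"_", "owner-uuid", ReservationTTL); err != nil {
    		return fmt.Errorf("failed to reserve %s: %v", pathLock, err)
    	}
    	defer kv.ReleaseReservation(pathLock + "_")

    	apply() // perform the model update while the path is reserved
    	return nil
    }

    func main() {}
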
diff --git a/db/kvstore/client.go b/db/kvstore/client.go
index 34ab711..937eefe 100644
--- a/db/kvstore/client.go
+++ b/db/kvstore/client.go
@@ -38,6 +38,7 @@
 type KVPair struct {
 	Key     string
 	Value   interface{}
+	Version int64
 	Session string
 	Lease   int64
 }
@@ -47,12 +48,13 @@
 }
 
 // NewKVPair creates a new KVPair object
-func NewKVPair(key string, value interface{}, session string, lease int64) *KVPair {
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
 	kv := new(KVPair)
 	kv.Key = key
 	kv.Value = value
 	kv.Session = session
 	kv.Lease = lease
+	kv.Version = version
 	return kv
 }
 
@@ -61,14 +63,16 @@
 	EventType int
 	Key       interface{}
 	Value     interface{}
+	Version   int64
 }
 
 // NewEvent creates a new Event object
-func NewEvent(eventType int, key interface{}, value interface{}) *Event {
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
 	evnt := new(Event)
 	evnt.EventType = eventType
 	evnt.Key = key
 	evnt.Value = value
+	evnt.Version = version
 
 	return evnt
 }
diff --git a/db/kvstore/consulclient.go b/db/kvstore/consulclient.go
index 2d02342..4b25b5f 100644
--- a/db/kvstore/consulclient.go
+++ b/db/kvstore/consulclient.go
@@ -79,7 +79,7 @@
 	}
 	m := make(map[string]*KVPair)
 	for _, kvp := range kvps {
-		m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0)
+		m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1)
 	}
 	return m, nil
 }
@@ -100,7 +100,7 @@
 		return nil, err
 	}
 	if kvp != nil {
-		return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0), nil
+		return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1), nil
 	}
 
 	return nil, nil
@@ -455,7 +455,7 @@
 		default:
 			if err != nil {
 				log.Warnw("error-from-watch", log.Fields{"error": err})
-				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""))
+				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
 			} else {
 				log.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
 			}
@@ -469,12 +469,12 @@
 		} else {
 			log.Debugw("update-received", log.Fields{"pair": pair})
 			if pair == nil {
-				ch <- NewEvent(DELETE, key, []byte(""))
+				ch <- NewEvent(DELETE, key, []byte(""), -1)
 			} else if !c.isKVEqual(pair, previousKVPair) {
 				// Push the change onto the channel if the data has changed
 				// For now just assume it's a PUT change
 				log.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
-				ch <- NewEvent(PUT, pair.Key, pair.Value)
+				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
 			}
 			previousKVPair = pair
 			lastIndex = meta.LastIndex
diff --git a/db/kvstore/etcdclient.go b/db/kvstore/etcdclient.go
index 6935296..4f6f90b 100644
--- a/db/kvstore/etcdclient.go
+++ b/db/kvstore/etcdclient.go
@@ -74,7 +74,7 @@
 	}
 	m := make(map[string]*KVPair)
 	for _, ev := range resp.Kvs {
-		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease)
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
 	}
 	return m, nil
 }
@@ -94,7 +94,7 @@
 	}
 	for _, ev := range resp.Kvs {
 		// Only one value is returned
-		return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease), nil
+		return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
 	}
 	return nil, nil
 }
@@ -399,7 +399,7 @@
 	for resp := range channel {
 		for _, ev := range resp.Events {
 			//log.Debugf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
-			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value)
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
 		}
 	}
 	log.Debug("stop-listening-on-channel ...")
diff --git a/db/model/branch.go b/db/model/branch.go
index 5502e63..3389291 100644
--- a/db/model/branch.go
+++ b/db/model/branch.go
@@ -26,7 +26,7 @@
 
 // Branch structure is used to classify a collection of transaction based revisions
 type Branch struct {
-	sync.RWMutex
+	mutex      sync.RWMutex
 	Node       *node
 	Txid       string
 	Origin     Revision
@@ -85,8 +85,8 @@
 
 // SetLatest assigns the latest revision for this branch
 func (b *Branch) SetLatest(latest Revision) {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
 
 	if b.Latest != nil {
 		log.Debugw("updating-latest-revision", log.Fields{"current": b.Latest.GetHash(), "new": latest.GetHash()})
@@ -119,16 +119,16 @@
 
 // GetLatest retrieves the latest revision of the branch
 func (b *Branch) GetLatest() Revision {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.RLock()
+	defer b.mutex.RUnlock()
 
 	return b.Latest
 }
 
 // GetOrigin retrieves the original revision of the branch
 func (b *Branch) GetOrigin() Revision {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.RLock()
+	defer b.mutex.RUnlock()
 
 	return b.Origin
 }
@@ -142,8 +142,8 @@
 
 // GetRevision pulls a revision entry at the specified hash
 func (b *Branch) GetRevision(hash string) Revision {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.RLock()
+	defer b.mutex.RUnlock()
 
 	if revision, ok := b.Revisions[hash]; ok {
 		return revision
@@ -154,16 +154,16 @@
 
 // SetRevision updates a revision entry at the specified hash
 func (b *Branch) SetRevision(hash string, revision Revision) {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
 
 	b.Revisions[hash] = revision
 }
 
 // DeleteRevision removes a revision with the specified hash
 func (b *Branch) DeleteRevision(hash string) {
-	b.Lock()
-	defer b.Unlock()
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
 
 	if _, ok := b.Revisions[hash]; ok {
 		delete(b.Revisions, hash)
diff --git a/db/model/child_type.go b/db/model/child_type.go
index da6f688..250de9c 100644
--- a/db/model/child_type.go
+++ b/db/model/child_type.go
@@ -27,18 +27,50 @@
 	"sync"
 )
 
-type singletonChildTypeCache struct {
+type childTypesSingleton struct {
+	mutex sync.RWMutex
 	Cache map[interface{}]map[string]*ChildType
 }
 
-var instanceChildTypeCache *singletonChildTypeCache
-var onceChildTypeCache sync.Once
+var instanceChildTypes *childTypesSingleton
+var onceChildTypes sync.Once
 
-func getChildTypeCache() *singletonChildTypeCache {
-	onceChildTypeCache.Do(func() {
-		instanceChildTypeCache = &singletonChildTypeCache{}
+func getChildTypes() *childTypesSingleton {
+	onceChildTypes.Do(func() {
+		instanceChildTypes = &childTypesSingleton{}
 	})
-	return instanceChildTypeCache
+	return instanceChildTypes
+}
+
+func (s *childTypesSingleton) GetCache() map[interface{}]map[string]*ChildType {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	return s.Cache
+}
+
+func (s *childTypesSingleton) SetCache(cache map[interface{}]map[string]*ChildType) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.Cache = cache
+}
+
+func (s *childTypesSingleton) GetCacheEntry(key interface{}) (map[string]*ChildType, bool) {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	childTypeMap, exists := s.Cache[key]
+	return childTypeMap, exists
+}
+
+func (s *childTypesSingleton) SetCacheEntry(key interface{}, value map[string]*ChildType) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.Cache[key] = value
+}
+
+func (s *childTypesSingleton) ResetCache() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.Cache = make(map[interface{}]map[string]*ChildType)
 }
 
 // ChildType structure contains construct details of an object
@@ -58,12 +90,12 @@
 	var names map[string]*ChildType
 	var namesExist bool
 
-	if getChildTypeCache().Cache == nil {
-		getChildTypeCache().Cache = make(map[interface{}]map[string]*ChildType)
+	if getChildTypes().Cache == nil {
+		getChildTypes().Cache = make(map[interface{}]map[string]*ChildType)
 	}
 
 	msgType := reflect.TypeOf(cls)
-	inst := getChildTypeCache()
+	inst := getChildTypes()
 
 	if names, namesExist = inst.Cache[msgType.String()]; !namesExist {
 		names = make(map[string]*ChildType)
@@ -127,9 +159,10 @@
 			}
 		}
 
-		getChildTypeCache().Cache[msgType.String()] = names
+		getChildTypes().Cache[msgType.String()] = names
 	} else {
-		log.Debugf("Cache entry for %s: %+v", msgType.String(), inst.Cache[msgType.String()])
+		entry, _ := inst.GetCacheEntry(msgType.String())
+		log.Debugf("Cache entry for %s: %+v", msgType.String(), entry)
 	}
 
 	return names
diff --git a/db/model/child_type_test.go b/db/model/child_type_test.go
index 349baa6..4725975 100644
--- a/db/model/child_type_test.go
+++ b/db/model/child_type_test.go
@@ -39,10 +39,10 @@
 
 // Verify that the cache contains an entry for types on which ChildrenFields was performed
 func TestChildType_02_Cache_Keys(t *testing.T) {
-	if _, exists := getChildTypeCache().Cache[reflect.TypeOf(&voltha.Device{}).String()]; !exists {
+	if _, exists := getChildTypes().Cache[reflect.TypeOf(&voltha.Device{}).String()]; !exists {
 		t.Errorf("getChildTypeCache().Cache should have an entry of type: %+v\n", reflect.TypeOf(&voltha.Device{}).String())
 	}
-	for k := range getChildTypeCache().Cache {
+	for k := range getChildTypes().Cache {
 		t.Logf("getChildTypeCache().Cache Key:%+v\n", k)
 	}
 }
diff --git a/db/model/model.go b/db/model/model.go
index 18ff905..3446303 100644
--- a/db/model/model.go
+++ b/db/model/model.go
@@ -23,3 +23,15 @@
 	log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
 	defer log.CleanUp()
 }
+
+const (
+	// period to determine when data requires a refresh (in milliseconds)
+	// TODO: make this configurable?
+	DataRefreshPeriod int64 = 5000
+
+	// Attribute used to store a timestamp in the context object
+	RequestTimestamp = "request-timestamp"
+
+	// Time limit for a KV path reservation (in seconds)
+	ReservationTTL int64 = 180
+)
diff --git a/db/model/node.go b/db/model/node.go
index 207df09..fcd3b5f 100644
--- a/db/model/node.go
+++ b/db/model/node.go
@@ -20,6 +20,7 @@
 // TODO: proper logging
 
 import (
+	"context"
 	"fmt"
 	"github.com/golang/protobuf/proto"
 	"github.com/opencord/voltha-go/common/log"
@@ -32,10 +33,6 @@
 // When a branch has no transaction id, everything gets stored in NONE
 const (
 	NONE string = "none"
-
-	// period to determine when data requires a refresh (in seconds)
-	// TODO: make this configurable?
-	DATA_REFRESH_PERIOD int64 = 5000
 )
 
 // Node interface is an abstraction of the node data structure
@@ -43,10 +40,14 @@
 	MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple)
 
 	// CRUD functions
-	Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
-	Get(path string, hash string, depth int, deep bool, txid string) interface{}
-	Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
-	Remove(path string, txid string, makeBranch MakeBranchFunction) Revision
+	Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
+	Get(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{}
+	List(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{}
+	Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
+	Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision
+	CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy
+
+	GetProxy() *Proxy
 
 	MakeBranch(txid string) *Branch
 	DeleteBranch(txid string)
@@ -55,16 +56,12 @@
 	MakeTxBranch() string
 	DeleteTxBranch(txid string)
 	FoldTxBranch(txid string)
-
-	CreateProxy(path string, exclusive bool) *Proxy
-	GetProxy() *Proxy
 }
 
 type node struct {
-	mutex sync.RWMutex
-	Root  *root
-	Type  interface{}
-
+	mutex     sync.RWMutex
+	Root      *root
+	Type      interface{}
 	Branches  map[string]*Branch
 	Tags      map[string]Revision
 	Proxy     *Proxy
@@ -133,7 +130,7 @@
 			log.Debugw("saving-latest-data", log.Fields{"hash": revision.GetHash(), "data": revision.GetData()})
 			// Tag a timestamp to that revision
 			revision.SetLastUpdate()
-			GetRevCache().Cache.Store(revision.GetName(), revision)
+			GetRevCache().Set(revision.GetName(), revision)
 		}
 		branch.SetLatest(revision)
 	}
@@ -148,13 +145,13 @@
 			for _, change := range changeAnnouncement {
 				log.Debugw("adding-callback",
 					log.Fields{
-						"callbacks":    n.Proxy.getCallbacks(change.Type),
+						"callbacks":    n.GetProxy().getCallbacks(change.Type),
 						"type":         change.Type,
 						"previousData": change.PreviousData,
 						"latestData":   change.LatestData,
 					})
 				n.Root.AddCallback(
-					n.Proxy.InvokeCallbacks,
+					n.GetProxy().InvokeCallbacks,
 					change.Type,
 					true,
 					change.PreviousData,
@@ -253,7 +250,7 @@
 }
 
 // Get retrieves the data from a node tree that resides at the specified path
-func (n *node) List(path string, hash string, depth int, deep bool, txid string) interface{} {
+func (n *node) List(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{} {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -281,7 +278,7 @@
 
 	var result interface{}
 	var prList []interface{}
-	if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil {
+	if pr := rev.LoadFromPersistence(ctx, path, txid, nil); pr != nil {
 		for _, revEntry := range pr {
 			prList = append(prList, revEntry.GetData())
 		}
@@ -292,7 +289,7 @@
 }
 
 // Get retrieves the data from a node tree that resides at the specified path
-func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
+func (n *node) Get(ctx context.Context, path string, hash string, depth int, reconcile bool, txid string) interface{} {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -323,9 +320,9 @@
 		// 1.  Start with the cache which stores revisions by watch names
 		// 2.  Then look in the revision tree, especially if it's a sub-path such as /devices/1234/flows
 		// 3.  Move on to the KV store if that path cannot be found or if the entry has expired
-		if entry, exists := GetRevCache().Cache.Load(path); exists && entry.(Revision) != nil {
+		if entry, exists := GetRevCache().Get(path); exists && entry.(Revision) != nil {
 			entryAge := time.Now().Sub(entry.(Revision).GetLastUpdate()).Nanoseconds() / int64(time.Millisecond)
-			if entryAge < DATA_REFRESH_PERIOD {
+			if entryAge < DataRefreshPeriod {
 				log.Debugw("using-cache-entry", log.Fields{
 					"path": path,
 					"hash": hash,
@@ -335,7 +332,7 @@
 			} else {
 				log.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
 			}
-		} else if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+		} else if result = n.getPath(ctx, rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
 			log.Debugw("using-rev-tree-entry", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
 			return result
 		} else {
@@ -357,7 +354,7 @@
 	// If we got to this point, we are either trying to reconcile with the db
 	// or we simply failed at getting information from memory
 	if n.Root.KvStore != nil {
-		if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil && len(pr) > 0 {
+		if pr := rev.LoadFromPersistence(ctx, path, txid, nil); pr != nil && len(pr) > 0 {
 			// Did we receive a single or multiple revisions?
 			if len(pr) > 1 {
 				var revs []interface{}
@@ -375,7 +372,7 @@
 }
 
 //getPath traverses the specified path and retrieves the data associated to it
-func (n *node) getPath(rev Revision, path string, depth int) interface{} {
+func (n *node) getPath(ctx context.Context, rev Revision, path string, depth int) interface{} {
 	if path == "" {
 		return n.getData(rev, depth)
 	}
@@ -406,7 +403,7 @@
 					return nil
 				} else {
 					childNode := childRev.GetNode()
-					return childNode.getPath(childRev, path, depth)
+					return childNode.getPath(ctx, childRev, path, depth)
 				}
 			} else {
 				var response []interface{}
@@ -430,11 +427,13 @@
 			}
 			return response
 		}
+	} else if children := rev.GetChildren(name); children != nil && len(children) > 0 {
+		childRev := children[0]
+		childNode := childRev.GetNode()
+		return childNode.getPath(ctx, childRev, path, depth)
 	}
 
-	childRev := rev.GetChildren(name)[0]
-	childNode := childRev.GetNode()
-	return childNode.getPath(childRev, path, depth)
+	return nil
 }
 
 // getData retrieves the data from a node revision
@@ -454,7 +453,7 @@
 }
 
 // Update changes the content of a node at the specified path with the provided data
-func (n *node) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -475,7 +474,7 @@
 		log.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
 	}
 	if path == "" {
-		return n.doUpdate(branch, data, strict)
+		return n.doUpdate(ctx, branch, data, strict)
 	}
 
 	rev := branch.GetLatest()
@@ -493,7 +492,7 @@
 	var children []Revision
 
 	if field == nil {
-		return n.doUpdate(branch, data, strict)
+		return n.doUpdate(ctx, branch, data, strict)
 	}
 
 	if field.IsContainer {
@@ -523,11 +522,11 @@
 
 			// Save proxy in child node to ensure callbacks are called later on
 			// only assign in cases of non sub-folder proxies, i.e. "/"
-			if childNode.Proxy == nil && n.Proxy != nil && n.Proxy.getFullPath() == "" {
+			if childNode.Proxy == nil && n.Proxy != nil && n.GetProxy().getFullPath() == "" {
 				childNode.Proxy = n.Proxy
 			}
 
-			newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+			newChildRev := childNode.Update(ctx, path, data, strict, txid, makeBranch)
 
 			if newChildRev.GetHash() == childRev.GetHash() {
 				if newChildRev != childRev {
@@ -559,7 +558,7 @@
 				children = append(children, newChildRev)
 			}
 
-			updatedRev := rev.UpdateChildren(name, children, branch)
+			updatedRev := rev.UpdateChildren(ctx, name, children, branch)
 
 			n.makeLatest(branch, updatedRev, nil)
 			updatedRev.ChildDrop(name, childRev.GetHash())
@@ -572,12 +571,12 @@
 	} else {
 		childRev := rev.GetChildren(name)[0]
 		childNode := childRev.GetNode()
-		newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+		newChildRev := childNode.Update(ctx, path, data, strict, txid, makeBranch)
 
 		branch.LatestLock.Lock()
 		defer branch.LatestLock.Unlock()
 
-		updatedRev := rev.UpdateChildren(name, []Revision{newChildRev}, branch)
+		updatedRev := rev.UpdateChildren(ctx, name, []Revision{newChildRev}, branch)
 		n.makeLatest(branch, updatedRev, nil)
 
 		updatedRev.ChildDrop(name, childRev.GetHash())
@@ -588,7 +587,7 @@
 	return nil
 }
 
-func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
+func (n *node) doUpdate(ctx context.Context, branch *Branch, data interface{}, strict bool) Revision {
 	log.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
 
 	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
@@ -613,7 +612,7 @@
 			log.Debugf("checking access violations")
 		}
 
-		rev := branch.GetLatest().UpdateData(data, branch)
+		rev := branch.GetLatest().UpdateData(ctx, data, branch)
 		changes := []ChangeTuple{{POST_UPDATE, branch.GetLatest().GetData(), rev.GetData()}}
 		n.makeLatest(branch, rev, changes)
 
@@ -623,7 +622,7 @@
 }
 
 // Add inserts a new node at the specified path with the provided data
-func (n *node) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -688,7 +687,7 @@
 
 				children = append(children, childRev)
 
-				updatedRev := rev.UpdateChildren(name, children, branch)
+				updatedRev := rev.UpdateChildren(ctx, name, children, branch)
 				changes := []ChangeTuple{{POST_ADD, nil, childRev.GetData()}}
 				childRev.SetupWatch(childRev.GetName())
 
@@ -718,7 +717,7 @@
 			}
 
 			childNode := childRev.GetNode()
-			newChildRev := childNode.Add(path, data, txid, makeBranch)
+			newChildRev := childNode.Add(ctx, path, data, txid, makeBranch)
 
 			// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
 			newChildRev.SetName(name + "/" + keyValue.(string))
@@ -732,7 +731,7 @@
 				children = append(children, newChildRev)
 			}
 
-			updatedRev := rev.UpdateChildren(name, children, branch)
+			updatedRev := rev.UpdateChildren(ctx, name, children, branch)
 			n.makeLatest(branch, updatedRev, nil)
 
 			updatedRev.ChildDrop(name, childRev.GetHash())
@@ -749,7 +748,7 @@
 }
 
 // Remove eliminates a node at the specified path
-func (n *node) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision {
 	n.mutex.Lock()
 	defer n.mutex.Unlock()
 
@@ -805,7 +804,7 @@
 					if childNode.Proxy == nil {
 						childNode.Proxy = n.Proxy
 					}
-					newChildRev := childNode.Remove(path, txid, makeBranch)
+					newChildRev := childNode.Remove(ctx, path, txid, makeBranch)
 
 					branch.LatestLock.Lock()
 					defer branch.LatestLock.Unlock()
@@ -833,7 +832,7 @@
 				}
 
 				childRev.StorageDrop(txid, true)
-				GetRevCache().Cache.Delete(childRev.GetName())
+				GetRevCache().Delete(childRev.GetName())
 
 				branch.LatestLock.Lock()
 				defer branch.LatestLock.Unlock()
@@ -950,11 +949,11 @@
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ node Proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 // CreateProxy returns a reference to a sub-tree of the data model
-func (n *node) CreateProxy(path string, exclusive bool) *Proxy {
-	return n.createProxy(path, path, n, exclusive)
+func (n *node) CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy {
+	return n.createProxy(ctx, path, path, n, exclusive)
 }
 
-func (n *node) createProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+func (n *node) createProxy(ctx context.Context, path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
 	log.Debugw("node-create-proxy", log.Fields{
 		"node-type":        reflect.ValueOf(n.Type).Type(),
 		"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
@@ -973,7 +972,6 @@
 	partition := strings.SplitN(path, "/", 2)
 	name := partition[0]
 	var nodeType interface{}
-	// Node type is chosen depending on if we have reached the end of path or not
 	if len(partition) < 2 {
 		path = ""
 		nodeType = n.Type
@@ -1020,8 +1018,6 @@
 				children = make([]Revision, len(rev.GetChildren(name)))
 				copy(children, rev.GetChildren(name))
 
-				// Try to find a matching revision in memory
-				// If not found try the db
 				var childRev Revision
 				if _, childRev = n.findRevByKey(children, field.Key, keyValue); childRev != nil {
 					log.Debugw("found-revision-matching-key-in-memory", log.Fields{
@@ -1030,7 +1026,7 @@
 						"fullPath":         fullPath,
 						"name":             name,
 					})
-				} else if revs := n.GetBranch(NONE).GetLatest().LoadFromPersistence(fullPath, "", nil); revs != nil && len(revs) > 0 {
+				} else if revs := n.GetBranch(NONE).GetLatest().LoadFromPersistence(ctx, fullPath, "", nil); revs != nil && len(revs) > 0 {
 					log.Debugw("found-revision-matching-key-in-db", log.Fields{
 						"node-type":        reflect.ValueOf(n.Type).Type(),
 						"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
@@ -1048,7 +1044,7 @@
 				}
 				if childRev != nil {
 					childNode := childRev.GetNode()
-					return childNode.createProxy(path, fullPath, n, exclusive)
+					return childNode.createProxy(ctx, path, fullPath, n, exclusive)
 				}
 			} else {
 				log.Errorw("cannot-access-index-of-empty-container", log.Fields{
@@ -1067,7 +1063,7 @@
 			})
 			childRev := rev.GetChildren(name)[0]
 			childNode := childRev.GetNode()
-			return childNode.createProxy(path, fullPath, n, exclusive)
+			return childNode.createProxy(ctx, path, fullPath, n, exclusive)
 		}
 	} else {
 		log.Debugw("field-object-is-nil", log.Fields{
@@ -1116,12 +1112,12 @@
 		n.Proxy = NewProxy(r, n, parentNode, path, fullPath, exclusive)
 	} else {
 		log.Debugw("node-has-existing-proxy", log.Fields{
-			"node-type":        reflect.ValueOf(n.Proxy.Node.Type).Type(),
-			"parent-node-type": reflect.ValueOf(n.Proxy.ParentNode.Type).Type(),
-			"path":             n.Proxy.Path,
-			"fullPath":         n.Proxy.FullPath,
+			"node-type":        reflect.ValueOf(n.GetProxy().Node.Type).Type(),
+			"parent-node-type": reflect.ValueOf(n.GetProxy().ParentNode.Type).Type(),
+			"path":             n.GetProxy().Path,
+			"fullPath":         n.GetProxy().FullPath,
 		})
-		if n.Proxy.Exclusive {
+		if n.GetProxy().Exclusive {
 			log.Error("node is already owned exclusively")
 		}
 	}
@@ -1160,3 +1156,6 @@
 func (n *node) GetRoot() *root {
 	return n.Root
 }
+func (n *node) SetRoot(root *root) {
+	n.Root = root
+}
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index 297a740..6900c5d 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -17,6 +17,7 @@
 
 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"fmt"
 	"github.com/golang/protobuf/proto"
@@ -35,6 +36,16 @@
 	Cache sync.Map
 }
 
+func (s *revCacheSingleton) Get(path string) (interface{}, bool) {
+	return s.Cache.Load(path)
+}
+func (s *revCacheSingleton) Set(path string, value interface{}) {
+	s.Cache.Store(path, value)
+}
+func (s *revCacheSingleton) Delete(path string) {
+	s.Cache.Delete(path)
+}
+
 var revCacheInstance *revCacheSingleton
 var revCacheOnce sync.Once
 
@@ -269,10 +280,17 @@
 }
 
 // UpdateData will refresh the data content of the revision
-func (npr *NonPersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+func (npr *NonPersistedRevision) UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision {
 	npr.mutex.Lock()
 	defer npr.mutex.Unlock()
 
+	if ctx != nil {
+		if ctxTS, ok := ctx.Value(RequestTimestamp).(int64); ok && npr.lastUpdate.UnixNano() > ctxTS {
+			log.Warnw("data-is-older-than-current", log.Fields{"ctx-ts": ctxTS, "rev-ts": npr.lastUpdate.UnixNano()})
+			return npr
+		}
+	}
+
 	// Do not update the revision if data is the same
 	if npr.Config.Data != nil && npr.Config.hashData(npr.Root, data) == npr.Config.Hash {
 		log.Debugw("stored-data-matches-latest", log.Fields{"stored": npr.Config.Data, "provided": data})
@@ -300,7 +318,7 @@
 
 // UpdateChildren will refresh the list of children with the provided ones
 // It will carefully go through the list and ensure that no child is lost
-func (npr *NonPersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+func (npr *NonPersistedRevision) UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision {
 	npr.mutex.Lock()
 	defer npr.mutex.Unlock()
 
@@ -358,7 +376,7 @@
 					})
 
 					// replace entry
-					newChild.GetNode().Root = existingChildren[nameIndex].GetNode().Root
+					newChild.GetNode().SetRoot(existingChildren[nameIndex].GetNode().GetRoot())
 					updatedChildren = append(updatedChildren, newChild)
 				} else {
 					log.Debugw("keeping-existing-child", log.Fields{
@@ -461,7 +479,7 @@
 	return npr.lastUpdate
 }
 
-func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
+func (npr *NonPersistedRevision) LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	// stub... required by interface
 	return nil
 }
@@ -473,3 +491,7 @@
 func (npr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
 	// stub ... required by interface
 }
+
+func (npr *NonPersistedRevision) getVersion() int64 {
+	return -1
+}
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index 2ab91b7..d2d228f 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -19,7 +19,9 @@
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
 	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/kvstore"
 	"reflect"
@@ -32,11 +34,13 @@
 	Revision
 	Compress bool
 
-	events    chan *kvstore.Event
-	kvStore   *Backend
-	mutex     sync.RWMutex
-	isStored  bool
-	isWatched bool
+	events       chan *kvstore.Event
+	kvStore      *Backend
+	mutex        sync.RWMutex
+	versionMutex sync.RWMutex
+	Version      int64
+	isStored     bool
+	isWatched    bool
 }
 
 type watchCache struct {
@@ -57,10 +61,23 @@
 func NewPersistedRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
 	pr := &PersistedRevision{}
 	pr.kvStore = branch.Node.GetRoot().KvStore
+	pr.Version = 1
 	pr.Revision = NewNonPersistedRevision(nil, branch, data, children)
 	return pr
 }
 
+func (pr *PersistedRevision) getVersion() int64 {
+	pr.versionMutex.RLock()
+	defer pr.versionMutex.RUnlock()
+	return pr.Version
+}
+
+func (pr *PersistedRevision) setVersion(version int64) {
+	pr.versionMutex.Lock()
+	defer pr.versionMutex.Unlock()
+	pr.Version = version
+}
+
 // Finalize is responsible of saving the revision in the persistent storage
 func (pr *PersistedRevision) Finalize(skipOnExist bool) {
 	pr.store(skipOnExist)
@@ -73,8 +90,12 @@
 
 	log.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 
-	if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
-		// TODO report error
+	// clone the revision data to avoid any race conditions with processes
+	// accessing the same data
+	cloned := proto.Clone(pr.GetConfig().Data.(proto.Message))
+
+	if blob, err := proto.Marshal(cloned); err != nil {
+		log.Errorw("problem-to-marshal", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 	} else {
 		if pr.Compress {
 			var b bytes.Buffer
@@ -84,10 +105,11 @@
 			blob = b.Bytes()
 		}
 
+		GetRevCache().Set(pr.GetName(), pr)
 		if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
 			log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 		} else {
-			log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data, "version": pr.getVersion()})
 			pr.isStored = true
 		}
 	}
@@ -145,6 +167,20 @@
 
 			case kvstore.PUT:
 				log.Debugw("update-in-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+				if latestRev.getVersion() >= event.Version {
+					log.Debugw("skipping-matching-or-older-revision", log.Fields{
+						"watch":          latestRev.GetName(),
+						"watch-version":  event.Version,
+						"latest-version": latestRev.getVersion(),
+					})
+					continue
+				} else {
+					log.Debugw("watch-revision-is-newer", log.Fields{
+						"watch":          latestRev.GetName(),
+						"watch-version":  event.Version,
+						"latest-version": latestRev.getVersion(),
+					})
+				}
 
 				data := reflect.New(reflect.TypeOf(latestRev.GetData()).Elem())
 
@@ -154,7 +190,6 @@
 					log.Debugw("un-marshaled-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "data": data.Interface()})
 
 					var pathLock string
-					var pac *proxyAccessControl
 					var blobs map[string]*kvstore.KVPair
 
 					// The watch reported new persistence data.
@@ -166,6 +201,7 @@
 						Value:   event.Value,
 						Session: "",
 						Lease:   0,
+						Version: event.Version,
 					}
 
 					if latestRev.GetNode().GetProxy() != nil {
@@ -173,82 +209,35 @@
 						// If a proxy exists for this revision, use it to lock access to the path
 						// and prevent simultaneous updates to the object in memory
 						//
-						pathLock, _ = latestRev.GetNode().GetProxy().parseForControlledPath(latestRev.GetNode().GetProxy().getFullPath())
 
 						//If the proxy already has a request in progress, then there is no need to process the watch
-						log.Debugw("checking-if-path-is-locked", log.Fields{"key": latestRev.GetHash(), "pathLock": pathLock})
-						if PAC().IsReserved(pathLock) {
+						if latestRev.GetNode().GetProxy().GetOperation() != PROXY_NONE {
 							log.Debugw("operation-in-progress", log.Fields{
 								"key":       latestRev.GetHash(),
 								"path":      latestRev.GetNode().GetProxy().getFullPath(),
-								"operation": latestRev.GetNode().GetProxy().Operation.String(),
+								"operation": latestRev.GetNode().GetProxy().operation.String(),
 							})
-
-							//continue
-
-							// Identify the operation type and determine if the watch event should be applied or not.
-							switch latestRev.GetNode().GetProxy().Operation {
-							case PROXY_REMOVE:
-								fallthrough
-
-							case PROXY_ADD:
-								fallthrough
-
-							case PROXY_UPDATE:
-								// We will need to reload once the operation completes.
-								// Therefore, the data of the current event is most likely out-dated
-								// and should be ignored
-								log.Debugw("ignore-watch-event", log.Fields{
-									"key":       latestRev.GetHash(),
-									"path":      latestRev.GetNode().GetProxy().getFullPath(),
-									"operation": latestRev.GetNode().GetProxy().Operation.String(),
-								})
-
-								continue
-
-							case PROXY_CREATE:
-								fallthrough
-
-							case PROXY_LIST:
-								fallthrough
-
-							case PROXY_GET:
-								fallthrough
-
-							case PROXY_WATCH:
-								fallthrough
-
-							default:
-								log.Debugw("process-watch-event", log.Fields{
-									"key":       latestRev.GetHash(),
-									"path":      latestRev.GetNode().GetProxy().getFullPath(),
-									"operation": latestRev.GetNode().GetProxy().Operation.String(),
-								})
-							}
+							continue
 						}
 
+						pathLock, _ = latestRev.GetNode().GetProxy().parseForControlledPath(latestRev.GetNode().GetProxy().getFullPath())
+
 						// Reserve the path to prevent others to modify while we reload from persistence
-						log.Debugw("reserve-and-lock-path", log.Fields{"key": latestRev.GetHash(), "path": pathLock})
-						pac = PAC().ReservePath(latestRev.GetNode().GetProxy().getFullPath(),
-							latestRev.GetNode().GetProxy(), pathLock)
-						pac.lock()
-						latestRev.GetNode().GetProxy().Operation = PROXY_WATCH
-						pac.SetProxy(latestRev.GetNode().GetProxy())
+						latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+						latestRev.GetNode().GetProxy().SetOperation(PROXY_WATCH)
 
 						// Load changes and apply to memory
-						latestRev.LoadFromPersistence(latestRev.GetName(), "", blobs)
+						latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
 
-						log.Debugw("release-and-unlock-path", log.Fields{"key": latestRev.GetHash(), "path": pathLock})
-						pac.getProxy().Operation = PROXY_GET
-						pac.unlock()
-						PAC().ReleasePath(pathLock)
+						// Release path
+						latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
 
 					} else {
 						// This block should be reached only if coming from a non-proxied request
 						log.Debugw("revision-with-no-proxy", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
 
 						// Load changes and apply to memory
-						latestRev.LoadFromPersistence(latestRev.GetName(), "", blobs)
+						latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
 					}
 				}
 
@@ -264,16 +253,17 @@
 }
 
 // UpdateData modifies the information in the data model and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision {
 	log.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
 
-	newNPR := pr.Revision.UpdateData(data, branch)
+	newNPR := pr.Revision.UpdateData(ctx, data, branch)
 
 	newPR := &PersistedRevision{
 		Revision:  newNPR,
 		Compress:  pr.Compress,
 		kvStore:   pr.kvStore,
 		events:    pr.events,
+		Version:   pr.getVersion(),
 		isWatched: pr.isWatched,
 	}
 
@@ -289,17 +279,17 @@
 }
 
 // UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateChildren(name string, children []Revision,
-	branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision {
 	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
 
-	newNPR := pr.Revision.UpdateChildren(name, children, branch)
+	newNPR := pr.Revision.UpdateChildren(ctx, name, children, branch)
 
 	newPR := &PersistedRevision{
 		Revision:  newNPR,
 		Compress:  pr.Compress,
 		kvStore:   pr.kvStore,
 		events:    pr.events,
+		Version:   pr.getVersion(),
 		isWatched: pr.isWatched,
 	}
 
@@ -324,6 +314,7 @@
 		Compress:  pr.Compress,
 		kvStore:   pr.kvStore,
 		events:    pr.events,
+		Version:   pr.getVersion(),
 		isWatched: pr.isWatched,
 	}
 
@@ -346,8 +337,7 @@
 // Drop takes care of eliminating a revision hash that is no longer needed
 // and its associated config when required
 func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
-	log.Debugw("dropping-revision",
-		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
+	log.Debugw("dropping-revision", log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
 
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
@@ -376,9 +366,10 @@
 }
 
 // verifyPersistedEntry validates if the provided data is available or not in memory and applies updates as required
-func (pr *PersistedRevision) verifyPersistedEntry(data interface{}, typeName string, keyName string, keyValue string, txid string) (response Revision) {
+func (pr *PersistedRevision) verifyPersistedEntry(ctx context.Context, data interface{}, typeName string, keyName string,
+	keyValue string, txid string, version int64) (response Revision) {
 	// Parent which holds the current node entry
-	parent := pr.GetBranch().Node.Root
+	parent := pr.GetBranch().Node.GetRoot()
 
 	// Get a copy of the parent's children
 	children := make([]Revision, len(parent.GetBranch(NONE).Latest.GetChildren(typeName)))
@@ -389,11 +380,12 @@
 		// A child matching the provided key exists in memory
 		// Verify if the data differs from what was retrieved from persistence
 		// Also check if we are treating a newer revision of the data or not
-		if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() {
+		if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() && childRev.getVersion() < version {
 			log.Debugw("revision-data-is-different", log.Fields{
-				"key":  childRev.GetHash(),
-				"name": childRev.GetName(),
-				"data": childRev.GetData(),
+				"key":     childRev.GetHash(),
+				"name":    childRev.GetName(),
+				"data":    childRev.GetData(),
+				"version": childRev.getVersion(),
 			})
 
 			//
@@ -404,14 +396,15 @@
 			childRev.GetBranch().LatestLock.Lock()
 
 			// Update child
-			updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
+			updatedChildRev := childRev.UpdateData(ctx, data, childRev.GetBranch())
 
 			updatedChildRev.GetNode().SetProxy(childRev.GetNode().GetProxy())
 			updatedChildRev.SetupWatch(updatedChildRev.GetName())
 			updatedChildRev.SetLastUpdate()
+			updatedChildRev.(*PersistedRevision).setVersion(version)
 
 			// Update cache
-			GetRevCache().Cache.Store(updatedChildRev.GetName(), updatedChildRev)
+			GetRevCache().Set(updatedChildRev.GetName(), updatedChildRev)
 			childRev.Drop(txid, false)
 
 			childRev.GetBranch().LatestLock.Unlock()
@@ -423,7 +416,7 @@
 			// BEGIN lock parent -- Update parent
 			parent.GetBranch(NONE).LatestLock.Lock()
 
-			updatedRev := parent.GetBranch(NONE).Latest.UpdateChildren(typeName, children, parent.GetBranch(NONE))
+			updatedRev := parent.GetBranch(NONE).GetLatest().UpdateChildren(ctx, typeName, children, parent.GetBranch(NONE))
 			parent.GetBranch(NONE).Node.makeLatest(parent.GetBranch(NONE), updatedRev, nil)
 
 			parent.GetBranch(NONE).LatestLock.Unlock()
@@ -441,14 +434,8 @@
 				response = updatedChildRev
 			}
 		} else {
-			// Data is the same. Continue to the next entry
-			log.Debugw("same-revision-data", log.Fields{
-				"key":  childRev.GetHash(),
-				"name": childRev.GetName(),
-				"data": childRev.GetData(),
-			})
 			if childRev != nil {
-				log.Debugw("keeping-same-revision-data", log.Fields{
+				log.Debugw("keeping-revision-data", log.Fields{
 					"key":  childRev.GetHash(),
 					"name": childRev.GetName(),
 					"data": childRev.GetData(),
@@ -456,7 +443,10 @@
 
 				// Update timestamp to reflect when it was last read and to reset tracked timeout
 				childRev.SetLastUpdate()
-				GetRevCache().Cache.Store(childRev.GetName(), childRev)
+				if childRev.getVersion() < version {
+					childRev.(*PersistedRevision).setVersion(version)
+				}
+				GetRevCache().Set(childRev.GetName(), childRev)
 				response = childRev
 			}
 		}
@@ -479,6 +469,10 @@
 		// We need to start watching this entry for future changes
 		childRev.SetName(typeName + "/" + keyValue)
 		childRev.SetupWatch(childRev.GetName())
+		childRev.(*PersistedRevision).setVersion(version)
+
+		// Add entry to cache
+		GetRevCache().Set(childRev.GetName(), childRev)
 
 		pr.GetBranch().LatestLock.Unlock()
 		// END child lock
@@ -490,7 +484,7 @@
 		// BEGIN parent lock
 		parent.GetBranch(NONE).LatestLock.Lock()
 		children = append(children, childRev)
-		updatedRev := parent.GetBranch(NONE).Latest.UpdateChildren(typeName, children, parent.GetBranch(NONE))
+		updatedRev := parent.GetBranch(NONE).GetLatest().UpdateChildren(ctx, typeName, children, parent.GetBranch(NONE))
 		updatedRev.GetNode().SetProxy(parent.GetBranch(NONE).Node.GetProxy())
 		parent.GetBranch(NONE).Node.makeLatest(parent.GetBranch(NONE), updatedRev, nil)
 		parent.GetBranch(NONE).LatestLock.Unlock()
@@ -512,7 +506,7 @@
 
 // LoadFromPersistence retrieves data from kv store at the specified location and refreshes the memory
 // by adding missing entries, updating changed entries and ignoring unchanged ones
-func (pr *PersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
+func (pr *PersistedRevision) LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
 
@@ -539,7 +533,7 @@
 			nodeType = pr.GetBranch().Node.Type
 		} else {
 			path = partition[1]
-			nodeType = pr.GetBranch().Node.Root.Type
+			nodeType = pr.GetBranch().Node.GetRoot().Type
 		}
 
 		field := ChildrenFields(nodeType)[name]
@@ -574,7 +568,7 @@
 						// based on the field's key attribute
 						_, key := GetAttributeValue(data.Interface(), field.Key, 0)
 
-						if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(), txid); entry != nil {
+						if entry := pr.verifyPersistedEntry(ctx, data.Interface(), name, field.Key, key.String(), txid, blob.Version); entry != nil {
 							response = append(response, entry)
 						}
 					} else {
@@ -601,7 +595,7 @@
 					}
 					keyValue := field.KeyFromStr(key)
 
-					if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string), txid); entry != nil {
+					if entry := pr.verifyPersistedEntry(ctx, data.Interface(), name, field.Key, keyValue.(string), txid, blob.Version); entry != nil {
 						response = append(response, entry)
 					}
 				}
diff --git a/db/model/proxy.go b/db/model/proxy.go
index 182dcdd..5c4d772 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -17,9 +17,11 @@
 package model
 
 import (
+	"context"
 	"crypto/md5"
 	"errors"
 	"fmt"
+	"github.com/google/uuid"
 	"github.com/opencord/voltha-go/common/log"
 	"reflect"
 	"runtime"
@@ -54,7 +56,7 @@
 
 // Proxy holds the information for a specific location with the data model
 type Proxy struct {
-	sync.RWMutex
+	mutex      sync.RWMutex
 	Root       *root
 	Node       *node
 	ParentNode *node
@@ -62,7 +64,7 @@
 	FullPath   string
 	Exclusive  bool
 	Callbacks  map[CallbackType]map[string]*CallbackTuple
-	Operation  ProxyOperation
+	operation  ProxyOperation
 }
 
 // NewProxy instantiates a new proxy to a specific location
@@ -100,6 +102,9 @@
 
 // getCallbacks returns the full list of callbacks associated to the proxy
 func (p *Proxy) getCallbacks(callbackType CallbackType) map[string]*CallbackTuple {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+
 	if p != nil {
 		if cb, exists := p.Callbacks[callbackType]; exists {
 			return cb
@@ -112,8 +117,8 @@
 
 // getCallback returns a specific callback matching the type and function hash
 func (p *Proxy) getCallback(callbackType CallbackType, funcHash string) *CallbackTuple {
-	p.Lock()
-	defer p.Unlock()
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
 	if tuple, exists := p.Callbacks[callbackType][funcHash]; exists {
 		return tuple
 	}
@@ -122,22 +127,22 @@
 
 // setCallbacks applies a callbacks list to a type
 func (p *Proxy) setCallbacks(callbackType CallbackType, callbacks map[string]*CallbackTuple) {
-	p.Lock()
-	defer p.Unlock()
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
 	p.Callbacks[callbackType] = callbacks
 }
 
 // setCallback applies a callback to a type and hash value
 func (p *Proxy) setCallback(callbackType CallbackType, funcHash string, tuple *CallbackTuple) {
-	p.Lock()
-	defer p.Unlock()
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
 	p.Callbacks[callbackType][funcHash] = tuple
 }
 
 // DeleteCallback removes a callback matching the type and hash
 func (p *Proxy) DeleteCallback(callbackType CallbackType, funcHash string) {
-	p.Lock()
-	defer p.Unlock()
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
 	delete(p.Callbacks[callbackType], funcHash)
 }
 
@@ -146,7 +151,8 @@
 
 // Enumerated list of callback types
 const (
-	PROXY_GET ProxyOperation = iota
+	PROXY_NONE ProxyOperation = iota
+	PROXY_GET
 	PROXY_LIST
 	PROXY_ADD
 	PROXY_UPDATE
@@ -156,6 +162,7 @@
 )
 
 var proxyOperationTypes = []string{
+	"PROXY_NONE",
 	"PROXY_GET",
 	"PROXY_LIST",
 	"PROXY_ADD",
@@ -169,6 +176,18 @@
 	return proxyOperationTypes[t]
 }
 
+func (p *Proxy) GetOperation() ProxyOperation {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	return p.operation
+}
+
+func (p *Proxy) SetOperation(operation ProxyOperation) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	p.operation = operation
+}
+
 // parseForControlledPath verifies if a proxy path matches a pattern
 // for locations that need to be access controlled.
 func (p *Proxy) parseForControlledPath(path string) (pathLock string, controlled bool) {
@@ -195,7 +214,7 @@
 
 // List will retrieve information from the data model at the specified path location
 // A list operation will force access to persistence storage
-func (p *Proxy) List(path string, depth int, deep bool, txid string) interface{} {
+func (p *Proxy) List(ctx context.Context, path string, depth int, deep bool, txid string) interface{} {
 	var effectivePath string
 	if path == "/" {
 		effectivePath = p.getFullPath()
@@ -205,28 +224,24 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_LIST)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-list", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(effectivePath, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
-	p.Operation = PROXY_LIST
-	pac.SetProxy(p)
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
-
-	rv := pac.List(path, depth, deep, txid, controlled)
+	rv := p.GetRoot().List(ctx, path, "", depth, deep, txid)
 
 	return rv
 }
 
 // Get will retrieve information from the data model at the specified path location
-func (p *Proxy) Get(path string, depth int, deep bool, txid string) interface{} {
+func (p *Proxy) Get(ctx context.Context, path string, depth int, deep bool, txid string) interface{} {
 	var effectivePath string
 	if path == "/" {
 		effectivePath = p.getFullPath()
@@ -236,25 +251,24 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_GET)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-get", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(effectivePath, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
-	p.Operation = PROXY_GET
-	pac.SetProxy(p)
-
-	rv := pac.Get(path, depth, deep, txid, controlled)
+	rv := p.GetRoot().Get(ctx, path, "", depth, deep, txid)
 
 	return rv
 }
 
 // Update will modify information in the data model at the specified location with the provided data
-func (p *Proxy) Update(path string, data interface{}, strict bool, txid string) interface{} {
+func (p *Proxy) Update(ctx context.Context, path string, data interface{}, strict bool, txid string) interface{} {
 	if !strings.HasPrefix(path, "/") {
 		log.Errorf("invalid path: %s", path)
 		return nil
@@ -271,31 +285,36 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_UPDATE)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-update", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"full":       fullPath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(effectivePath, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
+	if p.GetRoot().KvStore != nil {
+		p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+		defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+	}
 
-	p.Operation = PROXY_UPDATE
-	pac.SetProxy(p)
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
-	log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
+	result := p.GetRoot().Update(ctx, fullPath, data, strict, txid, nil)
 
-	return pac.Update(fullPath, data, strict, txid, controlled)
+	if result != nil {
+		return result.GetData()
+	}
+
+	return nil
 }
 
 // AddWithID will insert new data at specified location.
 // This method also allows the user to specify the ID of the data entry to ensure
 // that access control is active while inserting the information.
-func (p *Proxy) AddWithID(path string, id string, data interface{}, txid string) interface{} {
+func (p *Proxy) AddWithID(ctx context.Context, path string, id string, data interface{}, txid string) interface{} {
 	if !strings.HasPrefix(path, "/") {
 		log.Errorf("invalid path: %s", path)
 		return nil
@@ -312,31 +331,34 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_ADD)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-add-with-id", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"full":       fullPath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(path, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
+	if p.GetRoot().KvStore != nil {
+		p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+		defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+	}
 
-	p.Operation = PROXY_ADD
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
+	result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
 
-	pac.SetProxy(p)
+	if result != nil {
+		return result.GetData()
+	}
 
-	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
-
-	return pac.Add(fullPath, data, txid, controlled)
+	return nil
 }
 
 // Add will insert new data at specified location.
-func (p *Proxy) Add(path string, data interface{}, txid string) interface{} {
+func (p *Proxy) Add(ctx context.Context, path string, data interface{}, txid string) interface{} {
 	if !strings.HasPrefix(path, "/") {
 		log.Errorf("invalid path: %s", path)
 		return nil
@@ -353,30 +375,34 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_ADD)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-add", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"full":       fullPath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(path, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
+	if p.GetRoot().KvStore != nil {
+		p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+		defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+	}
 
-	p.Operation = PROXY_ADD
-	pac.SetProxy(p)
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
+	result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
 
-	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
+	if result != nil {
+		return result.GetData()
+	}
 
-	return pac.Add(fullPath, data, txid, controlled)
+	return nil
 }
 
 // Remove will delete an entry at the specified location
-func (p *Proxy) Remove(path string, txid string) interface{} {
+func (p *Proxy) Remove(ctx context.Context, path string, txid string) interface{} {
 	if !strings.HasPrefix(path, "/") {
 		log.Errorf("invalid path: %s", path)
 		return nil
@@ -393,30 +419,34 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_REMOVE)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-remove", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"full":       fullPath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(effectivePath, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
+	if p.GetRoot().KvStore != nil {
+		p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+		defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+	}
 
-	p.Operation = PROXY_REMOVE
-	pac.SetProxy(p)
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
+	result := p.GetRoot().Remove(ctx, fullPath, txid, nil)
 
-	log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
+	if result != nil {
+		return result.GetData()
+	}
 
-	return pac.Remove(fullPath, txid, controlled)
+	return nil
 }
 
 // CreateProxy to interact with specific path directly
-func (p *Proxy) CreateProxy(path string, exclusive bool) *Proxy {
+func (p *Proxy) CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy {
 	if !strings.HasPrefix(path, "/") {
 		log.Errorf("invalid path: %s", path)
 		return nil
@@ -434,26 +464,24 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
+	p.SetOperation(PROXY_CREATE)
+	defer p.SetOperation(PROXY_NONE)
+
 	log.Debugw("proxy-create", log.Fields{
 		"path":       path,
 		"effective":  effectivePath,
 		"full":       fullPath,
 		"pathLock":   pathLock,
 		"controlled": controlled,
+		"operation":  p.GetOperation(),
 	})
 
-	pac := PAC().ReservePath(path, p, pathLock)
-	defer PAC().ReleasePath(pathLock)
+	if p.GetRoot().KvStore != nil {
+		p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+		defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+	}
 
-	p.Operation = PROXY_CREATE
-	pac.SetProxy(p)
-	defer func(op ProxyOperation) {
-		pac.getProxy().Operation = op
-	}(PROXY_GET)
-
-	log.Debugw("proxy-operation--create-proxy", log.Fields{"operation": p.Operation})
-
-	return pac.CreateProxy(fullPath, exclusive, controlled)
+	return p.GetRoot().CreateProxy(ctx, fullPath, exclusive)
 }
 
 // OpenTransaction creates a new transaction branch to isolate operations made to the data model
@@ -553,7 +581,7 @@
 	var err error
 
 	if callbacks := p.getCallbacks(callbackType); callbacks != nil {
-		p.Lock()
+		p.mutex.Lock()
 		for _, callback := range callbacks {
 			if result, err = p.invoke(callback, context); err != nil {
 				if !proceedOnError {
@@ -563,7 +591,7 @@
 				log.Info("An error occurred.  Invoking next callback")
 			}
 		}
-		p.Unlock()
+		p.mutex.Unlock()
 	}
 
 	return result
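
Note: the three proxy methods above now guard a path with an etcd reservation instead of the in-memory PAC; the guard reduces to a reserve/defer-release wrapper around the root operation. A minimal sketch of that shape, where the KvClient interface, the ReservationTTL value and the helper name are assumptions made for illustration only:

    package sketch

    import "github.com/google/uuid"

    // KvClient captures only the two calls the proxy relies on here; the real
    // voltha-go KV client exposes a larger surface (names/signatures assumed).
    type KvClient interface {
    	Reserve(key string, value interface{}, ttl int64) (interface{}, error)
    	ReleaseReservation(key string) error
    }

    // ReservationTTL stands in for the package constant used above (value assumed).
    const ReservationTTL int64 = 180

    // withPathReservation reserves pathLock+"_" in the KV store, runs op, and
    // releases the reservation when op returns, mirroring the defer pattern above.
    func withPathReservation(kv KvClient, pathLock string, op func() interface{}) interface{} {
    	if kv != nil {
    		kv.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
    		defer kv.ReleaseReservation(pathLock + "_")
    	}
    	return op()
    }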
diff --git a/db/model/proxy_access_control.go b/db/model/proxy_access_control.go
deleted file mode 100644
index a1ea6be..0000000
--- a/db/model/proxy_access_control.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package model
-
-import (
-	"github.com/opencord/voltha-go/common/log"
-	"sync"
-	"time"
-)
-
-type singletonProxyAccessControl struct {
-	sync.RWMutex
-	cache         sync.Map
-	reservedCount int
-}
-
-var instanceProxyAccessControl *singletonProxyAccessControl
-var onceProxyAccessControl sync.Once
-
-// PAC provides access to the proxy access control singleton instance
-func PAC() *singletonProxyAccessControl {
-	onceProxyAccessControl.Do(func() {
-		instanceProxyAccessControl = &singletonProxyAccessControl{}
-	})
-	return instanceProxyAccessControl
-}
-
-// IsReserved will verify if access control is active for a specific path within the model
-func (singleton *singletonProxyAccessControl) IsReserved(pathLock string) bool {
-	singleton.Lock()
-	defer singleton.Unlock()
-
-	_, exists := singleton.cache.Load(pathLock)
-	log.Debugw("is-reserved", log.Fields{"pathLock": pathLock, "exists": exists})
-
-	return exists
-}
-
-// ReservePath will apply access control for a specific path within the model
-func (singleton *singletonProxyAccessControl) ReservePath(path string, proxy *Proxy, pathLock string) *proxyAccessControl {
-	singleton.Lock()
-	defer singleton.Unlock()
-	singleton.reservedCount++
-	if pac, exists := singleton.cache.Load(pathLock); !exists {
-		log.Debugf("Creating new PAC entry for path:%s pathLock:%s", path, pathLock)
-		newPac := NewProxyAccessControl(proxy, pathLock)
-		singleton.cache.Store(pathLock, newPac)
-		return newPac
-	} else {
-		log.Debugf("Re-using existing PAC entry for path:%s pathLock:%s", path, pathLock)
-		return pac.(*proxyAccessControl)
-	}
-}
-
-// ReleasePath will remove access control for a specific path within the model
-func (singleton *singletonProxyAccessControl) ReleasePath(pathLock string) {
-	singleton.Lock()
-	defer singleton.Unlock()
-
-	singleton.reservedCount--
-
-	if singleton.reservedCount == 0 {
-		singleton.cache.Delete(pathLock)
-	}
-}
-
-// ProxyAccessControl is the abstraction interface to the base proxyAccessControl structure
-type ProxyAccessControl interface {
-	Get(path string, depth int, deep bool, txid string, control bool) interface{}
-	Update(path string, data interface{}, strict bool, txid string, control bool) interface{}
-	Add(path string, data interface{}, txid string, control bool) interface{}
-	Remove(path string, txid string, control bool) interface{}
-	SetProxy(proxy *Proxy)
-}
-
-// proxyAccessControl holds details of the path and proxy that requires access control
-type proxyAccessControl struct {
-	sync.RWMutex
-	Proxy    *Proxy
-	PathLock chan struct{}
-	Path     string
-
-	start time.Time
-	stop  time.Time
-}
-
-// NewProxyAccessControl creates a new instance of an access control structure
-func NewProxyAccessControl(proxy *Proxy, path string) *proxyAccessControl {
-	return &proxyAccessControl{
-		Proxy:    proxy,
-		Path:     path,
-		PathLock: make(chan struct{}, 1),
-	}
-}
-
-// lock will prevent access to a model path
-func (pac *proxyAccessControl) lock() {
-	log.Debugw("locking", log.Fields{"path": pac.Path})
-	pac.PathLock <- struct{}{}
-	pac.setStart(time.Now())
-}
-
-// unlock will release control of a model path
-func (pac *proxyAccessControl) unlock() {
-	<-pac.PathLock
-	log.Debugw("unlocking", log.Fields{"path": pac.Path})
-	pac.setStop(time.Now())
-	GetProfiling().AddToInMemoryLockTime(pac.getStop().Sub(pac.getStart()).Seconds())
-}
-
-// getStart is used for profiling purposes and returns the time at which access control was applied
-func (pac *proxyAccessControl) getStart() time.Time {
-	pac.Lock()
-	defer pac.Unlock()
-	return pac.start
-}
-
-// getStart is used for profiling purposes and returns the time at which access control was removed
-func (pac *proxyAccessControl) getStop() time.Time {
-	pac.Lock()
-	defer pac.Unlock()
-	return pac.stop
-}
-
-// getPath returns the access controlled path
-func (pac *proxyAccessControl) getPath() string {
-	pac.Lock()
-	defer pac.Unlock()
-	return pac.Path
-}
-
-// getProxy returns the proxy used to reach a specific location in the data model
-func (pac *proxyAccessControl) getProxy() *Proxy {
-	pac.Lock()
-	defer pac.Unlock()
-	return pac.Proxy
-}
-
-// setStart is for profiling purposes and applies a start time value at which access control was started
-func (pac *proxyAccessControl) setStart(time time.Time) {
-	pac.Lock()
-	defer pac.Unlock()
-	pac.start = time
-}
-
-// setStop is for profiling purposes and applies a stop time value at which access control was stopped
-func (pac *proxyAccessControl) setStop(time time.Time) {
-	pac.Lock()
-	defer pac.Unlock()
-	pac.stop = time
-}
-
-// SetProxy is used to changed the proxy object of an access controlled path
-func (pac *proxyAccessControl) SetProxy(proxy *Proxy) {
-	pac.Lock()
-	defer pac.Unlock()
-	pac.Proxy = proxy
-}
-
-// List retrieves data linked to a data model path
-func (pac *proxyAccessControl) List(path string, depth int, deep bool, txid string, control bool) interface{} {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	// FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
-	// The data traversal through reflection currently corrupts the content
-
-	return pac.getProxy().GetRoot().List(path, "", depth, deep, txid)
-}
-
-// Get retrieves data linked to a data model path
-func (pac *proxyAccessControl) Get(path string, depth int, deep bool, txid string, control bool) interface{} {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	// FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
-	// The data traversal through reflection currently corrupts the content
-	return pac.getProxy().GetRoot().Get(path, "", 0, deep, txid)
-}
-
-// Update changes the content of the data model at the specified location with the provided data
-func (pac *proxyAccessControl) Update(path string, data interface{}, strict bool, txid string, control bool) interface{} {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	result := pac.getProxy().GetRoot().Update(path, data, strict, txid, nil)
-
-	if result != nil {
-		return result.GetData()
-	}
-	return nil
-}
-
-// Add creates a new data model entry at the specified location with the provided data
-func (pac *proxyAccessControl) Add(path string, data interface{}, txid string, control bool) interface{} {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--add", log.Fields{"path": path, "fullPath": pac.Path})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--add", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	result := pac.getProxy().GetRoot().Add(path, data, txid, nil)
-
-	if result != nil {
-		return result.GetData()
-	}
-	return nil
-}
-
-// Remove discards information linked to the data model path
-func (pac *proxyAccessControl) Remove(path string, txid string, control bool) interface{} {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	return pac.getProxy().GetRoot().Remove(path, txid, nil)
-}
-
-// CreateProxy allows interaction for a specific path
-func (pac *proxyAccessControl) CreateProxy(path string, exclusive bool, control bool) *Proxy {
-	if control {
-		pac.lock()
-		log.Debugw("locked-access--create-proxy", log.Fields{"path": path, "fullPath": pac.Path})
-		defer pac.unlock()
-		defer log.Debugw("unlocked-access--create-proxy", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
-	}
-
-	result := pac.getProxy().ParentNode.CreateProxy(path, exclusive)
-
-	if result != nil {
-		return result
-	}
-	return nil
-}
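
Note: the file removed above serialized access to a model path with a capacity-1 channel and recorded how long the lock was held for profiling. A self-contained sketch of that pattern, with illustrative names rather than the original API:

    package sketch

    import "time"

    // pathGuard serializes access to one path using a buffered channel of
    // capacity 1: a send acquires the slot, a receive releases it.
    type pathGuard struct {
    	sem   chan struct{}
    	start time.Time
    }

    func newPathGuard() *pathGuard {
    	return &pathGuard{sem: make(chan struct{}, 1)}
    }

    func (g *pathGuard) lock() {
    	g.sem <- struct{}{} // blocks while another goroutine holds the slot
    	g.start = time.Now()
    }

    // unlock releases the slot and reports how long it was held,
    // the quantity the removed code fed into its profiling counters.
    func (g *pathGuard) unlock() time.Duration {
    	held := time.Since(g.start)
    	<-g.sem
    	return held
    }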

diff --git a/db/model/proxy_load_test.go b/db/model/proxy_load_test.go
index f44a6ae..f4fd325 100644
--- a/db/model/proxy_load_test.go
+++ b/db/model/proxy_load_test.go
@@ -16,6 +16,7 @@
 package model
 
 import (
+	"context"
 	"encoding/hex"
 	"github.com/google/uuid"
 	"github.com/opencord/voltha-go/common/log"
@@ -42,10 +43,16 @@
 	After  interface{}
 }
 type proxyLoadTest struct {
-	sync.RWMutex
-	addedDevices       []string
-	updatedFirmwares   []proxyLoadChanges
-	updatedFlows       []proxyLoadChanges
+	mutex sync.RWMutex
+
+	addMutex     sync.RWMutex
+	addedDevices []string
+
+	firmwareMutex    sync.RWMutex
+	updatedFirmwares []proxyLoadChanges
+	flowMutex        sync.RWMutex
+	updatedFlows     []proxyLoadChanges
+
 	preAddExecuted     bool
 	postAddExecuted    bool
 	preUpdateExecuted  bool
@@ -53,33 +60,43 @@
 }
 
 func (plt *proxyLoadTest) SetPreAddExecuted(status bool) {
-	plt.Lock()
-	defer plt.Unlock()
+	plt.mutex.Lock()
+	defer plt.mutex.Unlock()
 	plt.preAddExecuted = status
 }
 func (plt *proxyLoadTest) SetPostAddExecuted(status bool) {
-	plt.Lock()
-	defer plt.Unlock()
+	plt.mutex.Lock()
+	defer plt.mutex.Unlock()
 	plt.postAddExecuted = status
 }
 func (plt *proxyLoadTest) SetPreUpdateExecuted(status bool) {
-	plt.Lock()
-	defer plt.Unlock()
+	plt.mutex.Lock()
+	defer plt.mutex.Unlock()
 	plt.preUpdateExecuted = status
 }
 func (plt *proxyLoadTest) SetPostUpdateExecuted(status bool) {
-	plt.Lock()
-	defer plt.Unlock()
+	plt.mutex.Lock()
+	defer plt.mutex.Unlock()
 	plt.postUpdateExecuted = status
 }
 
 func init() {
 	BenchmarkProxy_Root = NewRoot(&voltha.Voltha{}, nil)
 
-	BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.InfoLevel, nil)
+	BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
 	//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
+	// Setup default logger - applies to packages that do not have a specific logger set
+	if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"}); err != nil {
+		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+	}
 
-	BenchmarkProxy_DeviceProxy = BenchmarkProxy_Root.node.CreateProxy("/", false)
+	// Update all loggers (provisioned via init) with a common field
+	if err := log.UpdateAllLoggers(log.Fields{"instanceId": "PLT"}); err != nil {
+		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+	}
+	log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
+
+	BenchmarkProxy_DeviceProxy = BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
 	// Register ADD instructions callbacks
 	BenchmarkProxy_PLT = &proxyLoadTest{}
 
@@ -133,16 +150,16 @@
 
 			var added interface{}
 			// Add the device
-			if added = BenchmarkProxy_DeviceProxy.AddWithID("/devices", ltDevID, ltDevice, ""); added == nil {
+			if added = BenchmarkProxy_DeviceProxy.AddWithID(context.Background(), "/devices", ltDevID, ltDevice, ""); added == nil {
 				BenchmarkProxy_Logger.Errorf("Failed to add device: %+v", ltDevice)
 				continue
 			} else {
 				BenchmarkProxy_Logger.Infof("Device was added 1: %+v", added)
 			}
 
-			BenchmarkProxy_PLT.Lock()
+			BenchmarkProxy_PLT.addMutex.Lock()
 			BenchmarkProxy_PLT.addedDevices = append(BenchmarkProxy_PLT.addedDevices, added.(*voltha.Device).Id)
-			BenchmarkProxy_PLT.Unlock()
+			BenchmarkProxy_PLT.addMutex.Unlock()
 		}
 	})
 
@@ -157,8 +174,8 @@
 			if len(BenchmarkProxy_PLT.addedDevices) > 0 {
 				var target interface{}
 				randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
-				firmProxy := BenchmarkProxy_Root.node.CreateProxy("/", false)
-				if target = firmProxy.Get("/devices/"+randomID, 0, false,
+				firmProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
+				if target = firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
 					""); !reflect.ValueOf(target).IsValid() {
 					BenchmarkProxy_Logger.Errorf("Failed to find device: %s %+v", randomID, target)
 					continue
@@ -183,7 +200,7 @@
 				after := target.(*voltha.Device).FirmwareVersion
 
 				var updated interface{}
-				if updated = firmProxy.Update("/devices/"+randomID, target.(*voltha.Device), false,
+				if updated = firmProxy.Update(context.Background(), "/devices/"+randomID, target.(*voltha.Device), false,
 					""); updated == nil {
 					BenchmarkProxy_Logger.Errorf("Failed to update device: %+v", target)
 					continue
@@ -192,7 +209,7 @@
 
 				}
 
-				if d := firmProxy.Get("/devices/"+randomID, 0, false,
+				if d := firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
 					""); !reflect.ValueOf(d).IsValid() {
 					BenchmarkProxy_Logger.Errorf("Failed to get device: %s", randomID)
 					continue
@@ -204,12 +221,13 @@
 					BenchmarkProxy_Logger.Errorf("Imm Device has unknown value: %s %+v %+v", randomID, d, target)
 				}
 
-				BenchmarkProxy_PLT.Lock()
+				BenchmarkProxy_PLT.firmwareMutex.Lock()
+
 				BenchmarkProxy_PLT.updatedFirmwares = append(
 					BenchmarkProxy_PLT.updatedFirmwares,
 					proxyLoadChanges{ID: randomID, Before: before, After: after},
 				)
-				BenchmarkProxy_PLT.Unlock()
+				BenchmarkProxy_PLT.firmwareMutex.Unlock()
 			}
 		}
 	})
@@ -246,8 +264,8 @@
 			if len(BenchmarkProxy_PLT.addedDevices) > 0 {
 				randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
 
-				flowsProxy := BenchmarkProxy_Root.node.CreateProxy("/devices/"+randomID+"/flows", false)
-				flows := flowsProxy.Get("/", 0, false, "")
+				flowsProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/devices/"+randomID+"/flows", false)
+				flows := flowsProxy.Get(context.Background(), "/", 0, false, "")
 
 				before := flows.(*openflow_13.Flows).Items[0].TableId
 				flows.(*openflow_13.Flows).Items[0].TableId = uint32(rand.Intn(3000))
@@ -263,17 +281,17 @@
 				)
 
 				var updated interface{}
-				if updated = flowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+				if updated = flowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
 					b.Errorf("Failed to update flows for device: %+v", flows)
 				} else {
 					BenchmarkProxy_Logger.Infof("Flows were updated : %+v", updated)
 				}
-				BenchmarkProxy_PLT.Lock()
+				BenchmarkProxy_PLT.flowMutex.Lock()
 				BenchmarkProxy_PLT.updatedFlows = append(
 					BenchmarkProxy_PLT.updatedFlows,
 					proxyLoadChanges{ID: randomID, Before: before, After: after},
 				)
-				BenchmarkProxy_PLT.Unlock()
+				BenchmarkProxy_PLT.flowMutex.Unlock()
 			}
 		}
 	})
@@ -285,7 +303,7 @@
 	for i := 0; i < len(BenchmarkProxy_PLT.addedDevices); i++ {
 		devToGet := BenchmarkProxy_PLT.addedDevices[i]
 		// Verify that the added device can now be retrieved
-		if d := BenchmarkProxy_DeviceProxy.Get("/devices/"+devToGet, 0, false,
+		if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
 			""); !reflect.ValueOf(d).IsValid() {
 			BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
 			continue
@@ -299,7 +317,7 @@
 	for i := 0; i < len(BenchmarkProxy_PLT.updatedFirmwares); i++ {
 		devToGet := BenchmarkProxy_PLT.updatedFirmwares[i].ID
 		// Verify that the updated device can be retrieved and that the updates were actually applied
-		if d := BenchmarkProxy_DeviceProxy.Get("/devices/"+devToGet, 0, false,
+		if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
 			""); !reflect.ValueOf(d).IsValid() {
 			BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
 			continue
@@ -312,23 +330,3 @@
 		}
 	}
 }
-
-func BenchmarkProxy_GetUpdatedFlows(b *testing.B) {
-	var d interface{}
-	for i := 0; i < len(BenchmarkProxy_PLT.updatedFlows); i++ {
-		devToGet := BenchmarkProxy_PLT.updatedFlows[i].ID
-		// Verify that the updated device can be retrieved and that the updates were actually applied
-		flowsProxy := BenchmarkProxy_Root.node.CreateProxy("/devices/"+devToGet+"/flows", false)
-		if d = flowsProxy.Get("/", 0, false,
-			""); !reflect.ValueOf(d).IsValid() {
-			BenchmarkProxy_Logger.Errorf("Failed to get device flows: %s", devToGet)
-			continue
-		} else if d.(*openflow_13.Flows).Items[0].TableId == BenchmarkProxy_PLT.updatedFlows[i].After.(uint32) {
-			BenchmarkProxy_Logger.Infof("Device was updated with new flow value: %s %+v", devToGet, d)
-		} else if d.(*openflow_13.Flows).Items[0].TableId == BenchmarkProxy_PLT.updatedFlows[i].Before.(uint32) {
-			BenchmarkProxy_Logger.Errorf("Device kept old flow value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFlows[i])
-		} else {
-			BenchmarkProxy_Logger.Errorf("Device has unknown flow value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFlows[i])
-		}
-	}
-}
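
Note: the struct change above splits the single embedded RWMutex into one mutex per result slice, so the add, firmware and flow benchmarks no longer serialize on the same lock and racy accesses are easier to attribute. A small sketch of the per-field-mutex pattern, with hypothetical names:

    package sketch

    import "sync"

    // results keeps independently locked result sets so writers of one slice
    // never block writers of another (mirrors proxyLoadTest above).
    type results struct {
    	addMu sync.RWMutex
    	added []string

    	flowMu sync.RWMutex
    	flows  []string
    }

    func (r *results) recordAdd(id string) {
    	r.addMu.Lock()
    	defer r.addMu.Unlock()
    	r.added = append(r.added, id)
    }

    func (r *results) recordFlow(id string) {
    	r.flowMu.Lock()
    	defer r.flowMu.Unlock()
    	r.flows = append(r.flows, id)
    }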
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index f583b99..3f65997 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -16,6 +16,7 @@
 package model
 
 import (
+	"context"
 	"encoding/hex"
 	"encoding/json"
 	"github.com/golang/protobuf/proto"
@@ -53,9 +54,9 @@
 	//log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
 	//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
 	TestProxy_Root = NewRoot(&voltha.Voltha{}, nil)
-	TestProxy_Root_LogicalDevice = TestProxy_Root.CreateProxy("/", false)
-	TestProxy_Root_Device = TestProxy_Root.CreateProxy("/", false)
-	TestProxy_Root_Adapter = TestProxy_Root.CreateProxy("/", false)
+	TestProxy_Root_LogicalDevice = TestProxy_Root.CreateProxy(context.Background(), "/", false)
+	TestProxy_Root_Device = TestProxy_Root.CreateProxy(context.Background(), "/", false)
+	TestProxy_Root_Adapter = TestProxy_Root.CreateProxy(context.Background(), "/", false)
 
 	TestProxy_LogicalPorts = []*voltha.LogicalPort{
 		{
@@ -115,7 +116,7 @@
 	postAddExecuted := make(chan struct{})
 	preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
 
-	devicesProxy := TestProxy_Root.node.CreateProxy("/devices", false)
+	devicesProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices", false)
 	devicesProxy.RegisterCallback(PRE_ADD, commonCallback2, "PRE_ADD Device container changes")
 	devicesProxy.RegisterCallback(POST_ADD, commonCallback2, "POST_ADD Device container changes")
 
@@ -123,7 +124,7 @@
 	TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
 	TestProxy_Root_Device.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
 
-	if added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, ""); added == nil {
+	if added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, ""); added == nil {
 		t.Error("Failed to add device")
 	} else {
 		t.Logf("Added device : %+v", added)
@@ -137,7 +138,7 @@
 	}
 
 	// Verify that the added device can now be retrieved
-	if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
 		t.Error("Failed to find added device")
 	} else {
 		djson, _ := json.Marshal(d)
@@ -148,7 +149,7 @@
 func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
 	TestProxy_Device.Id = TestProxy_DeviceId
 
-	added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, "")
+	added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, "")
 	if added.(proto.Message).String() != reflect.ValueOf(TestProxy_Device).Interface().(proto.Message).String() {
 		t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
 	}
@@ -180,7 +181,7 @@
 	TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions for adapters", &postAddExecutedPtr)
 
 	// Add the adapter
-	if added := TestProxy_Root_Adapter.Add("/adapters", TestProxy_Adapter, ""); added == nil {
+	if added := TestProxy_Root_Adapter.Add(context.Background(), "/adapters", TestProxy_Adapter, ""); added == nil {
 		t.Error("Failed to add adapter")
 	} else {
 		t.Logf("Added adapter : %+v", added)
@@ -189,7 +190,7 @@
 	verifyGotResponse(postAddExecuted)
 
 	// Verify that the added device can now be retrieved
-	if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
 		t.Error("Failed to find added adapter")
 	} else {
 		djson, _ := json.Marshal(d)
@@ -205,7 +206,7 @@
 }
 
 func TestProxy_1_2_1_Get_AllDevices(t *testing.T) {
-	devices := TestProxy_Root_Device.Get("/devices", 1, false, "")
+	devices := TestProxy_Root_Device.Get(context.Background(), "/devices", 1, false, "")
 
 	if len(devices.([]interface{})) == 0 {
 		t.Error("there are no available devices to retrieve")
@@ -217,7 +218,7 @@
 }
 
 func TestProxy_1_2_2_Get_SingleDevice(t *testing.T) {
-	if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
 		t.Errorf("Failed to find device : %s", TestProxy_TargetDeviceId)
 	} else {
 		djson, _ := json.Marshal(d)
@@ -232,7 +233,7 @@
 	postUpdateExecuted := make(chan struct{})
 	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
 
-	if retrieved := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
+	if retrieved := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
 		t.Error("Failed to get device")
 	} else {
 		t.Logf("Found raw device (root proxy): %+v", retrieved)
@@ -257,7 +258,7 @@
 			"POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
 		)
 
-		if afterUpdate := TestProxy_Root_Device.Update("/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
+		if afterUpdate := TestProxy_Root_Device.Update(context.Background(), "/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
 			t.Error("Failed to update device")
 		} else {
 			t.Logf("Updated device : %+v", afterUpdate)
@@ -270,7 +271,7 @@
 			t.Error("POST_UPDATE callback was not executed")
 		}
 
-		if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+		if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
 			t.Error("Failed to find updated device (root proxy)")
 		} else {
 			djson, _ := json.Marshal(d)
@@ -281,8 +282,8 @@
 
 func TestProxy_1_3_2_Update_DeviceFlows(t *testing.T) {
 	// Get a device proxy and update a specific port
-	devFlowsProxy := TestProxy_Root.node.CreateProxy("/devices/"+TestProxy_DeviceId+"/flows", false)
-	flows := devFlowsProxy.Get("/", 0, false, "")
+	devFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", false)
+	flows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
 	flows.(*openflow_13.Flows).Items[0].TableId = 2244
 
 	preUpdateExecuted := make(chan struct{})
@@ -300,13 +301,13 @@
 		"POST_UPDATE instructions (flows proxy)", &postUpdateExecutedPtr,
 	)
 
-	kvFlows := devFlowsProxy.Get("/", 0, false, "")
+	kvFlows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
 
 	if reflect.DeepEqual(flows, kvFlows) {
 		t.Errorf("Local changes have changed the KV store contents -  local:%+v, kv: %+v", flows, kvFlows)
 	}
 
-	if updated := devFlowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+	if updated := devFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
 		t.Error("Failed to update flow")
 	} else {
 		t.Logf("Updated flows : %+v", updated)
@@ -319,14 +320,14 @@
 		t.Error("POST_UPDATE callback was not executed")
 	}
 
-	if d := devFlowsProxy.Get("/", 0, false, ""); d == nil {
+	if d := devFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
 		t.Error("Failed to find updated flows (flows proxy)")
 	} else {
 		djson, _ := json.Marshal(d)
 		t.Logf("Found flows (flows proxy): %s", string(djson))
 	}
 
-	if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId+"/flows", 1, false, ""); !reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", 1, false, ""); !reflect.ValueOf(d).IsValid() {
 		t.Error("Failed to find updated flows (root proxy)")
 	} else {
 		djson, _ := json.Marshal(d)
@@ -339,9 +340,9 @@
 	postUpdateExecuted := make(chan struct{})
 	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
 
-	adaptersProxy := TestProxy_Root.node.CreateProxy("/adapters", false)
+	adaptersProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/adapters", false)
 
-	if retrieved := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 1, false, ""); retrieved == nil {
+	if retrieved := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); retrieved == nil {
 		t.Error("Failed to get adapter")
 	} else {
 		t.Logf("Found raw adapter (root proxy): %+v", retrieved)
@@ -359,7 +360,7 @@
 			"POST_UPDATE instructions for adapters", &postUpdateExecutedPtr,
 		)
 
-		if afterUpdate := adaptersProxy.Update("/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
+		if afterUpdate := adaptersProxy.Update(context.Background(), "/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
 			t.Error("Failed to update adapter")
 		} else {
 			t.Logf("Updated adapter : %+v", afterUpdate)
@@ -372,7 +373,7 @@
 			t.Error("POST_UPDATE callback for adapter was not executed")
 		}
 
-		if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+		if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
 			t.Error("Failed to find updated adapter (root proxy)")
 		} else {
 			djson, _ := json.Marshal(d)
@@ -397,7 +398,7 @@
 		"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
 	)
 
-	if removed := TestProxy_Root_Device.Remove("/devices/"+TestProxy_DeviceId, ""); removed == nil {
+	if removed := TestProxy_Root_Device.Remove(context.Background(), "/devices/"+TestProxy_DeviceId, ""); removed == nil {
 		t.Error("Failed to remove device")
 	} else {
 		t.Logf("Removed device : %+v", removed)
@@ -410,7 +411,7 @@
 		t.Error("POST_REMOVE callback was not executed")
 	}
 
-	if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
 		djson, _ := json.Marshal(d)
 		t.Errorf("Device was not removed - %s", djson)
 	} else {
@@ -432,7 +433,7 @@
 	TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
 	TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
 
-	if added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
+	if added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
 		t.Error("Failed to add logical device")
 	} else {
 		t.Logf("Added logical device : %+v", added)
@@ -440,7 +441,7 @@
 
 	verifyGotResponse(postAddExecuted)
 
-	if ld := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
+	if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
 		t.Error("Failed to find added logical device")
 	} else {
 		ldJSON, _ := json.Marshal(ld)
@@ -458,14 +459,14 @@
 func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
 	TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
 
-	added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, "")
+	added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, "")
 	if added.(proto.Message).String() != reflect.ValueOf(TestProxy_LogicalDevice).Interface().(proto.Message).String() {
 		t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
 	}
 }
 
 func TestProxy_2_2_1_Get_AllLogicalDevices(t *testing.T) {
-	logicalDevices := TestProxy_Root_LogicalDevice.Get("/logical_devices", 1, false, "")
+	logicalDevices := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices", 1, false, "")
 
 	if len(logicalDevices.([]interface{})) == 0 {
 		t.Error("there are no available logical devices to retrieve")
@@ -477,7 +478,7 @@
 }
 
 func TestProxy_2_2_2_Get_SingleLogicalDevice(t *testing.T) {
-	if ld := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
+	if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
 		t.Errorf("Failed to find logical device : %s", TestProxy_TargetLogicalDeviceId)
 	} else {
 		ldJSON, _ := json.Marshal(ld)
@@ -492,7 +493,7 @@
 	postUpdateExecuted := make(chan struct{})
 	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
 
-	if retrieved := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
+	if retrieved := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
 		t.Error("Failed to get logical device")
 	} else {
 		t.Logf("Found raw logical device (root proxy): %+v", retrieved)
@@ -517,7 +518,7 @@
 
 		retrieved.(*voltha.LogicalDevice).RootDeviceId = strconv.Itoa(fwVersion)
 
-		if afterUpdate := TestProxy_Root_LogicalDevice.Update("/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
+		if afterUpdate := TestProxy_Root_LogicalDevice.Update(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
 			""); afterUpdate == nil {
 			t.Error("Failed to update logical device")
 		} else {
@@ -531,7 +532,7 @@
 			t.Error("POST_UPDATE callback was not executed")
 		}
 
-		if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+		if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
 			t.Error("Failed to find updated logical device (root proxy)")
 		} else {
 			djson, _ := json.Marshal(d)
@@ -543,8 +544,8 @@
 
 func TestProxy_2_3_2_Update_LogicalDeviceFlows(t *testing.T) {
 	// Get a device proxy and update a specific port
-	ldFlowsProxy := TestProxy_Root.node.CreateProxy("/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
-	flows := ldFlowsProxy.Get("/", 0, false, "")
+	ldFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
+	flows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
 	flows.(*openflow_13.Flows).Items[0].TableId = rand.Uint32()
 	t.Logf("before updated flows: %+v", flows)
 
@@ -557,26 +558,26 @@
 		commonCallback2,
 	)
 
-	kvFlows := ldFlowsProxy.Get("/", 0, false, "")
+	kvFlows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
 
 	if reflect.DeepEqual(flows, kvFlows) {
 		t.Errorf("Local changes have changed the KV store contents -  local:%+v, kv: %+v", flows, kvFlows)
 	}
 
-	if updated := ldFlowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+	if updated := ldFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
 		t.Error("Failed to update logical device flows")
 	} else {
 		t.Logf("Updated logical device flows : %+v", updated)
 	}
 
-	if d := ldFlowsProxy.Get("/", 0, false, ""); d == nil {
+	if d := ldFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
 		t.Error("Failed to find updated logical device flows (flows proxy)")
 	} else {
 		djson, _ := json.Marshal(d)
 		t.Logf("Found flows (flows proxy): %s", string(djson))
 	}
 
-	if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
+	if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
 		""); !reflect.ValueOf(d).IsValid() {
 		t.Error("Failed to find updated logical device flows (root proxy)")
 	} else {
@@ -601,7 +602,7 @@
 		"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
 	)
 
-	if removed := TestProxy_Root_LogicalDevice.Remove("/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
+	if removed := TestProxy_Root_LogicalDevice.Remove(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
 		t.Error("Failed to remove logical device")
 	} else {
 		t.Logf("Removed device : %+v", removed)
@@ -614,7 +615,7 @@
 		t.Error("POST_REMOVE callback was not executed")
 	}
 
-	if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
+	if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
 		djson, _ := json.Marshal(d)
 		t.Errorf("Device was not removed - %s", djson)
 	} else {
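
Note: the tests above verify callback execution by handing a channel to commonChanCallback and waiting on it via verifyGotResponse (both defined elsewhere in the package). A self-contained sketch of that channel-signalling idea, with assumed helper names and a timeout added for illustration:

    package sketch

    import (
    	"errors"
    	"time"
    )

    // notifyDone returns a callback that signals the test channel when invoked.
    func notifyDone(done chan<- struct{}) func(args ...interface{}) interface{} {
    	return func(args ...interface{}) interface{} {
    		done <- struct{}{}
    		return nil
    	}
    }

    // waitForCallback blocks until the callback fired or the timeout elapsed.
    func waitForCallback(done <-chan struct{}, timeout time.Duration) error {
    	select {
    	case <-done:
    		return nil
    	case <-time.After(timeout):
    		return errors.New("callback was not executed in time")
    	}
    }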
diff --git a/db/model/revision.go b/db/model/revision.go
index cd4c5df..6f52248 100644
--- a/db/model/revision.go
+++ b/db/model/revision.go
@@ -16,6 +16,7 @@
 package model
 
 import (
+	"context"
 	"github.com/opencord/voltha-go/db/kvstore"
 	"time"
 )
@@ -34,6 +35,7 @@
 	SetHash(hash string)
 	GetHash() string
 	ClearHash()
+	getVersion() int64
 	SetupWatch(key string)
 	SetName(name string)
 	GetName() string
@@ -42,10 +44,10 @@
 	Get(int) interface{}
 	GetData() interface{}
 	GetNode() *node
-	LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
 	SetLastUpdate(ts ...time.Time)
 	GetLastUpdate() time.Time
-	UpdateData(data interface{}, branch *Branch) Revision
-	UpdateChildren(name string, children []Revision, branch *Branch) Revision
+	LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
+	UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision
+	UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision
 	UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
 }
diff --git a/db/model/root.go b/db/model/root.go
index 5036ce1..8331e11 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -17,6 +17,7 @@
 package model
 
 import (
+	"context"
 	"encoding/hex"
 	"encoding/json"
 	"github.com/golang/protobuf/proto"
@@ -103,7 +104,7 @@
 		r.DeleteTxBranch(txid)
 	} else {
 		r.node.MergeBranch(txid, false)
-		r.ExecuteCallbacks()
+		r.node.GetRoot().ExecuteCallbacks()
 		r.DeleteTxBranch(txid)
 	}
 }
@@ -162,7 +163,7 @@
 }
 
 func (r *root) syncParent(childRev Revision, txid string) {
-	data := proto.Clone(r.Proxy.ParentNode.Latest().GetData().(proto.Message))
+	data := proto.Clone(r.GetProxy().ParentNode.Latest().GetData().(proto.Message))
 
 	for fieldName, _ := range ChildrenFields(data) {
 		childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
@@ -172,12 +173,12 @@
 		}
 	}
 
-	r.Proxy.ParentNode.Latest().SetConfig(NewDataRevision(r.Proxy.ParentNode.Root, data))
-	r.Proxy.ParentNode.Latest(txid).Finalize(false)
+	r.GetProxy().ParentNode.Latest().SetConfig(NewDataRevision(r.GetProxy().ParentNode.GetRoot(), data))
+	r.GetProxy().ParentNode.Latest(txid).Finalize(false)
 }
 
 // Update modifies the content of an object at a given path with the provided data
-func (r *root) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
 	var result Revision
 
 	if makeBranch != nil {
@@ -193,13 +194,13 @@
 			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
 			return node.MakeBranch(txid)
 		}
-		result = r.node.Update(path, data, strict, txid, trackDirty)
+		result = r.node.Update(ctx, path, data, strict, txid, trackDirty)
 	} else {
-		result = r.node.Update(path, data, strict, "", nil)
+		result = r.node.Update(ctx, path, data, strict, "", nil)
 	}
 
 	if result != nil {
-		if r.Proxy.FullPath != r.Proxy.Path {
+		if r.GetProxy().FullPath != r.GetProxy().Path {
 			r.syncParent(result, txid)
 		} else {
 			result.Finalize(false)
@@ -212,7 +213,7 @@
 }
 
 // Add creates a new object at the given path with the provided data
-func (r *root) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
 	var result Revision
 
 	if makeBranch != nil {
@@ -228,9 +229,9 @@
 			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
 			return node.MakeBranch(txid)
 		}
-		result = r.node.Add(path, data, txid, trackDirty)
+		result = r.node.Add(ctx, path, data, txid, trackDirty)
 	} else {
-		result = r.node.Add(path, data, "", nil)
+		result = r.node.Add(ctx, path, data, "", nil)
 	}
 
 	if result != nil {
@@ -241,7 +242,7 @@
 }
 
 // Remove discards an object at a given path
-func (r *root) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision {
 	var result Revision
 
 	if makeBranch != nil {
@@ -257,9 +258,9 @@
 			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
 			return node.MakeBranch(txid)
 		}
-		result = r.node.Remove(path, txid, trackDirty)
+		result = r.node.Remove(ctx, path, txid, trackDirty)
 	} else {
-		result = r.node.Remove(path, "", nil)
+		result = r.node.Remove(ctx, path, "", nil)
 	}
 
 	r.node.GetRoot().ExecuteCallbacks()
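
Note: throughout root.go the direct field reads (r.Proxy, p.Operation) are replaced with accessor calls (r.GetProxy(), p.GetOperation()), the usual fix for fields the race detector flags. A minimal sketch of a field exposed only through locked accessors, with assumed names:

    package sketch

    import "sync"

    type ProxyOperation int

    const (
    	ProxyNone ProxyOperation = iota
    	ProxyAdd
    )

    // proxy exposes its operation field only through locked accessors so
    // concurrent readers and writers never race on it.
    type proxy struct {
    	mu        sync.RWMutex
    	operation ProxyOperation
    }

    func (p *proxy) SetOperation(op ProxyOperation) {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	p.operation = op
    }

    func (p *proxy) GetOperation() ProxyOperation {
    	p.mu.RLock()
    	defer p.mu.RUnlock()
    	return p.operation
    }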
diff --git a/db/model/transaction.go b/db/model/transaction.go
index fa8de1d..7529ff2 100644
--- a/db/model/transaction.go
+++ b/db/model/transaction.go
@@ -16,6 +16,7 @@
 package model
 
 import (
+	"context"
 	"github.com/opencord/voltha-go/common/log"
 )
 
@@ -31,34 +32,34 @@
 	}
 	return tx
 }
-func (t *Transaction) Get(path string, depth int, deep bool) interface{} {
+func (t *Transaction) Get(ctx context.Context, path string, depth int, deep bool) interface{} {
 	if t.txid == "" {
 		log.Errorf("closed transaction")
 		return nil
 	}
 	// TODO: need to review the return values at the different layers!!!!!
-	return t.proxy.Get(path, depth, deep, t.txid)
+	return t.proxy.Get(ctx, path, depth, deep, t.txid)
 }
-func (t *Transaction) Update(path string, data interface{}, strict bool) interface{} {
+func (t *Transaction) Update(ctx context.Context, path string, data interface{}, strict bool) interface{} {
 	if t.txid == "" {
 		log.Errorf("closed transaction")
 		return nil
 	}
-	return t.proxy.Update(path, data, strict, t.txid)
+	return t.proxy.Update(ctx, path, data, strict, t.txid)
 }
-func (t *Transaction) Add(path string, data interface{}) interface{} {
+func (t *Transaction) Add(ctx context.Context, path string, data interface{}) interface{} {
 	if t.txid == "" {
 		log.Errorf("closed transaction")
 		return nil
 	}
-	return t.proxy.Add(path, data, t.txid)
+	return t.proxy.Add(ctx, path, data, t.txid)
 }
-func (t *Transaction) Remove(path string) interface{} {
+func (t *Transaction) Remove(ctx context.Context, path string) interface{} {
 	if t.txid == "" {
 		log.Errorf("closed transaction")
 		return nil
 	}
-	return t.proxy.Remove(path, t.txid)
+	return t.proxy.Remove(ctx, path, t.txid)
 }
 func (t *Transaction) Cancel() {
 	t.proxy.cancelTransaction(t.txid)
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index bc53791..3660a86 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -17,6 +17,7 @@
 package model
 
 import (
+	"context"
 	"encoding/hex"
 	"github.com/google/uuid"
 	"github.com/opencord/voltha-protos/go/common"
@@ -34,7 +35,7 @@
 
 func init() {
 	TestTransaction_Root = NewRoot(&voltha.Voltha{}, nil)
-	TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy("/", false)
+	TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy(context.Background(), "/", false)
 }
 
 //func TestTransaction_1_GetDevices(t *testing.T) {
@@ -79,7 +80,7 @@
 
 	addTx := TestTransaction_RootProxy.OpenTransaction()
 
-	if added := addTx.Add("/devices", device); added == nil {
+	if added := addTx.Add(context.Background(), "/devices", device); added == nil {
 		t.Error("Failed to add device")
 	} else {
 		TestTransaction_TargetDeviceId = added.(*voltha.Device).Id
@@ -93,12 +94,12 @@
 	basePath := "/devices/" + TestTransaction_DeviceId
 
 	getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
-	device1 := getDevWithPortsTx.Get(basePath+"/ports", 1, false)
+	device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
 	t.Logf("retrieved device with ports: %+v", device1)
 	getDevWithPortsTx.Commit()
 
 	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device2 := getDevTx.Get(basePath, 0, false)
+	device2 := getDevTx.Get(context.Background(), basePath, 0, false)
 	t.Logf("retrieved device: %+v", device2)
 
 	getDevTx.Commit()
@@ -106,7 +107,7 @@
 
 func TestTransaction_4_UpdateDevice(t *testing.T) {
 	updateTx := TestTransaction_RootProxy.OpenTransaction()
-	if retrieved := updateTx.Get("/devices/"+TestTransaction_TargetDeviceId, 1, false); retrieved == nil {
+	if retrieved := updateTx.Get(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, 1, false); retrieved == nil {
 		t.Error("Failed to get device")
 	} else {
 		var fwVersion int
@@ -122,7 +123,7 @@
 		t.Logf("Before update : %+v", retrieved)
 
 		// FIXME: The makeBranch passed in function is nil or not being executed properly!!!!!
-		if afterUpdate := updateTx.Update("/devices/"+TestTransaction_TargetDeviceId, retrieved, false); afterUpdate == nil {
+		if afterUpdate := updateTx.Update(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, retrieved, false); afterUpdate == nil {
 			t.Error("Failed to update device")
 		} else {
 			t.Logf("Updated device : %+v", afterUpdate)
@@ -136,12 +137,12 @@
 	basePath := "/devices/" + TestTransaction_DeviceId
 
 	getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
-	device1 := getDevWithPortsTx.Get(basePath+"/ports", 1, false)
+	device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
 	t.Logf("retrieved device with ports: %+v", device1)
 	getDevWithPortsTx.Commit()
 
 	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device2 := getDevTx.Get(basePath, 0, false)
+	device2 := getDevTx.Get(context.Background(), basePath, 0, false)
 	t.Logf("retrieved device: %+v", device2)
 
 	getDevTx.Commit()
@@ -149,7 +150,7 @@
 
 func TestTransaction_6_RemoveDevice(t *testing.T) {
 	removeTx := TestTransaction_RootProxy.OpenTransaction()
-	if removed := removeTx.Remove("/devices/" + TestTransaction_DeviceId); removed == nil {
+	if removed := removeTx.Remove(context.Background(), "/devices/"+TestTransaction_DeviceId); removed == nil {
 		t.Error("Failed to remove device")
 	} else {
 		t.Logf("Removed device : %+v", removed)
@@ -162,7 +163,7 @@
 	basePath := "/devices/" + TestTransaction_DeviceId
 
 	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device := TestTransaction_RootProxy.Get(basePath, 0, false, "")
+	device := TestTransaction_RootProxy.Get(context.Background(), basePath, 0, false, "")
 	t.Logf("retrieved device: %+v", device)
 
 	getDevTx.Commit()
diff --git a/ro_core/core/core.go b/ro_core/core/core.go
index 90aadae..54cd455 100644
--- a/ro_core/core/core.go
+++ b/ro_core/core/core.go
@@ -66,8 +66,8 @@
 		PathPrefix: "service/voltha"}
 	core.clusterDataRoot = model.NewRoot(&voltha.Voltha{}, &backend)
 	core.localDataRoot = model.NewRoot(&voltha.CoreInstance{}, &backend)
-	core.clusterDataProxy = core.clusterDataRoot.CreateProxy("/", false)
-	core.localDataProxy = core.localDataRoot.CreateProxy("/", false)
+	core.clusterDataProxy = core.clusterDataRoot.CreateProxy(context.Background(), "/", false)
+	core.localDataProxy = core.localDataRoot.CreateProxy(context.Background(), "/", false)
 	return &core
 }
 
diff --git a/ro_core/core/device_agent.go b/ro_core/core/device_agent.go
index dfaf767..a64931c 100644
--- a/ro_core/core/device_agent.go
+++ b/ro_core/core/device_agent.go
@@ -56,7 +56,7 @@
 	defer agent.lockDevice.Unlock()
 	log.Debugw("starting-device-agent", log.Fields{"device": agent.lastData})
 	if loadFromDb {
-		if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 0, false, ""); device != nil {
+		if device := agent.clusterDataProxy.Get(ctx, "/devices/"+agent.deviceId, 0, false, ""); device != nil {
 			if d, ok := device.(*voltha.Device); ok {
 				agent.lastData = proto.Clone(d).(*voltha.Device)
 			}
@@ -83,7 +83,7 @@
 func (agent *DeviceAgent) getDevice() (*voltha.Device, error) {
 	agent.lockDevice.Lock()
 	defer agent.lockDevice.Unlock()
-	if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 0, false, ""); device != nil {
+	if device := agent.clusterDataProxy.Get(context.Background(), "/devices/"+agent.deviceId, 0, false, ""); device != nil {
 		if d, ok := device.(*voltha.Device); ok {
 			cloned := proto.Clone(d).(*voltha.Device)
 			return cloned, nil
@@ -95,7 +95,7 @@
 // getDeviceWithoutLock is a helper function to be used ONLY by any device agent function AFTER it has acquired the device lock.
 // This function is meant so that we do not have duplicate code all over the device agent functions
 func (agent *DeviceAgent) getDeviceWithoutLock() (*voltha.Device, error) {
-	if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 0, false, ""); device != nil {
+	if device := agent.clusterDataProxy.Get(context.Background(), "/devices/"+agent.deviceId, 0, false, ""); device != nil {
 		if d, ok := device.(*voltha.Device); ok {
 			cloned := proto.Clone(d).(*voltha.Device)
 			return cloned, nil
diff --git a/ro_core/core/device_manager.go b/ro_core/core/device_manager.go
index f3a1f6c..90c7822 100644
--- a/ro_core/core/device_manager.go
+++ b/ro_core/core/device_manager.go
@@ -26,21 +26,18 @@
 )
 
 type DeviceManager struct {
-	deviceAgents        map[string]*DeviceAgent
-	logicalDeviceMgr    *LogicalDeviceManager
-	clusterDataProxy    *model.Proxy
-	coreInstanceId      string
-	exitChannel         chan int
-	lockDeviceAgentsMap sync.RWMutex
+	deviceAgents     sync.Map
+	logicalDeviceMgr *LogicalDeviceManager
+	clusterDataProxy *model.Proxy
+	coreInstanceId   string
+	exitChannel      chan int
 }
 
 func newDeviceManager(cdProxy *model.Proxy, coreInstanceId string) *DeviceManager {
 	var deviceMgr DeviceManager
 	deviceMgr.exitChannel = make(chan int, 1)
-	deviceMgr.deviceAgents = make(map[string]*DeviceAgent)
 	deviceMgr.coreInstanceId = coreInstanceId
 	deviceMgr.clusterDataProxy = cdProxy
-	deviceMgr.lockDeviceAgentsMap = sync.RWMutex{}
 	return &deviceMgr
 }
 
@@ -69,32 +66,23 @@
 }
 
 func (dMgr *DeviceManager) addDeviceAgentToMap(agent *DeviceAgent) {
-	dMgr.lockDeviceAgentsMap.Lock()
-	defer dMgr.lockDeviceAgentsMap.Unlock()
-	if _, exist := dMgr.deviceAgents[agent.deviceId]; !exist {
-		dMgr.deviceAgents[agent.deviceId] = agent
+	if _, exist := dMgr.deviceAgents.Load(agent.deviceId); !exist {
+		dMgr.deviceAgents.Store(agent.deviceId, agent)
 	}
 }
 
 func (dMgr *DeviceManager) deleteDeviceAgentToMap(agent *DeviceAgent) {
-	dMgr.lockDeviceAgentsMap.Lock()
-	defer dMgr.lockDeviceAgentsMap.Unlock()
-	delete(dMgr.deviceAgents, agent.deviceId)
+	dMgr.deviceAgents.Delete(agent.deviceId)
 }
 
 func (dMgr *DeviceManager) getDeviceAgent(deviceId string) *DeviceAgent {
-	dMgr.lockDeviceAgentsMap.Lock()
-	if agent, ok := dMgr.deviceAgents[deviceId]; ok {
-		dMgr.lockDeviceAgentsMap.Unlock()
-		return agent
+	if agent, ok := dMgr.deviceAgents.Load(deviceId); ok {
+		return agent.(*DeviceAgent)
 	} else {
 		//	Try to load into memory - loading will also create the device agent
-		dMgr.lockDeviceAgentsMap.Unlock()
 		if err := dMgr.load(deviceId); err == nil {
-			dMgr.lockDeviceAgentsMap.Lock()
-			defer dMgr.lockDeviceAgentsMap.Unlock()
-			if agent, ok = dMgr.deviceAgents[deviceId]; ok {
-				return agent
+			if agent, ok = dMgr.deviceAgents.Load(deviceId); ok {
+				return agent.(*DeviceAgent)
 			}
 		}
 	}
@@ -103,12 +91,11 @@
 
 // listDeviceIdsFromMap returns the list of device IDs that are in memory
 func (dMgr *DeviceManager) listDeviceIdsFromMap() *voltha.IDs {
-	dMgr.lockDeviceAgentsMap.Lock()
-	defer dMgr.lockDeviceAgentsMap.Unlock()
 	result := &voltha.IDs{Items: make([]*voltha.ID, 0)}
-	for key, _ := range dMgr.deviceAgents {
-		result.Items = append(result.Items, &voltha.ID{Id: key})
-	}
+	dMgr.deviceAgents.Range(func(key, value interface{}) bool {
+		result.Items = append(result.Items, &voltha.ID{Id: key.(string)})
+		return true
+	})
 	return result
 }
 
@@ -122,9 +109,7 @@
 }
 
 func (dMgr *DeviceManager) IsDeviceInCache(id string) bool {
-	dMgr.lockDeviceAgentsMap.Lock()
-	defer dMgr.lockDeviceAgentsMap.Unlock()
-	_, exist := dMgr.deviceAgents[id]
+	_, exist := dMgr.deviceAgents.Load(id)
 	return exist
 }
 
@@ -140,7 +125,7 @@
 func (dMgr *DeviceManager) ListDevices() (*voltha.Devices, error) {
 	log.Debug("ListDevices")
 	result := &voltha.Devices{}
-	if devices := dMgr.clusterDataProxy.List("/devices", 0, false, ""); devices != nil {
+	if devices := dMgr.clusterDataProxy.List(context.Background(), "/devices", 0, false, ""); devices != nil {
 		for _, device := range devices.([]interface{}) {
 			// If device is not in memory then set it up
 			if !dMgr.IsDeviceInCache(device.(*voltha.Device).Id) {
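
Note: addDeviceAgentToMap above still does a Load followed by a Store; sync.Map also offers LoadOrStore, which performs the same check-and-insert in one atomic step. A sketch of that variant alongside a Range-based listing, with hypothetical type names:

    package sketch

    import "sync"

    type DeviceAgent struct{ deviceId string }

    type agentMap struct {
    	agents sync.Map // deviceId -> *DeviceAgent
    }

    // add registers the agent only if none exists for that id yet, atomically,
    // and returns whichever agent ends up in the map.
    func (m *agentMap) add(agent *DeviceAgent) *DeviceAgent {
    	actual, _ := m.agents.LoadOrStore(agent.deviceId, agent)
    	return actual.(*DeviceAgent)
    }

    // ids walks the map the same way listDeviceIdsFromMap does above.
    func (m *agentMap) ids() []string {
    	var out []string
    	m.agents.Range(func(key, _ interface{}) bool {
    		out = append(out, key.(string))
    		return true
    	})
    	return out
    }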
diff --git a/ro_core/core/logical_device_agent.go b/ro_core/core/logical_device_agent.go
index 1357bd4..d1c8887 100644
--- a/ro_core/core/logical_device_agent.go
+++ b/ro_core/core/logical_device_agent.go
@@ -57,7 +57,7 @@
 	if loadFromDb {
 		//	load from dB - the logical may not exist at this time.  On error, just return and the calling function
 		// will destroy this agent.
-		if logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, ""); logicalDevice != nil {
+		if logicalDevice := agent.clusterDataProxy.Get(ctx, "/logical_devices/"+agent.logicalDeviceId, 0, false, ""); logicalDevice != nil {
 			if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 				agent.lastData = proto.Clone(lDevice).(*voltha.LogicalDevice)
 			}
@@ -85,8 +85,7 @@
 	log.Debug("GetLogicalDevice")
 	agent.lockLogicalDevice.Lock()
 	defer agent.lockLogicalDevice.Unlock()
-	if logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false,
-		""); logicalDevice != nil {
+	if logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, ""); logicalDevice != nil {
 		if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 			return lDevice, nil
 		}
diff --git a/ro_core/core/logical_device_manager.go b/ro_core/core/logical_device_manager.go
index 05b494a..db220d5 100644
--- a/ro_core/core/logical_device_manager.go
+++ b/ro_core/core/logical_device_manager.go
@@ -26,7 +26,7 @@
 )
 
 type LogicalDeviceManager struct {
-	logicalDeviceAgents        map[string]*LogicalDeviceAgent
+	logicalDeviceAgents        sync.Map
 	deviceMgr                  *DeviceManager
 	grpcNbiHdlr                *APIHandler
 	clusterDataProxy           *model.Proxy
@@ -37,7 +37,6 @@
 func newLogicalDeviceManager(deviceMgr *DeviceManager, cdProxy *model.Proxy) *LogicalDeviceManager {
 	var logicalDeviceMgr LogicalDeviceManager
 	logicalDeviceMgr.exitChannel = make(chan int, 1)
-	logicalDeviceMgr.logicalDeviceAgents = make(map[string]*LogicalDeviceAgent)
 	logicalDeviceMgr.deviceMgr = deviceMgr
 	logicalDeviceMgr.clusterDataProxy = cdProxy
 	logicalDeviceMgr.lockLogicalDeviceAgentsMap = sync.RWMutex{}
@@ -60,26 +59,21 @@
 }
 
 func (ldMgr *LogicalDeviceManager) addLogicalDeviceAgentToMap(agent *LogicalDeviceAgent) {
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	if _, exist := ldMgr.logicalDeviceAgents[agent.logicalDeviceId]; !exist {
-		ldMgr.logicalDeviceAgents[agent.logicalDeviceId] = agent
+	if _, exist := ldMgr.logicalDeviceAgents.Load(agent.logicalDeviceId); !exist {
+		ldMgr.logicalDeviceAgents.Store(agent.logicalDeviceId, agent)
 	}
 }
 
 func (ldMgr *LogicalDeviceManager) getLogicalDeviceAgent(logicalDeviceId string) *LogicalDeviceAgent {
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	if agent, ok := ldMgr.logicalDeviceAgents[logicalDeviceId]; ok {
-		return agent
+	if agent, ok := ldMgr.logicalDeviceAgents.Load(logicalDeviceId); ok {
+		//return agent
+		return agent.(*LogicalDeviceAgent)
 	}
 	return nil
 }
 
 func (ldMgr *LogicalDeviceManager) deleteLogicalDeviceAgent(logicalDeviceId string) {
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	delete(ldMgr.logicalDeviceAgents, logicalDeviceId)
+	ldMgr.logicalDeviceAgents.Delete(logicalDeviceId)
 }
 
 // GetLogicalDevice provides a cloned most up to date logical device
@@ -94,14 +88,15 @@
 func (ldMgr *LogicalDeviceManager) IsLogicalDeviceInCache(id string) bool {
 	ldMgr.lockLogicalDeviceAgentsMap.Lock()
 	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	_, exist := ldMgr.logicalDeviceAgents[id]
+	_, exist := ldMgr.logicalDeviceAgents.Load(id)
 	return exist
 }
 
 func (ldMgr *LogicalDeviceManager) listLogicalDevices() (*voltha.LogicalDevices, error) {
 	log.Debug("ListAllLogicalDevices")
 	result := &voltha.LogicalDevices{}
-	if logicalDevices := ldMgr.clusterDataProxy.List("/logical_devices", 0, false, ""); logicalDevices != nil {
+	if logicalDevices := ldMgr.clusterDataProxy.List(context.Background(), "/logical_devices", 0, false,
+		""); logicalDevices != nil {
 		for _, logicalDevice := range logicalDevices.([]interface{}) {
 			// If device is not in memory then set it up
 			if !ldMgr.IsLogicalDeviceInCache(logicalDevice.(*voltha.LogicalDevice).Id) {
@@ -130,16 +125,14 @@
 	log.Debugw("loading-logical-device", log.Fields{"lDeviceId": lDeviceId})
-	// To prevent a race condition, let's hold the logical device agent map lock.  This will prevent a loading and
-	// a create logical device callback from occurring at the same time.
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	if ldAgent, _ := ldMgr.logicalDeviceAgents[lDeviceId]; ldAgent == nil {
+	// The agents map is a sync.Map, so no explicit lock is needed while checking whether the
+	// logical device is already being managed before loading it from the KV store.
+	if ldAgent, _ := ldMgr.logicalDeviceAgents.Load(lDeviceId); ldAgent == nil {
 		// Logical device not in memory - create a temp logical device Agent and let it load from memory
 		agent := newLogicalDeviceAgent(lDeviceId, "", ldMgr, ldMgr.deviceMgr, ldMgr.clusterDataProxy)
 		if err := agent.start(nil, true); err != nil {
 			agent.stop(nil)
 			return err
 		}
-		ldMgr.logicalDeviceAgents[agent.logicalDeviceId] = agent
+		ldMgr.logicalDeviceAgents.Store(agent.logicalDeviceId, agent)
 	}
 	// TODO: load the child device
 	return nil
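
The hunks above replace the RWMutex-guarded map[string]*LogicalDeviceAgent with a sync.Map. A minimal, self-contained sketch of that access pattern is shown below; the manager and agent names are placeholders for illustration only, not the VOLTHA types. sync.Map.LoadOrStore folds the patch's Load-then-Store check into one atomic call, which would also close the remaining check-then-act window.

package main

import (
	"fmt"
	"sync"
)

// agent stands in for *LogicalDeviceAgent in this sketch.
type agent struct{ id string }

type manager struct {
	agents sync.Map // keyed by id; safe for concurrent use without an explicit lock
}

// add registers the agent only if no agent exists for that id yet.
// LoadOrStore performs the check and the store as a single atomic step.
func (m *manager) add(a *agent) {
	m.agents.LoadOrStore(a.id, a)
}

// get returns the agent for id, or nil when none is registered.
// sync.Map returns interface{}, so a type assertion is required.
func (m *manager) get(id string) *agent {
	if v, ok := m.agents.Load(id); ok {
		return v.(*agent)
	}
	return nil
}

func main() {
	m := &manager{}
	m.add(&agent{id: "ld-1"})
	fmt.Println(m.get("ld-1") != nil) // true
	m.agents.Delete("ld-1")
	fmt.Println(m.get("ld-1") != nil) // false
}
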
diff --git a/ro_core/core/model_proxy.go b/ro_core/core/model_proxy.go
index f5e6c3b..473e579 100644
--- a/ro_core/core/model_proxy.go
+++ b/ro_core/core/model_proxy.go
@@ -16,6 +16,7 @@
 package core
 
 import (
+	"context"
 	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/model"
 	"google.golang.org/grpc/codes"
@@ -55,7 +56,7 @@
 
 	log.Debugw("get-data", log.Fields{"path": path})
 
-	if data := mp.rootProxy.Get(path, 1, false, ""); data != nil {
+	if data := mp.rootProxy.Get(context.Background(), path, 1, false, ""); data != nil {
 		return data, nil
 	}
 	return nil, status.Errorf(codes.NotFound, "data-path: %s", path)
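
The read path above now threads a context.Context into rootProxy.Get, even though this call site still passes context.Background(). The value of plumbing the context through is that callers can later attach deadlines or cancellation without another API change; the sketch below illustrates the idea with a hypothetical getData helper that is not part of the voltha-go API.

package main

import (
	"context"
	"fmt"
	"time"
)

// getData is an illustrative stand-in for a model-proxy read: it honours any
// deadline or cancellation carried by ctx.
func getData(ctx context.Context, path string) (string, error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend KV-store round trip
		return "data@" + path, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	// A caller that wants to bound the read can now supply a deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	if _, err := getData(ctx, "/devices/d1"); err != nil {
		fmt.Println("read aborted:", err) // context deadline exceeded
	}
}
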
diff --git a/rw_core/core/adapter_manager.go b/rw_core/core/adapter_manager.go
index 5d539aa..ac856df 100644
--- a/rw_core/core/adapter_manager.go
+++ b/rw_core/core/adapter_manager.go
@@ -119,8 +119,8 @@
 	aMgr.loadAdaptersAndDevicetypesInMemory()
 
 	//// Create the proxies
-	aMgr.adapterProxy = aMgr.clusterDataProxy.CreateProxy("/adapters", false)
-	aMgr.deviceTypeProxy = aMgr.clusterDataProxy.CreateProxy("/device_types", false)
+	aMgr.adapterProxy = aMgr.clusterDataProxy.CreateProxy(context.Background(), "/adapters", false)
+	aMgr.deviceTypeProxy = aMgr.clusterDataProxy.CreateProxy(context.Background(), "/device_types", false)
 
 	// Register the callbacks
 	aMgr.adapterProxy.RegisterCallback(model.POST_UPDATE, aMgr.adapterUpdated)
@@ -138,7 +138,7 @@
 //loadAdaptersAndDevicetypesInMemory loads the existing set of adapters and device types in memory
 func (aMgr *AdapterManager) loadAdaptersAndDevicetypesInMemory() {
 	// Load the adapters
-	if adaptersIf := aMgr.clusterDataProxy.List("/adapters", 0, false, ""); adaptersIf != nil {
+	if adaptersIf := aMgr.clusterDataProxy.List(context.Background(), "/adapters", 0, false, ""); adaptersIf != nil {
 		for _, adapterIf := range adaptersIf.([]interface{}) {
 			if adapter, ok := adapterIf.(*voltha.Adapter); ok {
 				log.Debugw("found-existing-adapter", log.Fields{"adapterId": adapter.Id})
@@ -152,7 +152,7 @@
 	}
 
 	// Load the device types
-	if deviceTypesIf := aMgr.clusterDataProxy.List("/device_types", 0, false, ""); deviceTypesIf != nil {
+	if deviceTypesIf := aMgr.clusterDataProxy.List(context.Background(), "/device_types", 0, false, ""); deviceTypesIf != nil {
 		dTypes := &voltha.DeviceTypes{Items: []*voltha.DeviceType{}}
 		for _, deviceTypeIf := range deviceTypesIf.([]interface{}) {
 			if dType, ok := deviceTypeIf.(*voltha.DeviceType); ok {
@@ -171,7 +171,7 @@
 //updateAdaptersAndDevicetypesInMemory loads the existing set of adapters and device types in memory
 func (aMgr *AdapterManager) updateAdaptersAndDevicetypesInMemory() {
 	// Update the adapters
-	if adaptersIf := aMgr.clusterDataProxy.List("/adapters", 0, false, ""); adaptersIf != nil {
+	if adaptersIf := aMgr.clusterDataProxy.List(context.Background(), "/adapters", 0, false, ""); adaptersIf != nil {
 		for _, adapterIf := range adaptersIf.([]interface{}) {
 			if adapter, ok := adapterIf.(*voltha.Adapter); ok {
 				log.Debugw("found-existing-adapter", log.Fields{"adapterId": adapter.Id})
@@ -180,7 +180,7 @@
 		}
 	}
 	// Update the device types
-	if deviceTypesIf := aMgr.clusterDataProxy.List("/device_types", 0, false, ""); deviceTypesIf != nil {
+	if deviceTypesIf := aMgr.clusterDataProxy.List(context.Background(), "/device_types", 0, false, ""); deviceTypesIf != nil {
 		dTypes := &voltha.DeviceTypes{Items: []*voltha.DeviceType{}}
 		for _, deviceTypeIf := range deviceTypesIf.([]interface{}) {
 			if dType, ok := deviceTypeIf.(*voltha.DeviceType); ok {
@@ -200,8 +200,8 @@
 		aMgr.adapterAgents[adapter.Id] = newAdapterAgent(clonedAdapter, nil)
 		if saveToDb {
 			// Save the adapter to the KV store - first check if it already exist
-			if kvAdapter := aMgr.clusterDataProxy.Get("/adapters/"+adapter.Id, 0, false, ""); kvAdapter == nil {
-				if added := aMgr.clusterDataProxy.AddWithID("/adapters", adapter.Id, clonedAdapter, ""); added == nil {
+			if kvAdapter := aMgr.clusterDataProxy.Get(context.Background(), "/adapters/"+adapter.Id, 0, false, ""); kvAdapter == nil {
+				if added := aMgr.clusterDataProxy.AddWithID(context.Background(), "/adapters", adapter.Id, clonedAdapter, ""); added == nil {
 					//TODO:  Errors when saving to KV would require a separate go routine to be launched and try the saving again
 					log.Errorw("failed-to-save-adapter", log.Fields{"adapter": adapter})
 				} else {
@@ -234,10 +234,10 @@
 	if saveToDb {
 		// Save the device types to the KV store as well
 		for _, deviceType := range deviceTypes.Items {
-			if dType := aMgr.clusterDataProxy.Get("/device_types/"+deviceType.Id, 0, false, ""); dType == nil {
+			if dType := aMgr.clusterDataProxy.Get(context.Background(), "/device_types/"+deviceType.Id, 0, false, ""); dType == nil {
 				//	Does not exist - save it
 				clonedDType := (proto.Clone(deviceType)).(*voltha.DeviceType)
-				if added := aMgr.clusterDataProxy.AddWithID("/device_types", deviceType.Id, clonedDType, ""); added == nil {
+				if added := aMgr.clusterDataProxy.AddWithID(context.Background(), "/device_types", deviceType.Id, clonedDType, ""); added == nil {
 					log.Errorw("failed-to-save-deviceType", log.Fields{"deviceType": deviceType})
 				} else {
 					log.Debugw("device-type-saved-to-KV-Store", log.Fields{"deviceType": deviceType})
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index 938d8e9..224b3cb 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -49,7 +49,7 @@
 	kvClient          kvstore.Client
 	kafkaClient       kafka.Client
 	coreMembership    *voltha.Membership
-	membershipLock    *sync.RWMutex
+	membershipLock    sync.RWMutex
 	deviceOwnership   *DeviceOwnership
 }
 
@@ -78,9 +78,8 @@
 		PathPrefix: cf.KVStoreDataPrefix}
 	core.clusterDataRoot = model.NewRoot(&voltha.Voltha{}, &backend)
 	core.localDataRoot = model.NewRoot(&voltha.CoreInstance{}, &backend)
-	core.clusterDataProxy = core.clusterDataRoot.CreateProxy("/", false)
-	core.localDataProxy = core.localDataRoot.CreateProxy("/", false)
-	core.membershipLock = &sync.RWMutex{}
+	core.clusterDataProxy = core.clusterDataRoot.CreateProxy(context.Background(), "/", false)
+	core.localDataProxy = core.localDataRoot.CreateProxy(context.Background(), "/", false)
 	return &core
 }
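
core.go above switches membershipLock from *sync.RWMutex to a value, which is why the explicit &sync.RWMutex{} initialization disappears: the zero value of a Go mutex is already usable. A small sketch under hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type membershipHolder struct {
	mu         sync.RWMutex // zero value is an unlocked, ready-to-use mutex; no constructor needed
	membership string
}

func (h *membershipHolder) set(m string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.membership = m
}

func (h *membershipHolder) get() string {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.membership
}

func main() {
	var h membershipHolder // no explicit lock initialization required
	h.set("core-1")
	fmt.Println(h.get())
}

The usual caveat is that a struct holding a mutex by value must not be copied after first use; that appears to hold here since the core is created once and handled by pointer.
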
 
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index 0ce1cce..0198254 100755
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -29,6 +29,7 @@
 	"google.golang.org/grpc/status"
 	"reflect"
 	"sync"
+	"time"
 )
 
 type DeviceAgent struct {
@@ -85,7 +86,7 @@
 	defer agent.lockDevice.Unlock()
 	log.Debugw("starting-device-agent", log.Fields{"deviceId": agent.deviceId})
 	if loadFromdB {
-		if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 1, false, ""); device != nil {
+		if device := agent.clusterDataProxy.Get(ctx, "/devices/"+agent.deviceId, 1, false, ""); device != nil {
 			if d, ok := device.(*voltha.Device); ok {
 				agent.lastData = proto.Clone(d).(*voltha.Device)
 				agent.deviceType = agent.lastData.Adapter
@@ -97,12 +98,12 @@
 		log.Debugw("device-loaded-from-dB", log.Fields{"device": agent.lastData})
 	} else {
 		// Add the initial device to the local model
-		if added := agent.clusterDataProxy.AddWithID("/devices", agent.deviceId, agent.lastData, ""); added == nil {
+		if added := agent.clusterDataProxy.AddWithID(ctx, "/devices", agent.deviceId, agent.lastData, ""); added == nil {
 			log.Errorw("failed-to-add-device", log.Fields{"deviceId": agent.deviceId})
 		}
 	}
 
-	agent.deviceProxy = agent.clusterDataProxy.CreateProxy("/devices/"+agent.deviceId, false)
+	agent.deviceProxy = agent.clusterDataProxy.CreateProxy(ctx, "/devices/"+agent.deviceId, false)
 	agent.deviceProxy.RegisterCallback(model.POST_UPDATE, agent.processUpdate)
 
 	log.Debug("device-agent-started")
@@ -115,7 +116,7 @@
 	defer agent.lockDevice.Unlock()
 	log.Debug("stopping-device-agent")
 	//	Remove the device from the KV store
-	if removed := agent.clusterDataProxy.Remove("/devices/"+agent.deviceId, ""); removed == nil {
+	if removed := agent.clusterDataProxy.Remove(ctx, "/devices/"+agent.deviceId, ""); removed == nil {
 		log.Debugw("device-already-removed", log.Fields{"id": agent.deviceId})
 	}
 	agent.exitChannel <- 1
@@ -127,7 +128,7 @@
 func (agent *DeviceAgent) getDevice() (*voltha.Device, error) {
 	agent.lockDevice.RLock()
 	defer agent.lockDevice.RUnlock()
-	if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 0, false, ""); device != nil {
+	if device := agent.clusterDataProxy.Get(context.Background(), "/devices/"+agent.deviceId, 0, true, ""); device != nil {
 		if d, ok := device.(*voltha.Device); ok {
 			cloned := proto.Clone(d).(*voltha.Device)
 			return cloned, nil
@@ -139,7 +140,7 @@
 // getDeviceWithoutLock is a helper function to be used ONLY by any device agent function AFTER it has acquired the device lock.
 // This function is meant so that we do not have duplicate code all over the device agent functions
 func (agent *DeviceAgent) getDeviceWithoutLock() (*voltha.Device, error) {
-	if device := agent.clusterDataProxy.Get("/devices/"+agent.deviceId, 0, false, ""); device != nil {
+	if device := agent.clusterDataProxy.Get(context.Background(), "/devices/"+agent.deviceId, 0, false, ""); device != nil {
 		if d, ok := device.(*voltha.Device); ok {
 			cloned := proto.Clone(d).(*voltha.Device)
 			return cloned, nil
@@ -186,7 +187,9 @@
 		cloned := proto.Clone(device).(*voltha.Device)
 		cloned.AdminState = voltha.AdminState_ENABLED
 		cloned.OperStatus = voltha.OperStatus_ACTIVATING
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 
@@ -547,7 +550,8 @@
 		cloned := proto.Clone(device).(*voltha.Device)
 		cloned.AdminState = voltha.AdminState_DISABLED
 		cloned.OperStatus = voltha.OperStatus_UNKNOWN
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 
@@ -574,7 +578,8 @@
 		// Received an Ack (no error found above).  Now update the device in the model to the expected state
 		cloned := proto.Clone(device).(*voltha.Device)
 		cloned.AdminState = adminState
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 	}
@@ -626,7 +631,8 @@
 		//	the device as well as its association with the logical device
 		cloned := proto.Clone(device).(*voltha.Device)
 		cloned.AdminState = voltha.AdminState_DELETED
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 
@@ -660,7 +666,8 @@
 			cloned.ImageDownloads = append(cloned.ImageDownloads, clonedImg)
 		}
 		cloned.AdminState = voltha.AdminState_DOWNLOADING_IMAGE
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return nil, status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 		// Send the request to the adapter
@@ -706,7 +713,8 @@
 		if device.AdminState == voltha.AdminState_DOWNLOADING_IMAGE {
 			// Set the device to Enabled
 			cloned.AdminState = voltha.AdminState_ENABLED
-			if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+			updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+			if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 				return nil, status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 			}
 			// Send the request to teh adapter
@@ -744,7 +752,8 @@
 		}
 		// Set the device to downloading_image
 		cloned.AdminState = voltha.AdminState_DOWNLOADING_IMAGE
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return nil, status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 
@@ -781,7 +790,8 @@
 				image.ImageState = voltha.ImageDownload_IMAGE_REVERTING
 			}
 		}
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return nil, status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 
@@ -836,7 +846,8 @@
 			cloned.AdminState = voltha.AdminState_ENABLED
 		}
 
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
 		}
 	}
@@ -965,7 +976,8 @@
 	defer agent.lockDevice.Unlock()
 	log.Debugw("updateDevice", log.Fields{"deviceId": device.Id})
 	cloned := proto.Clone(device).(*voltha.Device)
-	afterUpdate := agent.clusterDataProxy.Update("/devices/"+device.Id, cloned, false, "")
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+device.Id, cloned, false, "")
 	if afterUpdate == nil {
 		return status.Errorf(codes.Internal, "%s", device.Id)
 	}
@@ -975,7 +987,8 @@
 func (agent *DeviceAgent) updateDeviceWithoutLock(device *voltha.Device) error {
 	log.Debugw("updateDevice", log.Fields{"deviceId": device.Id})
 	cloned := proto.Clone(device).(*voltha.Device)
-	afterUpdate := agent.clusterDataProxy.Update("/devices/"+device.Id, cloned, false, "")
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+device.Id, cloned, false, "")
 	if afterUpdate == nil {
 		return status.Errorf(codes.Internal, "%s", device.Id)
 	}
@@ -1002,7 +1015,8 @@
 		}
 		log.Debugw("updateDeviceStatus", log.Fields{"deviceId": cloned.Id, "operStatus": cloned.OperStatus, "connectStatus": cloned.ConnectStatus})
 		// Store the device
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		return nil
@@ -1022,7 +1036,8 @@
 			port.OperStatus = voltha.OperStatus_ACTIVE
 		}
 		// Store the device
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		return nil
@@ -1043,7 +1058,8 @@
 			port.OperStatus = voltha.OperStatus_UNKNOWN
 		}
 		// Store the device
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		return nil
@@ -1077,7 +1093,8 @@
 		}
 		log.Debugw("portStatusUpdate", log.Fields{"deviceId": cloned.Id})
 		// Store the device
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		return nil
@@ -1106,7 +1123,8 @@
 		cloned.Ports = []*voltha.Port{}
 		log.Debugw("portStatusUpdate", log.Fields{"deviceId": cloned.Id})
 		// Store the device
-		if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
 		return nil
@@ -1125,7 +1143,8 @@
 		cloned := proto.Clone(storeDevice).(*voltha.Device)
 		cloned.PmConfigs = proto.Clone(pmConfigs).(*voltha.PmConfigs)
 		// Store the device
-		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, "")
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -1163,7 +1182,8 @@
 		}
 		cloned.Ports = append(cloned.Ports, cp)
 		// Store the device
-		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, "")
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -1191,7 +1211,8 @@
 			}
 		}
 		// Store the device
-		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, "")
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -1221,7 +1242,8 @@
 		}
 
 		// Store the device with updated peer ports
-		afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, "")
+		updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+		afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, "")
 		if afterUpdate == nil {
 			return status.Errorf(codes.Internal, "%s", agent.deviceId)
 		}
@@ -1263,7 +1285,8 @@
 	log.Debugw("update-field-status", log.Fields{"deviceId": storeDevice.Id, "name": name, "updated": updated})
 	//	Save the data
 	cloned := proto.Clone(storeDevice).(*voltha.Device)
-	if afterUpdate := agent.clusterDataProxy.Update("/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	if afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/devices/"+agent.deviceId, cloned, false, ""); afterUpdate == nil {
 		log.Warnw("attribute-update-failed", log.Fields{"attribute": name, "value": value})
 	}
 	return
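
Each update in device_agent.go now runs with context.WithValue(ctx, model.RequestTimestamp, time.Now().UnixNano()), carrying the caller's timestamp so the db/model layer can decide whether a revision change should be applied (per the commit message). The sketch below shows the general attach-and-consume pattern; the requestTimestamp key and applyIfNewer helper are illustrative stand-ins, not the voltha-go model API.

package main

import (
	"context"
	"fmt"
	"time"
)

// ctxKey is an unexported key type to avoid collisions between context values.
type ctxKey string

// requestTimestamp is an illustrative stand-in for model.RequestTimestamp.
const requestTimestamp ctxKey = "request-timestamp"

// withRequestTimestamp tags the context with the caller's wall-clock time in nanoseconds.
func withRequestTimestamp(ctx context.Context) context.Context {
	return context.WithValue(ctx, requestTimestamp, time.Now().UnixNano())
}

// applyIfNewer is a hypothetical consumer: it accepts an update only when the
// request timestamp in the context is newer than the stored revision's timestamp.
func applyIfNewer(ctx context.Context, storedTs int64) bool {
	ts, ok := ctx.Value(requestTimestamp).(int64)
	if !ok {
		return false // no timestamp supplied; the real policy is up to the caller
	}
	return ts > storedTs
}

func main() {
	ctx := withRequestTimestamp(context.Background())
	fmt.Println(applyIfNewer(ctx, 0)) // true: the request is newer than revision timestamp 0
}
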
diff --git a/rw_core/core/device_manager.go b/rw_core/core/device_manager.go
index ad4f362..257b707 100755
--- a/rw_core/core/device_manager.go
+++ b/rw_core/core/device_manager.go
@@ -33,34 +33,31 @@
 )
 
 type DeviceManager struct {
-	deviceAgents        map[string]*DeviceAgent
-	rootDevices         map[string]bool
-	lockRootDeviceMap   sync.RWMutex
-	core                *Core
-	adapterProxy        *AdapterProxy
-	adapterMgr          *AdapterManager
-	logicalDeviceMgr    *LogicalDeviceManager
-	kafkaICProxy        *kafka.InterContainerProxy
-	stateTransitions    *TransitionMap
-	clusterDataProxy    *model.Proxy
-	coreInstanceId      string
-	exitChannel         chan int
-	defaultTimeout      int64
-	lockDeviceAgentsMap sync.RWMutex
+	deviceAgents      sync.Map
+	rootDevices       map[string]bool
+	lockRootDeviceMap sync.RWMutex
+	core              *Core
+	adapterProxy      *AdapterProxy
+	adapterMgr        *AdapterManager
+	logicalDeviceMgr  *LogicalDeviceManager
+	kafkaICProxy      *kafka.InterContainerProxy
+	stateTransitions  *TransitionMap
+	clusterDataProxy  *model.Proxy
+	coreInstanceId    string
+	exitChannel       chan int
+	defaultTimeout    int64
 }
 
 func newDeviceManager(core *Core) *DeviceManager {
 	var deviceMgr DeviceManager
 	deviceMgr.core = core
 	deviceMgr.exitChannel = make(chan int, 1)
-	deviceMgr.deviceAgents = make(map[string]*DeviceAgent)
 	deviceMgr.rootDevices = make(map[string]bool)
 	deviceMgr.kafkaICProxy = core.kmp
 	deviceMgr.adapterProxy = NewAdapterProxy(core.kmp)
 	deviceMgr.coreInstanceId = core.instanceId
 	deviceMgr.clusterDataProxy = core.clusterDataProxy
 	deviceMgr.adapterMgr = core.adapterMgr
-	deviceMgr.lockDeviceAgentsMap = sync.RWMutex{}
 	deviceMgr.lockRootDeviceMap = sync.RWMutex{}
 	deviceMgr.defaultTimeout = core.config.DefaultCoreTimeout
 	return &deviceMgr
@@ -92,12 +89,9 @@
 }
 
 func (dMgr *DeviceManager) addDeviceAgentToMap(agent *DeviceAgent) {
-	dMgr.lockDeviceAgentsMap.Lock()
-	//defer dMgr.lockDeviceAgentsMap.Unlock()
-	if _, exist := dMgr.deviceAgents[agent.deviceId]; !exist {
-		dMgr.deviceAgents[agent.deviceId] = agent
+	if _, exist := dMgr.deviceAgents.Load(agent.deviceId); !exist {
+		dMgr.deviceAgents.Store(agent.deviceId, agent)
 	}
-	dMgr.lockDeviceAgentsMap.Unlock()
 	dMgr.lockRootDeviceMap.Lock()
 	defer dMgr.lockRootDeviceMap.Unlock()
 	dMgr.rootDevices[agent.deviceId] = agent.isRootdevice
@@ -105,34 +99,25 @@
 }
 
 func (dMgr *DeviceManager) deleteDeviceAgentToMap(agent *DeviceAgent) {
-	dMgr.lockDeviceAgentsMap.Lock()
-	//defer dMgr.lockDeviceAgentsMap.Unlock()
-	delete(dMgr.deviceAgents, agent.deviceId)
-	dMgr.lockDeviceAgentsMap.Unlock()
+	dMgr.deviceAgents.Delete(agent.deviceId)
 	dMgr.lockRootDeviceMap.Lock()
 	defer dMgr.lockRootDeviceMap.Unlock()
 	delete(dMgr.rootDevices, agent.deviceId)
-
 }
 
 // getDeviceAgent returns the agent managing the device.  If the device is not in memory, it will loads it, if it exists
 func (dMgr *DeviceManager) getDeviceAgent(deviceId string) *DeviceAgent {
-	dMgr.lockDeviceAgentsMap.RLock()
-	if agent, ok := dMgr.deviceAgents[deviceId]; ok {
-		dMgr.lockDeviceAgentsMap.RUnlock()
-		return agent
+	if agent, ok := dMgr.deviceAgents.Load(deviceId); ok {
+		return agent.(*DeviceAgent)
 	} else {
 		//	Try to load into memory - loading will also create the device agent and set the device ownership
-		dMgr.lockDeviceAgentsMap.RUnlock()
 		if err := dMgr.load(deviceId); err == nil {
-			dMgr.lockDeviceAgentsMap.RLock()
-			defer dMgr.lockDeviceAgentsMap.RUnlock()
-			if agent, ok = dMgr.deviceAgents[deviceId]; !ok {
+			if agent, ok = dMgr.deviceAgents.Load(deviceId); !ok {
 				return nil
 			} else {
 				// Register this device for ownership tracking
 				go dMgr.core.deviceOwnership.OwnedByMe(&utils.DeviceID{Id: deviceId})
-				return agent
+				return agent.(*DeviceAgent)
 			}
 		} else {
 			//TODO: Change the return params to return an error as well
@@ -144,12 +129,13 @@
 
 // listDeviceIdsFromMap returns the list of device IDs that are in memory
 func (dMgr *DeviceManager) listDeviceIdsFromMap() *voltha.IDs {
-	dMgr.lockDeviceAgentsMap.RLock()
-	defer dMgr.lockDeviceAgentsMap.RUnlock()
 	result := &voltha.IDs{Items: make([]*voltha.ID, 0)}
-	for key := range dMgr.deviceAgents {
-		result.Items = append(result.Items, &voltha.ID{Id: key})
-	}
+
+	dMgr.deviceAgents.Range(func(key, value interface{}) bool {
+		result.Items = append(result.Items, &voltha.ID{Id: key.(string)})
+		return true
+	})
+
 	return result
 }
 
@@ -355,9 +341,7 @@
 }
 
 func (dMgr *DeviceManager) IsDeviceInCache(id string) bool {
-	dMgr.lockDeviceAgentsMap.RLock()
-	defer dMgr.lockDeviceAgentsMap.RUnlock()
-	_, exist := dMgr.deviceAgents[id]
+	_, exist := dMgr.deviceAgents.Load(id)
 	return exist
 }
 
@@ -374,7 +358,7 @@
 func (dMgr *DeviceManager) ListDevices() (*voltha.Devices, error) {
 	log.Debug("ListDevices")
 	result := &voltha.Devices{}
-	if devices := dMgr.clusterDataProxy.List("/devices", 0, false, ""); devices != nil {
+	if devices := dMgr.clusterDataProxy.List(context.Background(), "/devices", 0, false, ""); devices != nil {
 		for _, device := range devices.([]interface{}) {
 			// If device is not in memory then set it up
 			if !dMgr.IsDeviceInCache(device.(*voltha.Device).Id) {
@@ -396,7 +380,7 @@
 
 //getDeviceFromModelretrieves the device data from the model.
 func (dMgr *DeviceManager) getDeviceFromModel(deviceId string) (*voltha.Device, error) {
-	if device := dMgr.clusterDataProxy.Get("/devices/"+deviceId, 0, false, ""); device != nil {
+	if device := dMgr.clusterDataProxy.Get(context.Background(), "/devices/"+deviceId, 0, false, ""); device != nil {
 		if d, ok := device.(*voltha.Device); ok {
 			return d, nil
 		}
@@ -791,7 +775,7 @@
 		log.Debugw("no-op-transition", log.Fields{"deviceId": current.Id})
 		return nil
 	}
-	log.Debugw("handler-found", log.Fields{"num-handlers": len(handlers), "isParent": current.Root})
+	log.Debugw("handler-found", log.Fields{"num-handlers": len(handlers), "isParent": current.Root, "current-data": current})
 	for _, handler := range handlers {
 		log.Debugw("running-handler", log.Fields{"handler": funcName(handler)})
 		if err := handler(current); err != nil {
@@ -1024,13 +1008,13 @@
 func (dMgr *DeviceManager) getAllDeviceIdsWithDeviceParentId(id string) []string {
 	log.Debugw("getAllAgentsWithDeviceParentId", log.Fields{"parentDeviceId": id})
 	deviceIds := make([]string, 0)
-	dMgr.lockDeviceAgentsMap.RLock()
-	defer dMgr.lockDeviceAgentsMap.RUnlock()
-	for deviceId, agent := range dMgr.deviceAgents {
+	dMgr.deviceAgents.Range(func(key, value interface{}) bool {
+		agent := value.(*DeviceAgent)
 		if agent.parentId == id {
-			deviceIds = append(deviceIds, deviceId)
+			deviceIds = append(deviceIds, key.(string))
 		}
-	}
+		return true
+	})
 	return deviceIds
 }
 
@@ -1218,7 +1202,12 @@
 }
 
 func (dMgr *DeviceManager) NotifyInvalidTransition(pcDevice *voltha.Device) error {
-	log.Errorw("NotifyInvalidTransition", log.Fields{"device": pcDevice.Id, "adminState": pcDevice.AdminState})
+	log.Errorw("NotifyInvalidTransition", log.Fields{
+		"device":     pcDevice.Id,
+		"adminState": pcDevice.AdminState,
+		"operState":  pcDevice.OperStatus,
+		"connState":  pcDevice.ConnectStatus,
+	})
 	//TODO: notify over kafka?
 	return nil
 }
@@ -1230,8 +1219,8 @@
 }
 
 func (dMgr *DeviceManager) UpdateDeviceAttribute(deviceId string, attribute string, value interface{}) {
-	if agent, ok := dMgr.deviceAgents[deviceId]; ok {
-		agent.updateDeviceAttribute(attribute, value)
+	if agent, ok := dMgr.deviceAgents.Load(deviceId); ok {
+		agent.(*DeviceAgent).updateDeviceAttribute(attribute, value)
 	}
 }
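
device_manager.go above walks the agent map with sync.Map.Range, where the callback's boolean return controls whether iteration continues. A short, self-contained sketch of that pattern with placeholder data:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var agents sync.Map
	agents.Store("dev-1", "parent-a")
	agents.Store("dev-2", "parent-b")
	agents.Store("dev-3", "parent-a")

	// Returning true keeps the iteration going; collect all keys whose value matches.
	ids := make([]string, 0)
	agents.Range(func(key, value interface{}) bool {
		if value.(string) == "parent-a" {
			ids = append(ids, key.(string))
		}
		return true
	})
	fmt.Println(len(ids)) // 2

	// Returning false stops the iteration early, e.g. after the first hit.
	var first string
	agents.Range(func(key, value interface{}) bool {
		first = key.(string)
		return false
	})
	fmt.Println(first != "") // true (iteration order is unspecified)
}
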
 
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index 53faec8..70349d8 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -32,6 +32,7 @@
 	"google.golang.org/grpc/status"
 	"reflect"
 	"sync"
+	"time"
 )
 
 type LogicalDeviceAgent struct {
@@ -103,7 +104,7 @@
 
 		agent.lockLogicalDevice.Lock()
 		// Save the logical device
-		if added := agent.clusterDataProxy.AddWithID("/logical_devices", ld.Id, ld, ""); added == nil {
+		if added := agent.clusterDataProxy.AddWithID(ctx, "/logical_devices", ld.Id, ld, ""); added == nil {
 			log.Errorw("failed-to-add-logical-device", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
 		} else {
 			log.Debugw("logicaldevice-created", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
@@ -135,12 +136,15 @@
 	defer agent.lockLogicalDevice.Unlock()
 
 	agent.flowProxy = agent.clusterDataProxy.CreateProxy(
+		ctx,
 		fmt.Sprintf("/logical_devices/%s/flows", agent.logicalDeviceId),
 		false)
 	agent.groupProxy = agent.clusterDataProxy.CreateProxy(
+		ctx,
 		fmt.Sprintf("/logical_devices/%s/flow_groups", agent.logicalDeviceId),
 		false)
 	agent.ldProxy = agent.clusterDataProxy.CreateProxy(
+		ctx,
 		fmt.Sprintf("/logical_devices/%s", agent.logicalDeviceId),
 		false)
 
@@ -162,7 +166,7 @@
 	defer agent.lockLogicalDevice.Unlock()
 
 	//Remove the logical device from the model
-	if removed := agent.clusterDataProxy.Remove("/logical_devices/"+agent.logicalDeviceId, ""); removed == nil {
+	if removed := agent.clusterDataProxy.Remove(ctx, "/logical_devices/"+agent.logicalDeviceId, ""); removed == nil {
 		log.Errorw("failed-to-remove-logical-device", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
 	} else {
 		log.Debugw("logicaldevice-removed", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
@@ -176,7 +180,7 @@
 	log.Debug("GetLogicalDevice")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		return lDevice, nil
 	}
@@ -187,7 +191,7 @@
 	log.Debug("ListLogicalDeviceFlows")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		cFlows := (proto.Clone(lDevice.Flows)).(*ofp.Flows)
 		return cFlows, nil
@@ -199,7 +203,7 @@
 	log.Debug("ListLogicalDeviceFlowGroups")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		cFlowGroups := (proto.Clone(lDevice.FlowGroups)).(*ofp.FlowGroups)
 		return cFlowGroups, nil
@@ -211,7 +215,7 @@
 	log.Debug("ListLogicalDevicePorts")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		lPorts := make([]*voltha.LogicalPort, 0)
 		for _, port := range lDevice.Ports {
@@ -227,7 +231,7 @@
 	log.Debug("listFlows")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		return lDevice.Flows.Items
 	}
@@ -239,7 +243,7 @@
 	log.Debug("listFlowGroups")
 	agent.lockLogicalDevice.RLock()
 	defer agent.lockLogicalDevice.RUnlock()
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		return lDevice.FlowGroups.Items
 	}
@@ -248,7 +252,8 @@
 
 //updateLogicalDeviceWithoutLock updates the model with the logical device.  It clones the logicaldevice before saving it
 func (agent *LogicalDeviceAgent) updateLogicalDeviceFlowsWithoutLock(flows *ofp.Flows) error {
-	afterUpdate := agent.flowProxy.Update("/", flows, false, "")
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	afterUpdate := agent.flowProxy.Update(updateCtx, "/", flows, false, "")
 	if afterUpdate == nil {
 		return status.Errorf(codes.Internal, "failed-updating-logical-device-flows:%s", agent.logicalDeviceId)
 	}
@@ -257,7 +262,8 @@
 
 //updateLogicalDeviceWithoutLock updates the model with the logical device.  It clones the logicaldevice before saving it
 func (agent *LogicalDeviceAgent) updateLogicalDeviceFlowGroupsWithoutLock(flowGroups *ofp.FlowGroups) error {
-	afterUpdate := agent.groupProxy.Update("/", flowGroups, false, "")
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	afterUpdate := agent.groupProxy.Update(updateCtx, "/", flowGroups, false, "")
 	if afterUpdate == nil {
 		return status.Errorf(codes.Internal, "failed-updating-logical-device-flow-groups:%s", agent.logicalDeviceId)
 	}
@@ -268,7 +274,7 @@
 // functions that have already acquired the logical device lock to the model
 func (agent *LogicalDeviceAgent) getLogicalDeviceWithoutLock() (*voltha.LogicalDevice, error) {
 	log.Debug("getLogicalDeviceWithoutLock")
-	logicalDevice := agent.clusterDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 0, false, "")
+	logicalDevice := agent.clusterDataProxy.Get(context.Background(), "/logical_devices/"+agent.logicalDeviceId, 0, false, "")
 	if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 		//log.Debug("getLogicalDeviceWithoutLock", log.Fields{"ldevice": lDevice})
 		return lDevice, nil
@@ -466,7 +472,8 @@
 
 //updateLogicalDeviceWithoutLock updates the model with the logical device.  It clones the logicaldevice before saving it
 func (agent *LogicalDeviceAgent) updateLogicalDeviceWithoutLock(logicalDevice *voltha.LogicalDevice) error {
-	afterUpdate := agent.clusterDataProxy.Update("/logical_devices/"+agent.logicalDeviceId, logicalDevice, false, "")
+	updateCtx := context.WithValue(context.Background(), model.RequestTimestamp, time.Now().UnixNano())
+	afterUpdate := agent.clusterDataProxy.Update(updateCtx, "/logical_devices/"+agent.logicalDeviceId, logicalDevice, false, "")
 	if afterUpdate == nil {
 		return status.Errorf(codes.Internal, "failed-updating-logical-device:%s", agent.logicalDeviceId)
 	}
@@ -1187,6 +1194,7 @@
 	// Set the proxy and callback for that port
 	agent.portProxiesLock.Lock()
 	agent.portProxies[port.Id] = agent.clusterDataProxy.CreateProxy(
+		context.Background(),
 		fmt.Sprintf("/logical_devices/%s/ports/%s", agent.logicalDeviceId, port.Id),
 		false)
 	agent.portProxies[port.Id].RegisterCallback(model.POST_UPDATE, agent.portUpdated)
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
index 96e3541..b871cd4 100644
--- a/rw_core/core/logical_device_manager.go
+++ b/rw_core/core/logical_device_manager.go
@@ -30,27 +30,24 @@
 )
 
 type LogicalDeviceManager struct {
-	logicalDeviceAgents        map[string]*LogicalDeviceAgent
-	core                       *Core
-	deviceMgr                  *DeviceManager
-	grpcNbiHdlr                *APIHandler
-	adapterProxy               *AdapterProxy
-	kafkaICProxy               *kafka.InterContainerProxy
-	clusterDataProxy           *model.Proxy
-	exitChannel                chan int
-	lockLogicalDeviceAgentsMap sync.RWMutex
-	defaultTimeout             int64
+	logicalDeviceAgents sync.Map
+	core                *Core
+	deviceMgr           *DeviceManager
+	grpcNbiHdlr         *APIHandler
+	adapterProxy        *AdapterProxy
+	kafkaICProxy        *kafka.InterContainerProxy
+	clusterDataProxy    *model.Proxy
+	exitChannel         chan int
+	defaultTimeout      int64
 }
 
 func newLogicalDeviceManager(core *Core, deviceMgr *DeviceManager, kafkaICProxy *kafka.InterContainerProxy, cdProxy *model.Proxy, timeout int64) *LogicalDeviceManager {
 	var logicalDeviceMgr LogicalDeviceManager
 	logicalDeviceMgr.core = core
 	logicalDeviceMgr.exitChannel = make(chan int, 1)
-	logicalDeviceMgr.logicalDeviceAgents = make(map[string]*LogicalDeviceAgent)
 	logicalDeviceMgr.deviceMgr = deviceMgr
 	logicalDeviceMgr.kafkaICProxy = kafkaICProxy
 	logicalDeviceMgr.clusterDataProxy = cdProxy
-	logicalDeviceMgr.lockLogicalDeviceAgentsMap = sync.RWMutex{}
 	logicalDeviceMgr.defaultTimeout = timeout
 	return &logicalDeviceMgr
 }
@@ -83,35 +80,26 @@
 }
 
 func (ldMgr *LogicalDeviceManager) addLogicalDeviceAgentToMap(agent *LogicalDeviceAgent) {
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	if _, exist := ldMgr.logicalDeviceAgents[agent.logicalDeviceId]; !exist {
-		ldMgr.logicalDeviceAgents[agent.logicalDeviceId] = agent
+	if _, exist := ldMgr.logicalDeviceAgents.Load(agent.logicalDeviceId); !exist {
+		ldMgr.logicalDeviceAgents.Store(agent.logicalDeviceId, agent)
 	}
 }
 
 func (ldMgr *LogicalDeviceManager) isLogicalDeviceInCache(logicalDeviceId string) bool {
-	ldMgr.lockLogicalDeviceAgentsMap.RLock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
-	_, inCache := ldMgr.logicalDeviceAgents[logicalDeviceId]
+	_, inCache := ldMgr.logicalDeviceAgents.Load(logicalDeviceId)
 	return inCache
 }
 
 // getLogicalDeviceAgent returns the logical device agent.  If the device is not in memory then the device will
 // be loaded from dB and a logical device agent created to managed it.
 func (ldMgr *LogicalDeviceManager) getLogicalDeviceAgent(logicalDeviceId string) *LogicalDeviceAgent {
-	ldMgr.lockLogicalDeviceAgentsMap.RLock()
-	if agent, ok := ldMgr.logicalDeviceAgents[logicalDeviceId]; ok {
-		ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
-		return agent
+	if agent, ok := ldMgr.logicalDeviceAgents.Load(logicalDeviceId); ok {
+		return agent.(*LogicalDeviceAgent)
 	} else {
 		//	Try to load into memory - loading will also create the logical device agent
-		ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
 		if err := ldMgr.load(logicalDeviceId); err == nil {
-			ldMgr.lockLogicalDeviceAgentsMap.RLock()
-			defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
-			if agent, ok = ldMgr.logicalDeviceAgents[logicalDeviceId]; ok {
-				return agent
+			if agent, ok = ldMgr.logicalDeviceAgents.Load(logicalDeviceId); ok {
+				return agent.(*LogicalDeviceAgent)
 			}
 		}
 	}
@@ -119,9 +107,7 @@
 }
 
 func (ldMgr *LogicalDeviceManager) deleteLogicalDeviceAgent(logicalDeviceId string) {
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	delete(ldMgr.logicalDeviceAgents, logicalDeviceId)
+	ldMgr.logicalDeviceAgents.Delete(logicalDeviceId)
 }
 
 // GetLogicalDevice provides a cloned most up to date logical device.  If device is not in memory
@@ -137,20 +123,21 @@
 func (ldMgr *LogicalDeviceManager) listManagedLogicalDevices() (*voltha.LogicalDevices, error) {
 	log.Debug("listManagedLogicalDevices")
 	result := &voltha.LogicalDevices{}
-	ldMgr.lockLogicalDeviceAgentsMap.RLock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
-	for _, agent := range ldMgr.logicalDeviceAgents {
+	ldMgr.logicalDeviceAgents.Range(func(key, value interface{}) bool {
+		agent := value.(*LogicalDeviceAgent)
 		if ld, _ := agent.GetLogicalDevice(); ld != nil {
 			result.Items = append(result.Items, ld)
 		}
-	}
+		return true
+	})
+
 	return result, nil
 }
 
 func (ldMgr *LogicalDeviceManager) listLogicalDevices() (*voltha.LogicalDevices, error) {
 	log.Debug("ListAllLogicalDevices")
 	result := &voltha.LogicalDevices{}
-	if logicalDevices := ldMgr.clusterDataProxy.List("/logical_devices", 0, false, ""); logicalDevices != nil {
+	if logicalDevices := ldMgr.clusterDataProxy.List(context.Background(), "/logical_devices", 0, false, ""); logicalDevices != nil {
 		for _, logicalDevice := range logicalDevices.([]interface{}) {
 			if agent := ldMgr.getLogicalDeviceAgent(logicalDevice.(*voltha.LogicalDevice).Id); agent == nil {
 				agent = newLogicalDeviceAgent(
@@ -203,23 +190,23 @@
 func (ldMgr *LogicalDeviceManager) stopManagingLogicalDeviceWithDeviceId(id string) string {
 	log.Infow("stop-managing-logical-device", log.Fields{"deviceId": id})
 	// Go over the list of logical device agents to find the one which has rootDeviceId as id
-	ldMgr.lockLogicalDeviceAgentsMap.RLock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
-	for ldId, ldAgent := range ldMgr.logicalDeviceAgents {
+	var ldId = ""
+	ldMgr.logicalDeviceAgents.Range(func(key, value interface{}) bool {
+		ldAgent := value.(*LogicalDeviceAgent)
 		if ldAgent.rootDeviceId == id {
-			log.Infow("stopping-logical-device-agent", log.Fields{"lDeviceId": ldId})
+			log.Infow("stopping-logical-device-agent", log.Fields{"lDeviceId": key})
 			ldAgent.stop(nil)
-			delete(ldMgr.logicalDeviceAgents, ldId)
-			return ldId
+			ldId = key.(string)
+			ldMgr.logicalDeviceAgents.Delete(ldId)
 		}
-	}
-	return ""
+		return true
+	})
+	return ldId
 }
 
 //getLogicalDeviceFromModel retrieves the logical device data from the model.
 func (ldMgr *LogicalDeviceManager) getLogicalDeviceFromModel(lDeviceId string) (*voltha.LogicalDevice, error) {
-
-	if logicalDevice := ldMgr.clusterDataProxy.Get("/logical_devices/"+lDeviceId, 0, false, ""); logicalDevice != nil {
+	if logicalDevice := ldMgr.clusterDataProxy.Get(context.Background(), "/logical_devices/"+lDeviceId, 0, false, ""); logicalDevice != nil {
 		if lDevice, ok := logicalDevice.(*voltha.LogicalDevice); ok {
 			return lDevice, nil
 		}
@@ -232,9 +219,7 @@
 	log.Debugw("loading-logical-device", log.Fields{"lDeviceId": lDeviceId})
-	// To prevent a race condition, let's hold the logical device agent map lock.  This will prevent a loading and
-	// a create logical device callback from occurring at the same time.
-	ldMgr.lockLogicalDeviceAgentsMap.Lock()
-	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
-	if ldAgent, _ := ldMgr.logicalDeviceAgents[lDeviceId]; ldAgent == nil {
+	// The agents map is a sync.Map, so no explicit lock is needed while checking whether the
+	// logical device is already being managed before loading it from the KV store.
+	if ldAgent, _ := ldMgr.logicalDeviceAgents.Load(lDeviceId); ldAgent == nil {
 		// Proceed with the loading only if the logical device exist in the Model (could have been deleted)
 		if _, err := ldMgr.getLogicalDeviceFromModel(lDeviceId); err == nil {
 			// Create a temp logical device Agent and let it load from memory
@@ -243,7 +228,7 @@
 				agent.stop(nil)
 				return err
 			}
-			ldMgr.logicalDeviceAgents[agent.logicalDeviceId] = agent
+			ldMgr.logicalDeviceAgents.Store(agent.logicalDeviceId, agent)
 		}
 	}
 	// TODO: load the child device
@@ -430,7 +415,7 @@
 }
 
 func (ldMgr *LogicalDeviceManager) setupUNILogicalPorts(ctx context.Context, childDevice *voltha.Device) error {
-	log.Debugw("setupUNILogicalPorts", log.Fields{"childDeviceId": childDevice.Id, "parentDeviceId": childDevice.ParentId})
+	log.Debugw("setupUNILogicalPorts", log.Fields{"childDeviceId": childDevice.Id, "parentDeviceId": childDevice.ParentId, "current-data": childDevice})
 	// Sanity check
 	if childDevice.Root {
 		return errors.New("Device-root")
@@ -473,7 +458,7 @@
 }
 
 func (ldMgr *LogicalDeviceManager) updatePortsState(device *voltha.Device, state voltha.AdminState_AdminState) error {
-	log.Debugw("updatePortsState", log.Fields{"deviceId": device.Id, "state": state})
+	log.Debugw("updatePortsState", log.Fields{"deviceId": device.Id, "state": state, "current-data": device})
 
 	var ldId *string
 	var err error