VOL-1775 VOL-1779 VOL-1780: Fix several issues affecting overall stability
- Apply changes reported by the Go race detector
- Add a Version attribute to the KVPair object
- Add a context object to the db/model API
- Carry timestamp information through the context to help decide
  whether a revision change should be applied
- Replace the proxy access control mechanism with an etcd reservation mechanism
Change-Id: If3d142a73b1da0d64fa6a819530f297dbfada2d3
diff --git a/db/kvstore/client.go b/db/kvstore/client.go
index 34ab711..937eefe 100644
--- a/db/kvstore/client.go
+++ b/db/kvstore/client.go
@@ -38,6 +38,7 @@
type KVPair struct {
Key string
Value interface{}
+ Version int64
Session string
Lease int64
}
@@ -47,12 +48,13 @@
}
// NewKVPair creates a new KVPair object
-func NewKVPair(key string, value interface{}, session string, lease int64) *KVPair {
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
kv := new(KVPair)
kv.Key = key
kv.Value = value
kv.Session = session
kv.Lease = lease
+ kv.Version = version
return kv
}
@@ -61,14 +63,16 @@
EventType int
Key interface{}
Value interface{}
+ Version int64
}
// NewEvent creates a new Event object
-func NewEvent(eventType int, key interface{}, value interface{}) *Event {
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
evnt := new(Event)
evnt.EventType = eventType
evnt.Key = key
evnt.Value = value
+ evnt.Version = version
return evnt
}
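As an illustration of the new Version attribute, here is a minimal, self-contained sketch of a kvstore consumer that carries the version through an Event and drops stale updates. The applyIfNewer helper is hypothetical; only NewKVPair, NewEvent and the PUT constant come from the package above.

    package main

    import (
        "fmt"

        "github.com/opencord/voltha-go/db/kvstore"
    )

    // applyIfNewer is a hypothetical helper: it applies a watch event only when
    // its KV store version is strictly newer than the version currently in memory.
    func applyIfNewer(current int64, event *kvstore.Event) (int64, bool) {
        if event.Version <= current {
            return current, false // stale or duplicate update: ignore it
        }
        return event.Version, true
    }

    func main() {
        // NewKVPair and NewEvent now take the store version as the last argument;
        // backends that cannot report one (e.g. consul) pass -1.
        pair := kvstore.NewKVPair("service/voltha/data/core/devices/1234", []byte("blob"), "", 0, 3)
        evt := kvstore.NewEvent(kvstore.PUT, pair.Key, pair.Value, pair.Version)

        if newVersion, ok := applyIfNewer(2, evt); ok {
            fmt.Println("applying revision with version", newVersion)
        }
    }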
diff --git a/db/kvstore/consulclient.go b/db/kvstore/consulclient.go
index 2d02342..4b25b5f 100644
--- a/db/kvstore/consulclient.go
+++ b/db/kvstore/consulclient.go
@@ -79,7 +79,7 @@
}
m := make(map[string]*KVPair)
for _, kvp := range kvps {
- m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0)
+ m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1)
}
return m, nil
}
@@ -100,7 +100,7 @@
return nil, err
}
if kvp != nil {
- return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0), nil
+ return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1), nil
}
return nil, nil
@@ -455,7 +455,7 @@
default:
if err != nil {
log.Warnw("error-from-watch", log.Fields{"error": err})
- ch <- NewEvent(CONNECTIONDOWN, key, []byte(""))
+ ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
} else {
log.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
}
@@ -469,12 +469,12 @@
} else {
log.Debugw("update-received", log.Fields{"pair": pair})
if pair == nil {
- ch <- NewEvent(DELETE, key, []byte(""))
+ ch <- NewEvent(DELETE, key, []byte(""), -1)
} else if !c.isKVEqual(pair, previousKVPair) {
// Push the change onto the channel if the data has changed
// For now just assume it's a PUT change
log.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
- ch <- NewEvent(PUT, pair.Key, pair.Value)
+ ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
}
previousKVPair = pair
lastIndex = meta.LastIndex
diff --git a/db/kvstore/etcdclient.go b/db/kvstore/etcdclient.go
index 6935296..4f6f90b 100644
--- a/db/kvstore/etcdclient.go
+++ b/db/kvstore/etcdclient.go
@@ -74,7 +74,7 @@
}
m := make(map[string]*KVPair)
for _, ev := range resp.Kvs {
- m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease)
+ m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
}
return m, nil
}
@@ -94,7 +94,7 @@
}
for _, ev := range resp.Kvs {
// Only one value is returned
- return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease), nil
+ return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
}
return nil, nil
}
@@ -399,7 +399,7 @@
for resp := range channel {
for _, ev := range resp.Events {
//log.Debugf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
- ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value)
+ ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
}
}
log.Debug("stop-listening-on-channel ...")
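For context, etcd keeps a per-key Version counter (1 on creation, incremented on every put), and that is the value the client now copies into KVPair and Event. Below is a hedged sketch of reading it directly with the etcd v3 client; the endpoint is assumed and the import path should match whatever etcd client version this repository vendors.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/coreos/etcd/clientv3"
    )

    func main() {
        // Assumed local etcd endpoint.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"localhost:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        resp, err := cli.Get(context.Background(), "service/voltha/data/core/devices/1234")
        if err != nil {
            panic(err)
        }
        for _, kv := range resp.Kvs {
            // kv.Version is the per-key counter now carried into KVPair and Event.
            fmt.Printf("key=%s version=%d\n", kv.Key, kv.Version)
        }
    }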
diff --git a/db/model/branch.go b/db/model/branch.go
index 5502e63..3389291 100644
--- a/db/model/branch.go
+++ b/db/model/branch.go
@@ -26,7 +26,7 @@
// Branch structure is used to classify a collection of transaction based revisions
type Branch struct {
- sync.RWMutex
+ mutex sync.RWMutex
Node *node
Txid string
Origin Revision
@@ -85,8 +85,8 @@
// SetLatest assigns the latest revision for this branch
func (b *Branch) SetLatest(latest Revision) {
- b.Lock()
- defer b.Unlock()
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
if b.Latest != nil {
log.Debugw("updating-latest-revision", log.Fields{"current": b.Latest.GetHash(), "new": latest.GetHash()})
@@ -119,16 +119,16 @@
// GetLatest retrieves the latest revision of the branch
func (b *Branch) GetLatest() Revision {
- b.Lock()
- defer b.Unlock()
+ b.mutex.RLock()
+ defer b.mutex.RUnlock()
return b.Latest
}
// GetOrigin retrieves the original revision of the branch
func (b *Branch) GetOrigin() Revision {
- b.Lock()
- defer b.Unlock()
+ b.mutex.RLock()
+ defer b.mutex.RUnlock()
return b.Origin
}
@@ -142,8 +142,8 @@
// GetRevision pulls a revision entry at the specified hash
func (b *Branch) GetRevision(hash string) Revision {
- b.Lock()
- defer b.Unlock()
+ b.mutex.RLock()
+ defer b.mutex.RUnlock()
if revision, ok := b.Revisions[hash]; ok {
return revision
@@ -154,16 +154,16 @@
// SetRevision updates a revision entry at the specified hash
func (b *Branch) SetRevision(hash string, revision Revision) {
- b.Lock()
- defer b.Unlock()
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
b.Revisions[hash] = revision
}
// DeleteRevision removes a revision with the specified hash
func (b *Branch) DeleteRevision(hash string) {
- b.Lock()
- defer b.Unlock()
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
if _, ok := b.Revisions[hash]; ok {
delete(b.Revisions, hash)
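The branch changes above follow a pattern applied throughout this patch: replace the embedded sync.RWMutex with an unexported named mutex so callers cannot lock the struct from outside the type, and take read locks in getters. A minimal stdlib-only sketch of that pattern:

    package main

    import "sync"

    // counter guards its state with an unexported mutex so callers cannot lock
    // or unlock it from the outside, and readers take only a shared (read) lock.
    type counter struct {
        mutex sync.RWMutex
        value int
    }

    func (c *counter) Get() int {
        c.mutex.RLock() // concurrent readers may proceed together
        defer c.mutex.RUnlock()
        return c.value
    }

    func (c *counter) Set(v int) {
        c.mutex.Lock() // writers get exclusive access
        defer c.mutex.Unlock()
        c.value = v
    }

    func main() {
        c := &counter{}
        c.Set(42)
        _ = c.Get()
    }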
diff --git a/db/model/child_type.go b/db/model/child_type.go
index da6f688..250de9c 100644
--- a/db/model/child_type.go
+++ b/db/model/child_type.go
@@ -27,18 +27,50 @@
"sync"
)
-type singletonChildTypeCache struct {
+type childTypesSingleton struct {
+ mutex sync.RWMutex
Cache map[interface{}]map[string]*ChildType
}
-var instanceChildTypeCache *singletonChildTypeCache
-var onceChildTypeCache sync.Once
+var instanceChildTypes *childTypesSingleton
+var onceChildTypes sync.Once
-func getChildTypeCache() *singletonChildTypeCache {
- onceChildTypeCache.Do(func() {
- instanceChildTypeCache = &singletonChildTypeCache{}
+func getChildTypes() *childTypesSingleton {
+ onceChildTypes.Do(func() {
+ instanceChildTypes = &childTypesSingleton{}
})
- return instanceChildTypeCache
+ return instanceChildTypes
+}
+
+func (s *childTypesSingleton) GetCache() map[interface{}]map[string]*ChildType {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ return s.Cache
+}
+
+func (s *childTypesSingleton) SetCache(cache map[interface{}]map[string]*ChildType) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.Cache = cache
+}
+
+func (s *childTypesSingleton) GetCacheEntry(key interface{}) (map[string]*ChildType, bool) {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ childTypeMap, exists := s.Cache[key]
+ return childTypeMap, exists
+}
+
+func (s *childTypesSingleton) SetCacheEntry(key interface{}, value map[string]*ChildType) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.Cache[key] = value
+}
+
+func (s *childTypesSingleton) ResetCache() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.Cache = make(map[interface{}]map[string]*ChildType)
}
// ChildType structure contains construct details of an object
@@ -58,12 +90,12 @@
var names map[string]*ChildType
var namesExist bool
- if getChildTypeCache().Cache == nil {
- getChildTypeCache().Cache = make(map[interface{}]map[string]*ChildType)
+ if getChildTypes().Cache == nil {
+ getChildTypes().Cache = make(map[interface{}]map[string]*ChildType)
}
msgType := reflect.TypeOf(cls)
- inst := getChildTypeCache()
+ inst := getChildTypes()
if names, namesExist = inst.Cache[msgType.String()]; !namesExist {
names = make(map[string]*ChildType)
@@ -127,9 +159,10 @@
}
}
- getChildTypeCache().Cache[msgType.String()] = names
+ getChildTypes().Cache[msgType.String()] = names
} else {
- log.Debugf("Cache entry for %s: %+v", msgType.String(), inst.Cache[msgType.String()])
+ entry, _ := inst.GetCacheEntry(msgType.String())
+ log.Debugf("Cache entry for %s: %+v", msgType.String(), entry)
}
return names
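For illustration, here is a small sketch of helpers that could sit alongside this file and use the new locked accessors on the child-type singleton. The helper names are hypothetical, and the snippet is not standalone since it relies on unexported members of package model.

    // Hypothetical helpers, assumed to live in package model alongside this file.

    func cacheChildTypes(msgType string, types map[string]*ChildType) {
        if getChildTypes().GetCache() == nil {
            getChildTypes().ResetCache() // allocates the outer map under the write lock
        }
        getChildTypes().SetCacheEntry(msgType, types)
    }

    func lookupChildTypes(msgType string) (map[string]*ChildType, bool) {
        return getChildTypes().GetCacheEntry(msgType)
    }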
diff --git a/db/model/child_type_test.go b/db/model/child_type_test.go
index 349baa6..4725975 100644
--- a/db/model/child_type_test.go
+++ b/db/model/child_type_test.go
@@ -39,10 +39,10 @@
// Verify that the cache contains an entry for types on which ChildrenFields was performed
func TestChildType_02_Cache_Keys(t *testing.T) {
- if _, exists := getChildTypeCache().Cache[reflect.TypeOf(&voltha.Device{}).String()]; !exists {
+ if _, exists := getChildTypes().Cache[reflect.TypeOf(&voltha.Device{}).String()]; !exists {
t.Errorf("getChildTypeCache().Cache should have an entry of type: %+v\n", reflect.TypeOf(&voltha.Device{}).String())
}
- for k := range getChildTypeCache().Cache {
+ for k := range getChildTypes().Cache {
t.Logf("getChildTypeCache().Cache Key:%+v\n", k)
}
}
diff --git a/db/model/model.go b/db/model/model.go
index 18ff905..3446303 100644
--- a/db/model/model.go
+++ b/db/model/model.go
@@ -23,3 +23,15 @@
log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
defer log.CleanUp()
}
+
+const (
+ // period to determine when data requires a refresh (in milliseconds)
+ // TODO: make this configurable?
+ DataRefreshPeriod int64 = 5000
+
+ // Attribute used to store a timestamp in the context object
+ RequestTimestamp = "request-timestamp"
+
+ // Time limit for a KV path reservation (in seconds)
+ ReservationTTL int64 = 180
+)
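The RequestTimestamp key is what callers are expected to plant in the context handed to the db/model API; UpdateData (later in this change) compares it against a revision's last-update time and refuses older data. A minimal sketch of stamping a context, assuming the timestamp is set where a request enters the core:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // RequestTimestamp mirrors the constant defined above in db/model/model.go.
    const RequestTimestamp = "request-timestamp"

    func main() {
        // Stamp the context with the request time in nanoseconds; UpdateData
        // compares this value against the revision's lastUpdate.UnixNano() and
        // keeps the existing revision when the incoming data is older.
        ctx := context.WithValue(context.Background(), RequestTimestamp, time.Now().UnixNano())

        if ts, ok := ctx.Value(RequestTimestamp).(int64); ok {
            fmt.Println("request timestamp (ns):", ts)
        }
    }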
diff --git a/db/model/node.go b/db/model/node.go
index 207df09..fcd3b5f 100644
--- a/db/model/node.go
+++ b/db/model/node.go
@@ -20,6 +20,7 @@
// TODO: proper logging
import (
+ "context"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/opencord/voltha-go/common/log"
@@ -32,10 +33,6 @@
// When a branch has no transaction id, everything gets stored in NONE
const (
NONE string = "none"
-
- // period to determine when data requires a refresh (in seconds)
- // TODO: make this configurable?
- DATA_REFRESH_PERIOD int64 = 5000
)
// Node interface is an abstraction of the node data structure
@@ -43,10 +40,14 @@
MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple)
// CRUD functions
- Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
- Get(path string, hash string, depth int, deep bool, txid string) interface{}
- Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
- Remove(path string, txid string, makeBranch MakeBranchFunction) Revision
+ Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
+ Get(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{}
+ List(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{}
+ Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
+ Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision
+ CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy
+
+ GetProxy() *Proxy
MakeBranch(txid string) *Branch
DeleteBranch(txid string)
@@ -55,16 +56,12 @@
MakeTxBranch() string
DeleteTxBranch(txid string)
FoldTxBranch(txid string)
-
- CreateProxy(path string, exclusive bool) *Proxy
- GetProxy() *Proxy
}
type node struct {
- mutex sync.RWMutex
- Root *root
- Type interface{}
-
+ mutex sync.RWMutex
+ Root *root
+ Type interface{}
Branches map[string]*Branch
Tags map[string]Revision
Proxy *Proxy
@@ -133,7 +130,7 @@
log.Debugw("saving-latest-data", log.Fields{"hash": revision.GetHash(), "data": revision.GetData()})
// Tag a timestamp to that revision
revision.SetLastUpdate()
- GetRevCache().Cache.Store(revision.GetName(), revision)
+ GetRevCache().Set(revision.GetName(), revision)
}
branch.SetLatest(revision)
}
@@ -148,13 +145,13 @@
for _, change := range changeAnnouncement {
log.Debugw("adding-callback",
log.Fields{
- "callbacks": n.Proxy.getCallbacks(change.Type),
+ "callbacks": n.GetProxy().getCallbacks(change.Type),
"type": change.Type,
"previousData": change.PreviousData,
"latestData": change.LatestData,
})
n.Root.AddCallback(
- n.Proxy.InvokeCallbacks,
+ n.GetProxy().InvokeCallbacks,
change.Type,
true,
change.PreviousData,
@@ -253,7 +250,7 @@
}
// Get retrieves the data from a node tree that resides at the specified path
-func (n *node) List(path string, hash string, depth int, deep bool, txid string) interface{} {
+func (n *node) List(ctx context.Context, path string, hash string, depth int, deep bool, txid string) interface{} {
n.mutex.Lock()
defer n.mutex.Unlock()
@@ -281,7 +278,7 @@
var result interface{}
var prList []interface{}
- if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil {
+ if pr := rev.LoadFromPersistence(ctx, path, txid, nil); pr != nil {
for _, revEntry := range pr {
prList = append(prList, revEntry.GetData())
}
@@ -292,7 +289,7 @@
}
// Get retrieves the data from a node tree that resides at the specified path
-func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
+func (n *node) Get(ctx context.Context, path string, hash string, depth int, reconcile bool, txid string) interface{} {
n.mutex.Lock()
defer n.mutex.Unlock()
@@ -323,9 +320,9 @@
// 1. Start with the cache which stores revisions by watch names
// 2. Then look in the revision tree, especially if it's a sub-path such as /devices/1234/flows
// 3. Move on to the KV store if that path cannot be found or if the entry has expired
- if entry, exists := GetRevCache().Cache.Load(path); exists && entry.(Revision) != nil {
+ if entry, exists := GetRevCache().Get(path); exists && entry.(Revision) != nil {
entryAge := time.Now().Sub(entry.(Revision).GetLastUpdate()).Nanoseconds() / int64(time.Millisecond)
- if entryAge < DATA_REFRESH_PERIOD {
+ if entryAge < DataRefreshPeriod {
log.Debugw("using-cache-entry", log.Fields{
"path": path,
"hash": hash,
@@ -335,7 +332,7 @@
} else {
log.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
}
- } else if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+ } else if result = n.getPath(ctx, rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
log.Debugw("using-rev-tree-entry", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
return result
} else {
@@ -357,7 +354,7 @@
// If we got to this point, we are either trying to reconcile with the db
// or we simply failed at getting information from memory
if n.Root.KvStore != nil {
- if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil && len(pr) > 0 {
+ if pr := rev.LoadFromPersistence(ctx, path, txid, nil); pr != nil && len(pr) > 0 {
// Did we receive a single or multiple revisions?
if len(pr) > 1 {
var revs []interface{}
@@ -375,7 +372,7 @@
}
//getPath traverses the specified path and retrieves the data associated to it
-func (n *node) getPath(rev Revision, path string, depth int) interface{} {
+func (n *node) getPath(ctx context.Context, rev Revision, path string, depth int) interface{} {
if path == "" {
return n.getData(rev, depth)
}
@@ -406,7 +403,7 @@
return nil
} else {
childNode := childRev.GetNode()
- return childNode.getPath(childRev, path, depth)
+ return childNode.getPath(ctx, childRev, path, depth)
}
} else {
var response []interface{}
@@ -430,11 +427,13 @@
}
return response
}
+ } else if children := rev.GetChildren(name); children != nil && len(children) > 0 {
+ childRev := children[0]
+ childNode := childRev.GetNode()
+ return childNode.getPath(ctx, childRev, path, depth)
}
- childRev := rev.GetChildren(name)[0]
- childNode := childRev.GetNode()
- return childNode.getPath(childRev, path, depth)
+ return nil
}
// getData retrieves the data from a node revision
@@ -454,7 +453,7 @@
}
// Update changes the content of a node at the specified path with the provided data
-func (n *node) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
n.mutex.Lock()
defer n.mutex.Unlock()
@@ -475,7 +474,7 @@
log.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
}
if path == "" {
- return n.doUpdate(branch, data, strict)
+ return n.doUpdate(ctx, branch, data, strict)
}
rev := branch.GetLatest()
@@ -493,7 +492,7 @@
var children []Revision
if field == nil {
- return n.doUpdate(branch, data, strict)
+ return n.doUpdate(ctx, branch, data, strict)
}
if field.IsContainer {
@@ -523,11 +522,11 @@
// Save proxy in child node to ensure callbacks are called later on
// only assign in cases of non sub-folder proxies, i.e. "/"
- if childNode.Proxy == nil && n.Proxy != nil && n.Proxy.getFullPath() == "" {
+ if childNode.Proxy == nil && n.Proxy != nil && n.GetProxy().getFullPath() == "" {
childNode.Proxy = n.Proxy
}
- newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+ newChildRev := childNode.Update(ctx, path, data, strict, txid, makeBranch)
if newChildRev.GetHash() == childRev.GetHash() {
if newChildRev != childRev {
@@ -559,7 +558,7 @@
children = append(children, newChildRev)
}
- updatedRev := rev.UpdateChildren(name, children, branch)
+ updatedRev := rev.UpdateChildren(ctx, name, children, branch)
n.makeLatest(branch, updatedRev, nil)
updatedRev.ChildDrop(name, childRev.GetHash())
@@ -572,12 +571,12 @@
} else {
childRev := rev.GetChildren(name)[0]
childNode := childRev.GetNode()
- newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+ newChildRev := childNode.Update(ctx, path, data, strict, txid, makeBranch)
branch.LatestLock.Lock()
defer branch.LatestLock.Unlock()
- updatedRev := rev.UpdateChildren(name, []Revision{newChildRev}, branch)
+ updatedRev := rev.UpdateChildren(ctx, name, []Revision{newChildRev}, branch)
n.makeLatest(branch, updatedRev, nil)
updatedRev.ChildDrop(name, childRev.GetHash())
@@ -588,7 +587,7 @@
return nil
}
-func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
+func (n *node) doUpdate(ctx context.Context, branch *Branch, data interface{}, strict bool) Revision {
log.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
@@ -613,7 +612,7 @@
log.Debugf("checking access violations")
}
- rev := branch.GetLatest().UpdateData(data, branch)
+ rev := branch.GetLatest().UpdateData(ctx, data, branch)
changes := []ChangeTuple{{POST_UPDATE, branch.GetLatest().GetData(), rev.GetData()}}
n.makeLatest(branch, rev, changes)
@@ -623,7 +622,7 @@
}
// Add inserts a new node at the specified path with the provided data
-func (n *node) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
n.mutex.Lock()
defer n.mutex.Unlock()
@@ -688,7 +687,7 @@
children = append(children, childRev)
- updatedRev := rev.UpdateChildren(name, children, branch)
+ updatedRev := rev.UpdateChildren(ctx, name, children, branch)
changes := []ChangeTuple{{POST_ADD, nil, childRev.GetData()}}
childRev.SetupWatch(childRev.GetName())
@@ -718,7 +717,7 @@
}
childNode := childRev.GetNode()
- newChildRev := childNode.Add(path, data, txid, makeBranch)
+ newChildRev := childNode.Add(ctx, path, data, txid, makeBranch)
// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
newChildRev.SetName(name + "/" + keyValue.(string))
@@ -732,7 +731,7 @@
children = append(children, newChildRev)
}
- updatedRev := rev.UpdateChildren(name, children, branch)
+ updatedRev := rev.UpdateChildren(ctx, name, children, branch)
n.makeLatest(branch, updatedRev, nil)
updatedRev.ChildDrop(name, childRev.GetHash())
@@ -749,7 +748,7 @@
}
// Remove eliminates a node at the specified path
-func (n *node) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+func (n *node) Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision {
n.mutex.Lock()
defer n.mutex.Unlock()
@@ -805,7 +804,7 @@
if childNode.Proxy == nil {
childNode.Proxy = n.Proxy
}
- newChildRev := childNode.Remove(path, txid, makeBranch)
+ newChildRev := childNode.Remove(ctx, path, txid, makeBranch)
branch.LatestLock.Lock()
defer branch.LatestLock.Unlock()
@@ -833,7 +832,7 @@
}
childRev.StorageDrop(txid, true)
- GetRevCache().Cache.Delete(childRev.GetName())
+ GetRevCache().Delete(childRev.GetName())
branch.LatestLock.Lock()
defer branch.LatestLock.Unlock()
@@ -950,11 +949,11 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ node Proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// CreateProxy returns a reference to a sub-tree of the data model
-func (n *node) CreateProxy(path string, exclusive bool) *Proxy {
- return n.createProxy(path, path, n, exclusive)
+func (n *node) CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy {
+ return n.createProxy(ctx, path, path, n, exclusive)
}
-func (n *node) createProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+func (n *node) createProxy(ctx context.Context, path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
log.Debugw("node-create-proxy", log.Fields{
"node-type": reflect.ValueOf(n.Type).Type(),
"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
@@ -973,7 +972,6 @@
partition := strings.SplitN(path, "/", 2)
name := partition[0]
var nodeType interface{}
- // Node type is chosen depending on if we have reached the end of path or not
if len(partition) < 2 {
path = ""
nodeType = n.Type
@@ -1020,8 +1018,6 @@
children = make([]Revision, len(rev.GetChildren(name)))
copy(children, rev.GetChildren(name))
- // Try to find a matching revision in memory
- // If not found try the db
var childRev Revision
if _, childRev = n.findRevByKey(children, field.Key, keyValue); childRev != nil {
log.Debugw("found-revision-matching-key-in-memory", log.Fields{
@@ -1030,7 +1026,7 @@
"fullPath": fullPath,
"name": name,
})
- } else if revs := n.GetBranch(NONE).GetLatest().LoadFromPersistence(fullPath, "", nil); revs != nil && len(revs) > 0 {
+ } else if revs := n.GetBranch(NONE).GetLatest().LoadFromPersistence(ctx, fullPath, "", nil); revs != nil && len(revs) > 0 {
log.Debugw("found-revision-matching-key-in-db", log.Fields{
"node-type": reflect.ValueOf(n.Type).Type(),
"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
@@ -1048,7 +1044,7 @@
}
if childRev != nil {
childNode := childRev.GetNode()
- return childNode.createProxy(path, fullPath, n, exclusive)
+ return childNode.createProxy(ctx, path, fullPath, n, exclusive)
}
} else {
log.Errorw("cannot-access-index-of-empty-container", log.Fields{
@@ -1067,7 +1063,7 @@
})
childRev := rev.GetChildren(name)[0]
childNode := childRev.GetNode()
- return childNode.createProxy(path, fullPath, n, exclusive)
+ return childNode.createProxy(ctx, path, fullPath, n, exclusive)
}
} else {
log.Debugw("field-object-is-nil", log.Fields{
@@ -1116,12 +1112,12 @@
n.Proxy = NewProxy(r, n, parentNode, path, fullPath, exclusive)
} else {
log.Debugw("node-has-existing-proxy", log.Fields{
- "node-type": reflect.ValueOf(n.Proxy.Node.Type).Type(),
- "parent-node-type": reflect.ValueOf(n.Proxy.ParentNode.Type).Type(),
- "path": n.Proxy.Path,
- "fullPath": n.Proxy.FullPath,
+ "node-type": reflect.ValueOf(n.GetProxy().Node.Type).Type(),
+ "parent-node-type": reflect.ValueOf(n.GetProxy().ParentNode.Type).Type(),
+ "path": n.GetProxy().Path,
+ "fullPath": n.GetProxy().FullPath,
})
- if n.Proxy.Exclusive {
+ if n.GetProxy().Exclusive {
log.Error("node is already owned exclusively")
}
}
@@ -1160,3 +1156,6 @@
func (n *node) GetRoot() *root {
return n.Root
}
+func (n *node) SetRoot(root *root) {
+ n.Root = root
+}
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index 297a740..6900c5d 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -17,6 +17,7 @@
import (
"bytes"
+ "context"
"crypto/md5"
"fmt"
"github.com/golang/protobuf/proto"
@@ -35,6 +36,16 @@
Cache sync.Map
}
+func (s *revCacheSingleton) Get(path string) (interface{}, bool) {
+ return s.Cache.Load(path)
+}
+func (s *revCacheSingleton) Set(path string, value interface{}) {
+ s.Cache.Store(path, value)
+}
+func (s *revCacheSingleton) Delete(path string) {
+ s.Cache.Delete(path)
+}
+
var revCacheInstance *revCacheSingleton
var revCacheOnce sync.Once
@@ -269,10 +280,17 @@
}
// UpdateData will refresh the data content of the revision
-func (npr *NonPersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+func (npr *NonPersistedRevision) UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision {
npr.mutex.Lock()
defer npr.mutex.Unlock()
+ if ctx != nil {
+ if ctxTS, ok := ctx.Value(RequestTimestamp).(int64); ok && npr.lastUpdate.UnixNano() > ctxTS {
+ log.Warnw("data-is-older-than-current", log.Fields{"ctx-ts": ctxTS, "rev-ts": npr.lastUpdate.UnixNano()})
+ return npr
+ }
+ }
+
// Do not update the revision if data is the same
if npr.Config.Data != nil && npr.Config.hashData(npr.Root, data) == npr.Config.Hash {
log.Debugw("stored-data-matches-latest", log.Fields{"stored": npr.Config.Data, "provided": data})
@@ -300,7 +318,7 @@
// UpdateChildren will refresh the list of children with the provided ones
// It will carefully go through the list and ensure that no child is lost
-func (npr *NonPersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+func (npr *NonPersistedRevision) UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision {
npr.mutex.Lock()
defer npr.mutex.Unlock()
@@ -358,7 +376,7 @@
})
// replace entry
- newChild.GetNode().Root = existingChildren[nameIndex].GetNode().Root
+ newChild.GetNode().SetRoot(existingChildren[nameIndex].GetNode().GetRoot())
updatedChildren = append(updatedChildren, newChild)
} else {
log.Debugw("keeping-existing-child", log.Fields{
@@ -461,7 +479,7 @@
return npr.lastUpdate
}
-func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
+func (npr *NonPersistedRevision) LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
// stub... required by interface
return nil
}
@@ -473,3 +491,7 @@
func (npr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
// stub ... required by interface
}
+
+func (npr *NonPersistedRevision) getVersion() int64 {
+ return -1
+}
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index 2ab91b7..d2d228f 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -19,7 +19,9 @@
import (
"bytes"
"compress/gzip"
+ "context"
"github.com/golang/protobuf/proto"
+ "github.com/google/uuid"
"github.com/opencord/voltha-go/common/log"
"github.com/opencord/voltha-go/db/kvstore"
"reflect"
@@ -32,11 +34,13 @@
Revision
Compress bool
- events chan *kvstore.Event
- kvStore *Backend
- mutex sync.RWMutex
- isStored bool
- isWatched bool
+ events chan *kvstore.Event
+ kvStore *Backend
+ mutex sync.RWMutex
+ versionMutex sync.RWMutex
+ Version int64
+ isStored bool
+ isWatched bool
}
type watchCache struct {
@@ -57,10 +61,23 @@
func NewPersistedRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
pr := &PersistedRevision{}
pr.kvStore = branch.Node.GetRoot().KvStore
+ pr.Version = 1
pr.Revision = NewNonPersistedRevision(nil, branch, data, children)
return pr
}
+func (pr *PersistedRevision) getVersion() int64 {
+ pr.versionMutex.RLock()
+ defer pr.versionMutex.RUnlock()
+ return pr.Version
+}
+
+func (pr *PersistedRevision) setVersion(version int64) {
+ pr.versionMutex.Lock()
+ defer pr.versionMutex.Unlock()
+ pr.Version = version
+}
+
// Finalize is responsible of saving the revision in the persistent storage
func (pr *PersistedRevision) Finalize(skipOnExist bool) {
pr.store(skipOnExist)
@@ -73,8 +90,12 @@
log.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
- if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
- // TODO report error
+ // clone the revision data to avoid any race conditions with processes
+ // accessing the same data
+ cloned := proto.Clone(pr.GetConfig().Data.(proto.Message))
+
+ if blob, err := proto.Marshal(cloned); err != nil {
+ log.Errorw("problem-to-marshal", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
} else {
if pr.Compress {
var b bytes.Buffer
@@ -84,10 +105,11 @@
blob = b.Bytes()
}
+ GetRevCache().Set(pr.GetName(), pr)
if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
} else {
- log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+ log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data, "version": pr.getVersion()})
pr.isStored = true
}
}
@@ -145,6 +167,20 @@
case kvstore.PUT:
log.Debugw("update-in-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+ if latestRev.getVersion() >= event.Version {
+ log.Debugw("skipping-matching-or-older-revision", log.Fields{
+ "watch": latestRev.GetName(),
+ "watch-version": event.Version,
+ "latest-version": latestRev.getVersion(),
+ })
+ continue
+ } else {
+ log.Debugw("watch-revision-is-newer", log.Fields{
+ "watch": latestRev.GetName(),
+ "watch-version": event.Version,
+ "latest-version": latestRev.getVersion(),
+ })
+ }
data := reflect.New(reflect.TypeOf(latestRev.GetData()).Elem())
@@ -154,7 +190,6 @@
log.Debugw("un-marshaled-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "data": data.Interface()})
var pathLock string
- var pac *proxyAccessControl
var blobs map[string]*kvstore.KVPair
// The watch reported new persistence data.
@@ -166,6 +201,7 @@
Value: event.Value,
Session: "",
Lease: 0,
+ Version: event.Version,
}
if latestRev.GetNode().GetProxy() != nil {
@@ -173,82 +209,35 @@
// If a proxy exists for this revision, use it to lock access to the path
// and prevent simultaneous updates to the object in memory
//
- pathLock, _ = latestRev.GetNode().GetProxy().parseForControlledPath(latestRev.GetNode().GetProxy().getFullPath())
//If the proxy already has a request in progress, then there is no need to process the watch
- log.Debugw("checking-if-path-is-locked", log.Fields{"key": latestRev.GetHash(), "pathLock": pathLock})
- if PAC().IsReserved(pathLock) {
+ if latestRev.GetNode().GetProxy().GetOperation() != PROXY_NONE {
log.Debugw("operation-in-progress", log.Fields{
"key": latestRev.GetHash(),
"path": latestRev.GetNode().GetProxy().getFullPath(),
- "operation": latestRev.GetNode().GetProxy().Operation.String(),
+ "operation": latestRev.GetNode().GetProxy().operation.String(),
})
-
- //continue
-
- // Identify the operation type and determine if the watch event should be applied or not.
- switch latestRev.GetNode().GetProxy().Operation {
- case PROXY_REMOVE:
- fallthrough
-
- case PROXY_ADD:
- fallthrough
-
- case PROXY_UPDATE:
- // We will need to reload once the operation completes.
- // Therefore, the data of the current event is most likely out-dated
- // and should be ignored
- log.Debugw("ignore-watch-event", log.Fields{
- "key": latestRev.GetHash(),
- "path": latestRev.GetNode().GetProxy().getFullPath(),
- "operation": latestRev.GetNode().GetProxy().Operation.String(),
- })
-
- continue
-
- case PROXY_CREATE:
- fallthrough
-
- case PROXY_LIST:
- fallthrough
-
- case PROXY_GET:
- fallthrough
-
- case PROXY_WATCH:
- fallthrough
-
- default:
- log.Debugw("process-watch-event", log.Fields{
- "key": latestRev.GetHash(),
- "path": latestRev.GetNode().GetProxy().getFullPath(),
- "operation": latestRev.GetNode().GetProxy().Operation.String(),
- })
- }
+ continue
}
+ pathLock, _ = latestRev.GetNode().GetProxy().parseForControlledPath(latestRev.GetNode().GetProxy().getFullPath())
+
// Reserve the path to prevent others to modify while we reload from persistence
- log.Debugw("reserve-and-lock-path", log.Fields{"key": latestRev.GetHash(), "path": pathLock})
- pac = PAC().ReservePath(latestRev.GetNode().GetProxy().getFullPath(),
- latestRev.GetNode().GetProxy(), pathLock)
- pac.lock()
- latestRev.GetNode().GetProxy().Operation = PROXY_WATCH
- pac.SetProxy(latestRev.GetNode().GetProxy())
+ latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ latestRev.GetNode().GetProxy().SetOperation(PROXY_WATCH)
// Load changes and apply to memory
- latestRev.LoadFromPersistence(latestRev.GetName(), "", blobs)
+ latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
- log.Debugw("release-and-unlock-path", log.Fields{"key": latestRev.GetHash(), "path": pathLock})
- pac.getProxy().Operation = PROXY_GET
- pac.unlock()
- PAC().ReleasePath(pathLock)
+ // Release path
+ latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
} else {
// This block should be reached only if coming from a non-proxied request
log.Debugw("revision-with-no-proxy", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
// Load changes and apply to memory
- latestRev.LoadFromPersistence(latestRev.GetName(), "", blobs)
+ latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
}
}
@@ -264,16 +253,17 @@
}
// UpdateData modifies the information in the data model and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision {
log.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
- newNPR := pr.Revision.UpdateData(data, branch)
+ newNPR := pr.Revision.UpdateData(ctx, data, branch)
newPR := &PersistedRevision{
Revision: newNPR,
Compress: pr.Compress,
kvStore: pr.kvStore,
events: pr.events,
+ Version: pr.getVersion(),
isWatched: pr.isWatched,
}
@@ -289,17 +279,17 @@
}
// UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateChildren(name string, children []Revision,
- branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision {
log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
- newNPR := pr.Revision.UpdateChildren(name, children, branch)
+ newNPR := pr.Revision.UpdateChildren(ctx, name, children, branch)
newPR := &PersistedRevision{
Revision: newNPR,
Compress: pr.Compress,
kvStore: pr.kvStore,
events: pr.events,
+ Version: pr.getVersion(),
isWatched: pr.isWatched,
}
@@ -324,6 +314,7 @@
Compress: pr.Compress,
kvStore: pr.kvStore,
events: pr.events,
+ Version: pr.getVersion(),
isWatched: pr.isWatched,
}
@@ -346,8 +337,7 @@
// Drop takes care of eliminating a revision hash that is no longer needed
// and its associated config when required
func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
- log.Debugw("dropping-revision",
- log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
+ log.Debugw("dropping-revision", log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
pr.mutex.Lock()
defer pr.mutex.Unlock()
@@ -376,9 +366,10 @@
}
// verifyPersistedEntry validates if the provided data is available or not in memory and applies updates as required
-func (pr *PersistedRevision) verifyPersistedEntry(data interface{}, typeName string, keyName string, keyValue string, txid string) (response Revision) {
+func (pr *PersistedRevision) verifyPersistedEntry(ctx context.Context, data interface{}, typeName string, keyName string,
+ keyValue string, txid string, version int64) (response Revision) {
// Parent which holds the current node entry
- parent := pr.GetBranch().Node.Root
+ parent := pr.GetBranch().Node.GetRoot()
// Get a copy of the parent's children
children := make([]Revision, len(parent.GetBranch(NONE).Latest.GetChildren(typeName)))
@@ -389,11 +380,12 @@
// A child matching the provided key exists in memory
// Verify if the data differs from what was retrieved from persistence
// Also check if we are treating a newer revision of the data or not
- if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() {
+ if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() && childRev.getVersion() < version {
log.Debugw("revision-data-is-different", log.Fields{
- "key": childRev.GetHash(),
- "name": childRev.GetName(),
- "data": childRev.GetData(),
+ "key": childRev.GetHash(),
+ "name": childRev.GetName(),
+ "data": childRev.GetData(),
+ "version": childRev.getVersion(),
})
//
@@ -404,14 +396,15 @@
childRev.GetBranch().LatestLock.Lock()
// Update child
- updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
+ updatedChildRev := childRev.UpdateData(ctx, data, childRev.GetBranch())
updatedChildRev.GetNode().SetProxy(childRev.GetNode().GetProxy())
updatedChildRev.SetupWatch(updatedChildRev.GetName())
updatedChildRev.SetLastUpdate()
+ updatedChildRev.(*PersistedRevision).setVersion(version)
// Update cache
- GetRevCache().Cache.Store(updatedChildRev.GetName(), updatedChildRev)
+ GetRevCache().Set(updatedChildRev.GetName(), updatedChildRev)
childRev.Drop(txid, false)
childRev.GetBranch().LatestLock.Unlock()
@@ -423,7 +416,7 @@
// BEGIN lock parent -- Update parent
parent.GetBranch(NONE).LatestLock.Lock()
- updatedRev := parent.GetBranch(NONE).Latest.UpdateChildren(typeName, children, parent.GetBranch(NONE))
+ updatedRev := parent.GetBranch(NONE).GetLatest().UpdateChildren(ctx, typeName, children, parent.GetBranch(NONE))
parent.GetBranch(NONE).Node.makeLatest(parent.GetBranch(NONE), updatedRev, nil)
parent.GetBranch(NONE).LatestLock.Unlock()
@@ -441,14 +434,8 @@
response = updatedChildRev
}
} else {
- // Data is the same. Continue to the next entry
- log.Debugw("same-revision-data", log.Fields{
- "key": childRev.GetHash(),
- "name": childRev.GetName(),
- "data": childRev.GetData(),
- })
if childRev != nil {
- log.Debugw("keeping-same-revision-data", log.Fields{
+ log.Debugw("keeping-revision-data", log.Fields{
"key": childRev.GetHash(),
"name": childRev.GetName(),
"data": childRev.GetData(),
@@ -456,7 +443,10 @@
// Update timestamp to reflect when it was last read and to reset tracked timeout
childRev.SetLastUpdate()
- GetRevCache().Cache.Store(childRev.GetName(), childRev)
+ if childRev.getVersion() < version {
+ childRev.(*PersistedRevision).setVersion(version)
+ }
+ GetRevCache().Set(childRev.GetName(), childRev)
response = childRev
}
}
@@ -479,6 +469,10 @@
// We need to start watching this entry for future changes
childRev.SetName(typeName + "/" + keyValue)
childRev.SetupWatch(childRev.GetName())
+ childRev.(*PersistedRevision).setVersion(version)
+
+ // Add entry to cache
+ GetRevCache().Set(childRev.GetName(), childRev)
pr.GetBranch().LatestLock.Unlock()
// END child lock
@@ -490,7 +484,7 @@
// BEGIN parent lock
parent.GetBranch(NONE).LatestLock.Lock()
children = append(children, childRev)
- updatedRev := parent.GetBranch(NONE).Latest.UpdateChildren(typeName, children, parent.GetBranch(NONE))
+ updatedRev := parent.GetBranch(NONE).GetLatest().UpdateChildren(ctx, typeName, children, parent.GetBranch(NONE))
updatedRev.GetNode().SetProxy(parent.GetBranch(NONE).Node.GetProxy())
parent.GetBranch(NONE).Node.makeLatest(parent.GetBranch(NONE), updatedRev, nil)
parent.GetBranch(NONE).LatestLock.Unlock()
@@ -512,7 +506,7 @@
// LoadFromPersistence retrieves data from kv store at the specified location and refreshes the memory
// by adding missing entries, updating changed entries and ignoring unchanged ones
-func (pr *PersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
+func (pr *PersistedRevision) LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
pr.mutex.Lock()
defer pr.mutex.Unlock()
@@ -539,7 +533,7 @@
nodeType = pr.GetBranch().Node.Type
} else {
path = partition[1]
- nodeType = pr.GetBranch().Node.Root.Type
+ nodeType = pr.GetBranch().Node.GetRoot().Type
}
field := ChildrenFields(nodeType)[name]
@@ -574,7 +568,7 @@
// based on the field's key attribute
_, key := GetAttributeValue(data.Interface(), field.Key, 0)
- if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(), txid); entry != nil {
+ if entry := pr.verifyPersistedEntry(ctx, data.Interface(), name, field.Key, key.String(), txid, blob.Version); entry != nil {
response = append(response, entry)
}
} else {
@@ -601,7 +595,7 @@
}
keyValue := field.KeyFromStr(key)
- if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string), txid); entry != nil {
+ if entry := pr.verifyPersistedEntry(ctx, data.Interface(), name, field.Key, keyValue.(string), txid, blob.Version); entry != nil {
response = append(response, entry)
}
}
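The PUT handling above now uses the etcd version to decide whether a watch event is worth applying; the guard boils down to a strict "newer than what is in memory" comparison, sketched in isolation below (the function name is hypothetical):

    package main

    import "fmt"

    // shouldReload is a hypothetical stand-in for the guard above: a watch PUT is
    // only applied when its KV store version is strictly newer than the version
    // of the revision currently held in memory.
    func shouldReload(inMemoryVersion, eventVersion int64) bool {
        return eventVersion > inMemoryVersion
    }

    func main() {
        fmt.Println(shouldReload(4, 4)) // false: matching or older version, skip the reload
        fmt.Println(shouldReload(4, 5)) // true: the KV store holds newer data
    }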
diff --git a/db/model/proxy.go b/db/model/proxy.go
index 182dcdd..5c4d772 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -17,9 +17,11 @@
package model
import (
+ "context"
"crypto/md5"
"errors"
"fmt"
+ "github.com/google/uuid"
"github.com/opencord/voltha-go/common/log"
"reflect"
"runtime"
@@ -54,7 +56,7 @@
// Proxy holds the information for a specific location with the data model
type Proxy struct {
- sync.RWMutex
+ mutex sync.RWMutex
Root *root
Node *node
ParentNode *node
@@ -62,7 +64,7 @@
FullPath string
Exclusive bool
Callbacks map[CallbackType]map[string]*CallbackTuple
- Operation ProxyOperation
+ operation ProxyOperation
}
// NewProxy instantiates a new proxy to a specific location
@@ -100,6 +102,9 @@
// getCallbacks returns the full list of callbacks associated to the proxy
func (p *Proxy) getCallbacks(callbackType CallbackType) map[string]*CallbackTuple {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+
if p != nil {
if cb, exists := p.Callbacks[callbackType]; exists {
return cb
@@ -112,8 +117,8 @@
// getCallback returns a specific callback matching the type and function hash
func (p *Proxy) getCallback(callbackType CallbackType, funcHash string) *CallbackTuple {
- p.Lock()
- defer p.Unlock()
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
if tuple, exists := p.Callbacks[callbackType][funcHash]; exists {
return tuple
}
@@ -122,22 +127,22 @@
// setCallbacks applies a callbacks list to a type
func (p *Proxy) setCallbacks(callbackType CallbackType, callbacks map[string]*CallbackTuple) {
- p.Lock()
- defer p.Unlock()
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
p.Callbacks[callbackType] = callbacks
}
// setCallback applies a callback to a type and hash value
func (p *Proxy) setCallback(callbackType CallbackType, funcHash string, tuple *CallbackTuple) {
- p.Lock()
- defer p.Unlock()
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
p.Callbacks[callbackType][funcHash] = tuple
}
// DeleteCallback removes a callback matching the type and hash
func (p *Proxy) DeleteCallback(callbackType CallbackType, funcHash string) {
- p.Lock()
- defer p.Unlock()
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
delete(p.Callbacks[callbackType], funcHash)
}
@@ -146,7 +151,8 @@
// Enumerated list of callback types
const (
- PROXY_GET ProxyOperation = iota
+ PROXY_NONE ProxyOperation = iota
+ PROXY_GET
PROXY_LIST
PROXY_ADD
PROXY_UPDATE
@@ -156,6 +162,7 @@
)
var proxyOperationTypes = []string{
+ "PROXY_NONE",
"PROXY_GET",
"PROXY_LIST",
"PROXY_ADD",
@@ -169,6 +176,18 @@
return proxyOperationTypes[t]
}
+func (p *Proxy) GetOperation() ProxyOperation {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ return p.operation
+}
+
+func (p *Proxy) SetOperation(operation ProxyOperation) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ p.operation = operation
+}
+
// parseForControlledPath verifies if a proxy path matches a pattern
// for locations that need to be access controlled.
func (p *Proxy) parseForControlledPath(path string) (pathLock string, controlled bool) {
@@ -195,7 +214,7 @@
// List will retrieve information from the data model at the specified path location
// A list operation will force access to persistence storage
-func (p *Proxy) List(path string, depth int, deep bool, txid string) interface{} {
+func (p *Proxy) List(ctx context.Context, path string, depth int, deep bool, txid string) interface{} {
var effectivePath string
if path == "/" {
effectivePath = p.getFullPath()
@@ -205,28 +224,24 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_LIST)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-list", log.Fields{
"path": path,
"effective": effectivePath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(effectivePath, p, pathLock)
- defer PAC().ReleasePath(pathLock)
- p.Operation = PROXY_LIST
- pac.SetProxy(p)
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
-
- rv := pac.List(path, depth, deep, txid, controlled)
+ rv := p.GetRoot().List(ctx, path, "", depth, deep, txid)
return rv
}
// Get will retrieve information from the data model at the specified path location
-func (p *Proxy) Get(path string, depth int, deep bool, txid string) interface{} {
+func (p *Proxy) Get(ctx context.Context, path string, depth int, deep bool, txid string) interface{} {
var effectivePath string
if path == "/" {
effectivePath = p.getFullPath()
@@ -236,25 +251,24 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_GET)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-get", log.Fields{
"path": path,
"effective": effectivePath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(effectivePath, p, pathLock)
- defer PAC().ReleasePath(pathLock)
- p.Operation = PROXY_GET
- pac.SetProxy(p)
-
- rv := pac.Get(path, depth, deep, txid, controlled)
+ rv := p.GetRoot().Get(ctx, path, "", depth, deep, txid)
return rv
}
// Update will modify information in the data model at the specified location with the provided data
-func (p *Proxy) Update(path string, data interface{}, strict bool, txid string) interface{} {
+func (p *Proxy) Update(ctx context.Context, path string, data interface{}, strict bool, txid string) interface{} {
if !strings.HasPrefix(path, "/") {
log.Errorf("invalid path: %s", path)
return nil
@@ -271,31 +285,36 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_UPDATE)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-update", log.Fields{
"path": path,
"effective": effectivePath,
"full": fullPath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(effectivePath, p, pathLock)
- defer PAC().ReleasePath(pathLock)
+ if p.GetRoot().KvStore != nil {
+ p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ }
- p.Operation = PROXY_UPDATE
- pac.SetProxy(p)
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
- log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
+ result := p.GetRoot().Update(ctx, fullPath, data, strict, txid, nil)
- return pac.Update(fullPath, data, strict, txid, controlled)
+ if result != nil {
+ return result.GetData()
+ }
+
+ return nil
}
// AddWithID will insert new data at specified location.
// This method also allows the user to specify the ID of the data entry to ensure
// that access control is active while inserting the information.
-func (p *Proxy) AddWithID(path string, id string, data interface{}, txid string) interface{} {
+func (p *Proxy) AddWithID(ctx context.Context, path string, id string, data interface{}, txid string) interface{} {
if !strings.HasPrefix(path, "/") {
log.Errorf("invalid path: %s", path)
return nil
@@ -312,31 +331,34 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_ADD)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-add-with-id", log.Fields{
"path": path,
"effective": effectivePath,
"full": fullPath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(path, p, pathLock)
- defer PAC().ReleasePath(pathLock)
+ if p.GetRoot().KvStore != nil {
+ p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ }
- p.Operation = PROXY_ADD
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
+ result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
- pac.SetProxy(p)
+ if result != nil {
+ return result.GetData()
+ }
- log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
-
- return pac.Add(fullPath, data, txid, controlled)
+ return nil
}
// Add will insert new data at specified location.
-func (p *Proxy) Add(path string, data interface{}, txid string) interface{} {
+func (p *Proxy) Add(ctx context.Context, path string, data interface{}, txid string) interface{} {
if !strings.HasPrefix(path, "/") {
log.Errorf("invalid path: %s", path)
return nil
@@ -353,30 +375,34 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_ADD)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-add", log.Fields{
"path": path,
"effective": effectivePath,
"full": fullPath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(path, p, pathLock)
- defer PAC().ReleasePath(pathLock)
+ if p.GetRoot().KvStore != nil {
+ p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ }
- p.Operation = PROXY_ADD
- pac.SetProxy(p)
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
+ result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
- log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
+ if result != nil {
+ return result.GetData()
+ }
- return pac.Add(fullPath, data, txid, controlled)
+ return nil
}
// Remove will delete an entry at the specified location
-func (p *Proxy) Remove(path string, txid string) interface{} {
+func (p *Proxy) Remove(ctx context.Context, path string, txid string) interface{} {
if !strings.HasPrefix(path, "/") {
log.Errorf("invalid path: %s", path)
return nil
@@ -393,30 +419,34 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_REMOVE)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-remove", log.Fields{
"path": path,
"effective": effectivePath,
"full": fullPath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(effectivePath, p, pathLock)
- defer PAC().ReleasePath(pathLock)
+ if p.GetRoot().KvStore != nil {
+ p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ }
- p.Operation = PROXY_REMOVE
- pac.SetProxy(p)
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
+ result := p.GetRoot().Remove(ctx, fullPath, txid, nil)
- log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
+ if result != nil {
+ return result.GetData()
+ }
- return pac.Remove(fullPath, txid, controlled)
+ return nil
}
// CreateProxy to interact with specific path directly
-func (p *Proxy) CreateProxy(path string, exclusive bool) *Proxy {
+func (p *Proxy) CreateProxy(ctx context.Context, path string, exclusive bool) *Proxy {
if !strings.HasPrefix(path, "/") {
log.Errorf("invalid path: %s", path)
return nil
@@ -434,26 +464,24 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
+ p.SetOperation(PROXY_CREATE)
+ defer p.SetOperation(PROXY_NONE)
+
log.Debugw("proxy-create", log.Fields{
"path": path,
"effective": effectivePath,
"full": fullPath,
"pathLock": pathLock,
"controlled": controlled,
+ "operation": p.GetOperation(),
})
- pac := PAC().ReservePath(path, p, pathLock)
- defer PAC().ReleasePath(pathLock)
+ if p.GetRoot().KvStore != nil {
+ p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
+ defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ }
- p.Operation = PROXY_CREATE
- pac.SetProxy(p)
- defer func(op ProxyOperation) {
- pac.getProxy().Operation = op
- }(PROXY_GET)
-
- log.Debugw("proxy-operation--create-proxy", log.Fields{"operation": p.Operation})
-
- return pac.CreateProxy(fullPath, exclusive, controlled)
+ return p.GetRoot().CreateProxy(ctx, fullPath, exclusive)
}
// OpenTransaction creates a new transaction branch to isolate operations made to the data model
@@ -553,7 +581,7 @@
var err error
if callbacks := p.getCallbacks(callbackType); callbacks != nil {
- p.Lock()
+ p.mutex.Lock()
for _, callback := range callbacks {
if result, err = p.invoke(callback, context); err != nil {
if !proceedOnError {
@@ -563,7 +591,7 @@
log.Info("An error occurred. Invoking next callback")
}
}
- p.Unlock()
+ p.mutex.Unlock()
}
return result
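With proxy_access_control.go removed (below), mutating proxy operations serialize through a KV store reservation instead, as seen in Update/Add/Remove/CreateProxy above. Here is a hedged sketch of the same reserve/release pattern against the kvstore Client interface; the endpoint, lock key and NewEtcdClient arguments are assumptions based on how the client is used elsewhere in this repository.

    package main

    import (
        "fmt"

        "github.com/google/uuid"
        "github.com/opencord/voltha-go/db/kvstore"
    )

    const reservationTTL int64 = 180 // mirrors model.ReservationTTL (seconds)

    func main() {
        // Assumed local etcd endpoint; NewEtcdClient takes an address and a
        // timeout in seconds, as used elsewhere in voltha-go.
        client, err := kvstore.NewEtcdClient("localhost:2379", 5)
        if err != nil {
            panic(err)
        }

        pathLock := "devices/1234_" // illustrative controlled-path lock key

        // Reserve the path before touching the model, release it when done.
        if _, err := client.Reserve(pathLock, uuid.New().String(), reservationTTL); err != nil {
            fmt.Println("could not reserve path:", err)
            return
        }
        defer client.ReleaseReservation(pathLock)

        // ... apply the update to the data model here ...
        fmt.Println("path reserved; safe to apply the change")
    }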
diff --git a/db/model/proxy_access_control.go b/db/model/proxy_access_control.go
deleted file mode 100644
index a1ea6be..0000000
--- a/db/model/proxy_access_control.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package model
-
-import (
- "github.com/opencord/voltha-go/common/log"
- "sync"
- "time"
-)
-
-type singletonProxyAccessControl struct {
- sync.RWMutex
- cache sync.Map
- reservedCount int
-}
-
-var instanceProxyAccessControl *singletonProxyAccessControl
-var onceProxyAccessControl sync.Once
-
-// PAC provides access to the proxy access control singleton instance
-func PAC() *singletonProxyAccessControl {
- onceProxyAccessControl.Do(func() {
- instanceProxyAccessControl = &singletonProxyAccessControl{}
- })
- return instanceProxyAccessControl
-}
-
-// IsReserved will verify if access control is active for a specific path within the model
-func (singleton *singletonProxyAccessControl) IsReserved(pathLock string) bool {
- singleton.Lock()
- defer singleton.Unlock()
-
- _, exists := singleton.cache.Load(pathLock)
- log.Debugw("is-reserved", log.Fields{"pathLock": pathLock, "exists": exists})
-
- return exists
-}
-
-// ReservePath will apply access control for a specific path within the model
-func (singleton *singletonProxyAccessControl) ReservePath(path string, proxy *Proxy, pathLock string) *proxyAccessControl {
- singleton.Lock()
- defer singleton.Unlock()
- singleton.reservedCount++
- if pac, exists := singleton.cache.Load(pathLock); !exists {
- log.Debugf("Creating new PAC entry for path:%s pathLock:%s", path, pathLock)
- newPac := NewProxyAccessControl(proxy, pathLock)
- singleton.cache.Store(pathLock, newPac)
- return newPac
- } else {
- log.Debugf("Re-using existing PAC entry for path:%s pathLock:%s", path, pathLock)
- return pac.(*proxyAccessControl)
- }
-}
-
-// ReleasePath will remove access control for a specific path within the model
-func (singleton *singletonProxyAccessControl) ReleasePath(pathLock string) {
- singleton.Lock()
- defer singleton.Unlock()
-
- singleton.reservedCount--
-
- if singleton.reservedCount == 0 {
- singleton.cache.Delete(pathLock)
- }
-}
-
-// ProxyAccessControl is the abstraction interface to the base proxyAccessControl structure
-type ProxyAccessControl interface {
- Get(path string, depth int, deep bool, txid string, control bool) interface{}
- Update(path string, data interface{}, strict bool, txid string, control bool) interface{}
- Add(path string, data interface{}, txid string, control bool) interface{}
- Remove(path string, txid string, control bool) interface{}
- SetProxy(proxy *Proxy)
-}
-
-// proxyAccessControl holds details of the path and proxy that requires access control
-type proxyAccessControl struct {
- sync.RWMutex
- Proxy *Proxy
- PathLock chan struct{}
- Path string
-
- start time.Time
- stop time.Time
-}
-
-// NewProxyAccessControl creates a new instance of an access control structure
-func NewProxyAccessControl(proxy *Proxy, path string) *proxyAccessControl {
- return &proxyAccessControl{
- Proxy: proxy,
- Path: path,
- PathLock: make(chan struct{}, 1),
- }
-}
-
-// lock will prevent access to a model path
-func (pac *proxyAccessControl) lock() {
- log.Debugw("locking", log.Fields{"path": pac.Path})
- pac.PathLock <- struct{}{}
- pac.setStart(time.Now())
-}
-
-// unlock will release control of a model path
-func (pac *proxyAccessControl) unlock() {
- <-pac.PathLock
- log.Debugw("unlocking", log.Fields{"path": pac.Path})
- pac.setStop(time.Now())
- GetProfiling().AddToInMemoryLockTime(pac.getStop().Sub(pac.getStart()).Seconds())
-}
-
-// getStart is used for profiling purposes and returns the time at which access control was applied
-func (pac *proxyAccessControl) getStart() time.Time {
- pac.Lock()
- defer pac.Unlock()
- return pac.start
-}
-
-// getStop is used for profiling purposes and returns the time at which access control was removed
-func (pac *proxyAccessControl) getStop() time.Time {
- pac.Lock()
- defer pac.Unlock()
- return pac.stop
-}
-
-// getPath returns the access controlled path
-func (pac *proxyAccessControl) getPath() string {
- pac.Lock()
- defer pac.Unlock()
- return pac.Path
-}
-
-// getProxy returns the proxy used to reach a specific location in the data model
-func (pac *proxyAccessControl) getProxy() *Proxy {
- pac.Lock()
- defer pac.Unlock()
- return pac.Proxy
-}
-
-// setStart is for profiling purposes and applies a start time value at which access control was started
-func (pac *proxyAccessControl) setStart(time time.Time) {
- pac.Lock()
- defer pac.Unlock()
- pac.start = time
-}
-
-// setStop is for profiling purposes and applies a stop time value at which access control was stopped
-func (pac *proxyAccessControl) setStop(time time.Time) {
- pac.Lock()
- defer pac.Unlock()
- pac.stop = time
-}
-
-// SetProxy is used to change the proxy object of an access controlled path
-func (pac *proxyAccessControl) SetProxy(proxy *Proxy) {
- pac.Lock()
- defer pac.Unlock()
- pac.Proxy = proxy
-}
-
-// List retrieves data linked to a data model path
-func (pac *proxyAccessControl) List(path string, depth int, deep bool, txid string, control bool) interface{} {
- if control {
- pac.lock()
- log.Debugw("locked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- // FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
- // The data traversal through reflection currently corrupts the content
-
- return pac.getProxy().GetRoot().List(path, "", depth, deep, txid)
-}
-
-// Get retrieves data linked to a data model path
-func (pac *proxyAccessControl) Get(path string, depth int, deep bool, txid string, control bool) interface{} {
- if control {
- pac.lock()
- log.Debugw("locked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- // FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
- // The data traversal through reflection currently corrupts the content
- return pac.getProxy().GetRoot().Get(path, "", 0, deep, txid)
-}
-
-// Update changes the content of the data model at the specified location with the provided data
-func (pac *proxyAccessControl) Update(path string, data interface{}, strict bool, txid string, control bool) interface{} {
- if control {
- pac.lock()
- log.Debugw("locked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- result := pac.getProxy().GetRoot().Update(path, data, strict, txid, nil)
-
- if result != nil {
- return result.GetData()
- }
- return nil
-}
-
-// Add creates a new data model entry at the specified location with the provided data
-func (pac *proxyAccessControl) Add(path string, data interface{}, txid string, control bool) interface{} {
- if control {
- pac.lock()
- log.Debugw("locked-access--add", log.Fields{"path": path, "fullPath": pac.Path})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--add", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- result := pac.getProxy().GetRoot().Add(path, data, txid, nil)
-
- if result != nil {
- return result.GetData()
- }
- return nil
-}
-
-// Remove discards information linked to the data model path
-func (pac *proxyAccessControl) Remove(path string, txid string, control bool) interface{} {
- if control {
- pac.lock()
- log.Debugw("locked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- return pac.getProxy().GetRoot().Remove(path, txid, nil)
-}
-
-// CreateProxy allows interaction for a specific path
-func (pac *proxyAccessControl) CreateProxy(path string, exclusive bool, control bool) *Proxy {
- if control {
- pac.lock()
- log.Debugw("locked-access--create-proxy", log.Fields{"path": path, "fullPath": pac.Path})
- defer pac.unlock()
- defer log.Debugw("unlocked-access--create-proxy", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
- }
-
- result := pac.getProxy().ParentNode.CreateProxy(path, exclusive)
-
- if result != nil {
- return result
- }
- return nil
-}
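The file removed above serialized access to a model path with a one-slot channel (PathLock chan struct{}) rather than a sync.Mutex. For readers unfamiliar with that idiom, a stripped-down sketch of the same pattern:

    // pathLock reproduces the channel-as-lock idiom from the deleted file: a
    // buffered channel of capacity 1, where sending acquires the lock and
    // receiving releases it.
    type pathLock struct {
        ch chan struct{}
    }

    func newPathLock() *pathLock {
        return &pathLock{ch: make(chan struct{}, 1)}
    }

    func (l *pathLock) lock()   { l.ch <- struct{}{} } // blocks while another caller holds it
    func (l *pathLock) unlock() { <-l.ch }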
diff --git a/db/model/proxy_load_test.go b/db/model/proxy_load_test.go
index f44a6ae..f4fd325 100644
--- a/db/model/proxy_load_test.go
+++ b/db/model/proxy_load_test.go
@@ -16,6 +16,7 @@
package model
import (
+ "context"
"encoding/hex"
"github.com/google/uuid"
"github.com/opencord/voltha-go/common/log"
@@ -42,10 +43,16 @@
After interface{}
}
type proxyLoadTest struct {
- sync.RWMutex
- addedDevices []string
- updatedFirmwares []proxyLoadChanges
- updatedFlows []proxyLoadChanges
+ mutex sync.RWMutex
+
+ addMutex sync.RWMutex
+ addedDevices []string
+
+ firmwareMutex sync.RWMutex
+ updatedFirmwares []proxyLoadChanges
+ flowMutex sync.RWMutex
+ updatedFlows []proxyLoadChanges
+
preAddExecuted bool
postAddExecuted bool
preUpdateExecuted bool
@@ -53,33 +60,43 @@
}
func (plt *proxyLoadTest) SetPreAddExecuted(status bool) {
- plt.Lock()
- defer plt.Unlock()
+ plt.mutex.Lock()
+ defer plt.mutex.Unlock()
plt.preAddExecuted = status
}
func (plt *proxyLoadTest) SetPostAddExecuted(status bool) {
- plt.Lock()
- defer plt.Unlock()
+ plt.mutex.Lock()
+ defer plt.mutex.Unlock()
plt.postAddExecuted = status
}
func (plt *proxyLoadTest) SetPreUpdateExecuted(status bool) {
- plt.Lock()
- defer plt.Unlock()
+ plt.mutex.Lock()
+ defer plt.mutex.Unlock()
plt.preUpdateExecuted = status
}
func (plt *proxyLoadTest) SetPostUpdateExecuted(status bool) {
- plt.Lock()
- defer plt.Unlock()
+ plt.mutex.Lock()
+ defer plt.mutex.Unlock()
plt.postUpdateExecuted = status
}
func init() {
BenchmarkProxy_Root = NewRoot(&voltha.Voltha{}, nil)
- BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.InfoLevel, nil)
+ BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
+ // Set up the default logger - applies to packages that do not have a specific logger set
+ if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"}); err != nil {
+ log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ }
- BenchmarkProxy_DeviceProxy = BenchmarkProxy_Root.node.CreateProxy("/", false)
+ // Update all loggers (provisioned via init) with a common field
+ if err := log.UpdateAllLoggers(log.Fields{"instanceId": "PLT"}); err != nil {
+ log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ }
+ log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
+
+ BenchmarkProxy_DeviceProxy = BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
// Register ADD instructions callbacks
BenchmarkProxy_PLT = &proxyLoadTest{}
@@ -133,16 +150,16 @@
var added interface{}
// Add the device
- if added = BenchmarkProxy_DeviceProxy.AddWithID("/devices", ltDevID, ltDevice, ""); added == nil {
+ if added = BenchmarkProxy_DeviceProxy.AddWithID(context.Background(), "/devices", ltDevID, ltDevice, ""); added == nil {
BenchmarkProxy_Logger.Errorf("Failed to add device: %+v", ltDevice)
continue
} else {
BenchmarkProxy_Logger.Infof("Device was added 1: %+v", added)
}
- BenchmarkProxy_PLT.Lock()
+ BenchmarkProxy_PLT.addMutex.Lock()
BenchmarkProxy_PLT.addedDevices = append(BenchmarkProxy_PLT.addedDevices, added.(*voltha.Device).Id)
- BenchmarkProxy_PLT.Unlock()
+ BenchmarkProxy_PLT.addMutex.Unlock()
}
})
@@ -157,8 +174,8 @@
if len(BenchmarkProxy_PLT.addedDevices) > 0 {
var target interface{}
randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
- firmProxy := BenchmarkProxy_Root.node.CreateProxy("/", false)
- if target = firmProxy.Get("/devices/"+randomID, 0, false,
+ firmProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
+ if target = firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
""); !reflect.ValueOf(target).IsValid() {
BenchmarkProxy_Logger.Errorf("Failed to find device: %s %+v", randomID, target)
continue
@@ -183,7 +200,7 @@
after := target.(*voltha.Device).FirmwareVersion
var updated interface{}
- if updated = firmProxy.Update("/devices/"+randomID, target.(*voltha.Device), false,
+ if updated = firmProxy.Update(context.Background(), "/devices/"+randomID, target.(*voltha.Device), false,
""); updated == nil {
BenchmarkProxy_Logger.Errorf("Failed to update device: %+v", target)
continue
@@ -192,7 +209,7 @@
}
- if d := firmProxy.Get("/devices/"+randomID, 0, false,
+ if d := firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
""); !reflect.ValueOf(d).IsValid() {
BenchmarkProxy_Logger.Errorf("Failed to get device: %s", randomID)
continue
@@ -204,12 +221,13 @@
BenchmarkProxy_Logger.Errorf("Imm Device has unknown value: %s %+v %+v", randomID, d, target)
}
- BenchmarkProxy_PLT.Lock()
+ BenchmarkProxy_PLT.firmwareMutex.Lock()
+
BenchmarkProxy_PLT.updatedFirmwares = append(
BenchmarkProxy_PLT.updatedFirmwares,
proxyLoadChanges{ID: randomID, Before: before, After: after},
)
- BenchmarkProxy_PLT.Unlock()
+ BenchmarkProxy_PLT.firmwareMutex.Unlock()
}
}
})
@@ -246,8 +264,8 @@
if len(BenchmarkProxy_PLT.addedDevices) > 0 {
randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
- flowsProxy := BenchmarkProxy_Root.node.CreateProxy("/devices/"+randomID+"/flows", false)
- flows := flowsProxy.Get("/", 0, false, "")
+ flowsProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/devices/"+randomID+"/flows", false)
+ flows := flowsProxy.Get(context.Background(), "/", 0, false, "")
before := flows.(*openflow_13.Flows).Items[0].TableId
flows.(*openflow_13.Flows).Items[0].TableId = uint32(rand.Intn(3000))
@@ -263,17 +281,17 @@
)
var updated interface{}
- if updated = flowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+ if updated = flowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
b.Errorf("Failed to update flows for device: %+v", flows)
} else {
BenchmarkProxy_Logger.Infof("Flows were updated : %+v", updated)
}
- BenchmarkProxy_PLT.Lock()
+ BenchmarkProxy_PLT.flowMutex.Lock()
BenchmarkProxy_PLT.updatedFlows = append(
BenchmarkProxy_PLT.updatedFlows,
proxyLoadChanges{ID: randomID, Before: before, After: after},
)
- BenchmarkProxy_PLT.Unlock()
+ BenchmarkProxy_PLT.flowMutex.Unlock()
}
}
})
@@ -285,7 +303,7 @@
for i := 0; i < len(BenchmarkProxy_PLT.addedDevices); i++ {
devToGet := BenchmarkProxy_PLT.addedDevices[i]
// Verify that the added device can now be retrieved
- if d := BenchmarkProxy_DeviceProxy.Get("/devices/"+devToGet, 0, false,
+ if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
""); !reflect.ValueOf(d).IsValid() {
BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
continue
@@ -299,7 +317,7 @@
for i := 0; i < len(BenchmarkProxy_PLT.updatedFirmwares); i++ {
devToGet := BenchmarkProxy_PLT.updatedFirmwares[i].ID
// Verify that the updated device can be retrieved and that the updates were actually applied
- if d := BenchmarkProxy_DeviceProxy.Get("/devices/"+devToGet, 0, false,
+ if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
""); !reflect.ValueOf(d).IsValid() {
BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
continue
@@ -312,23 +330,3 @@
}
}
}
-
-func BenchmarkProxy_GetUpdatedFlows(b *testing.B) {
- var d interface{}
- for i := 0; i < len(BenchmarkProxy_PLT.updatedFlows); i++ {
- devToGet := BenchmarkProxy_PLT.updatedFlows[i].ID
- // Verify that the updated device can be retrieved and that the updates were actually applied
- flowsProxy := BenchmarkProxy_Root.node.CreateProxy("/devices/"+devToGet+"/flows", false)
- if d = flowsProxy.Get("/", 0, false,
- ""); !reflect.ValueOf(d).IsValid() {
- BenchmarkProxy_Logger.Errorf("Failed to get device flows: %s", devToGet)
- continue
- } else if d.(*openflow_13.Flows).Items[0].TableId == BenchmarkProxy_PLT.updatedFlows[i].After.(uint32) {
- BenchmarkProxy_Logger.Infof("Device was updated with new flow value: %s %+v", devToGet, d)
- } else if d.(*openflow_13.Flows).Items[0].TableId == BenchmarkProxy_PLT.updatedFlows[i].Before.(uint32) {
- BenchmarkProxy_Logger.Errorf("Device kept old flow value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFlows[i])
- } else {
- BenchmarkProxy_Logger.Errorf("Device has unknown flow value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFlows[i])
- }
- }
-}
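The struct change at the top of this file replaces a single embedded sync.RWMutex with named, per-field mutexes, so benchmark goroutines appending to different slices no longer share one lock and each slice is guarded consistently under the race detector. A minimal sketch of that layout, using only the standard sync package:

    // results mirrors the per-field locking used in proxyLoadTest: each slice
    // is guarded by its own mutex, and the lock is held only for the append.
    type results struct {
        addMutex     sync.RWMutex
        addedDevices []string
    }

    func (r *results) appendDevice(id string) {
        r.addMutex.Lock()
        defer r.addMutex.Unlock()
        r.addedDevices = append(r.addedDevices, id)
    }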
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index f583b99..3f65997 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -16,6 +16,7 @@
package model
import (
+ "context"
"encoding/hex"
"encoding/json"
"github.com/golang/protobuf/proto"
@@ -53,9 +54,9 @@
//log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
TestProxy_Root = NewRoot(&voltha.Voltha{}, nil)
- TestProxy_Root_LogicalDevice = TestProxy_Root.CreateProxy("/", false)
- TestProxy_Root_Device = TestProxy_Root.CreateProxy("/", false)
- TestProxy_Root_Adapter = TestProxy_Root.CreateProxy("/", false)
+ TestProxy_Root_LogicalDevice = TestProxy_Root.CreateProxy(context.Background(), "/", false)
+ TestProxy_Root_Device = TestProxy_Root.CreateProxy(context.Background(), "/", false)
+ TestProxy_Root_Adapter = TestProxy_Root.CreateProxy(context.Background(), "/", false)
TestProxy_LogicalPorts = []*voltha.LogicalPort{
{
@@ -115,7 +116,7 @@
postAddExecuted := make(chan struct{})
preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
- devicesProxy := TestProxy_Root.node.CreateProxy("/devices", false)
+ devicesProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices", false)
devicesProxy.RegisterCallback(PRE_ADD, commonCallback2, "PRE_ADD Device container changes")
devicesProxy.RegisterCallback(POST_ADD, commonCallback2, "POST_ADD Device container changes")
@@ -123,7 +124,7 @@
TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
TestProxy_Root_Device.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
- if added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, ""); added == nil {
+ if added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, ""); added == nil {
t.Error("Failed to add device")
} else {
t.Logf("Added device : %+v", added)
@@ -137,7 +138,7 @@
}
// Verify that the added device can now be retrieved
- if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find added device")
} else {
djson, _ := json.Marshal(d)
@@ -148,7 +149,7 @@
func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
TestProxy_Device.Id = TestProxy_DeviceId
- added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, "")
+ added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, "")
if added.(proto.Message).String() != reflect.ValueOf(TestProxy_Device).Interface().(proto.Message).String() {
t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
}
@@ -180,7 +181,7 @@
TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions for adapters", &postAddExecutedPtr)
// Add the adapter
- if added := TestProxy_Root_Adapter.Add("/adapters", TestProxy_Adapter, ""); added == nil {
+ if added := TestProxy_Root_Adapter.Add(context.Background(), "/adapters", TestProxy_Adapter, ""); added == nil {
t.Error("Failed to add adapter")
} else {
t.Logf("Added adapter : %+v", added)
@@ -189,7 +190,7 @@
verifyGotResponse(postAddExecuted)
// Verify that the added device can now be retrieved
- if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find added adapter")
} else {
djson, _ := json.Marshal(d)
@@ -205,7 +206,7 @@
}
func TestProxy_1_2_1_Get_AllDevices(t *testing.T) {
- devices := TestProxy_Root_Device.Get("/devices", 1, false, "")
+ devices := TestProxy_Root_Device.Get(context.Background(), "/devices", 1, false, "")
if len(devices.([]interface{})) == 0 {
t.Error("there are no available devices to retrieve")
@@ -217,7 +218,7 @@
}
func TestProxy_1_2_2_Get_SingleDevice(t *testing.T) {
- if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
t.Errorf("Failed to find device : %s", TestProxy_TargetDeviceId)
} else {
djson, _ := json.Marshal(d)
@@ -232,7 +233,7 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- if retrieved := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
+ if retrieved := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
t.Error("Failed to get device")
} else {
t.Logf("Found raw device (root proxy): %+v", retrieved)
@@ -257,7 +258,7 @@
"POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
)
- if afterUpdate := TestProxy_Root_Device.Update("/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
+ if afterUpdate := TestProxy_Root_Device.Update(context.Background(), "/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
t.Error("Failed to update device")
} else {
t.Logf("Updated device : %+v", afterUpdate)
@@ -270,7 +271,7 @@
t.Error("POST_UPDATE callback was not executed")
}
- if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated device (root proxy)")
} else {
djson, _ := json.Marshal(d)
@@ -281,8 +282,8 @@
func TestProxy_1_3_2_Update_DeviceFlows(t *testing.T) {
// Get a device proxy and update a specific port
- devFlowsProxy := TestProxy_Root.node.CreateProxy("/devices/"+TestProxy_DeviceId+"/flows", false)
- flows := devFlowsProxy.Get("/", 0, false, "")
+ devFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", false)
+ flows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
flows.(*openflow_13.Flows).Items[0].TableId = 2244
preUpdateExecuted := make(chan struct{})
@@ -300,13 +301,13 @@
"POST_UPDATE instructions (flows proxy)", &postUpdateExecutedPtr,
)
- kvFlows := devFlowsProxy.Get("/", 0, false, "")
+ kvFlows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
if reflect.DeepEqual(flows, kvFlows) {
t.Errorf("Local changes have changed the KV store contents - local:%+v, kv: %+v", flows, kvFlows)
}
- if updated := devFlowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+ if updated := devFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
t.Error("Failed to update flow")
} else {
t.Logf("Updated flows : %+v", updated)
@@ -319,14 +320,14 @@
t.Error("POST_UPDATE callback was not executed")
}
- if d := devFlowsProxy.Get("/", 0, false, ""); d == nil {
+ if d := devFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
t.Error("Failed to find updated flows (flows proxy)")
} else {
djson, _ := json.Marshal(d)
t.Logf("Found flows (flows proxy): %s", string(djson))
}
- if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId+"/flows", 1, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated flows (root proxy)")
} else {
djson, _ := json.Marshal(d)
@@ -339,9 +340,9 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- adaptersProxy := TestProxy_Root.node.CreateProxy("/adapters", false)
+ adaptersProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/adapters", false)
- if retrieved := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 1, false, ""); retrieved == nil {
+ if retrieved := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); retrieved == nil {
t.Error("Failed to get adapter")
} else {
t.Logf("Found raw adapter (root proxy): %+v", retrieved)
@@ -359,7 +360,7 @@
"POST_UPDATE instructions for adapters", &postUpdateExecutedPtr,
)
- if afterUpdate := adaptersProxy.Update("/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
+ if afterUpdate := adaptersProxy.Update(context.Background(), "/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
t.Error("Failed to update adapter")
} else {
t.Logf("Updated adapter : %+v", afterUpdate)
@@ -372,7 +373,7 @@
t.Error("POST_UPDATE callback for adapter was not executed")
}
- if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated adapter (root proxy)")
} else {
djson, _ := json.Marshal(d)
@@ -397,7 +398,7 @@
"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
)
- if removed := TestProxy_Root_Device.Remove("/devices/"+TestProxy_DeviceId, ""); removed == nil {
+ if removed := TestProxy_Root_Device.Remove(context.Background(), "/devices/"+TestProxy_DeviceId, ""); removed == nil {
t.Error("Failed to remove device")
} else {
t.Logf("Removed device : %+v", removed)
@@ -410,7 +411,7 @@
t.Error("POST_REMOVE callback was not executed")
}
- if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
@@ -432,7 +433,7 @@
TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
- if added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
+ if added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
t.Error("Failed to add logical device")
} else {
t.Logf("Added logical device : %+v", added)
@@ -440,7 +441,7 @@
verifyGotResponse(postAddExecuted)
- if ld := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
+ if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
t.Error("Failed to find added logical device")
} else {
ldJSON, _ := json.Marshal(ld)
@@ -458,14 +459,14 @@
func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
- added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, "")
+ added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, "")
if added.(proto.Message).String() != reflect.ValueOf(TestProxy_LogicalDevice).Interface().(proto.Message).String() {
t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
}
}
func TestProxy_2_2_1_Get_AllLogicalDevices(t *testing.T) {
- logicalDevices := TestProxy_Root_LogicalDevice.Get("/logical_devices", 1, false, "")
+ logicalDevices := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices", 1, false, "")
if len(logicalDevices.([]interface{})) == 0 {
t.Error("there are no available logical devices to retrieve")
@@ -477,7 +478,7 @@
}
func TestProxy_2_2_2_Get_SingleLogicalDevice(t *testing.T) {
- if ld := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
+ if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
t.Errorf("Failed to find logical device : %s", TestProxy_TargetLogicalDeviceId)
} else {
ldJSON, _ := json.Marshal(ld)
@@ -492,7 +493,7 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- if retrieved := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
+ if retrieved := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
t.Error("Failed to get logical device")
} else {
t.Logf("Found raw logical device (root proxy): %+v", retrieved)
@@ -517,7 +518,7 @@
retrieved.(*voltha.LogicalDevice).RootDeviceId = strconv.Itoa(fwVersion)
- if afterUpdate := TestProxy_Root_LogicalDevice.Update("/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
+ if afterUpdate := TestProxy_Root_LogicalDevice.Update(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
""); afterUpdate == nil {
t.Error("Failed to update logical device")
} else {
@@ -531,7 +532,7 @@
t.Error("POST_UPDATE callback was not executed")
}
- if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated logical device (root proxy)")
} else {
djson, _ := json.Marshal(d)
@@ -543,8 +544,8 @@
func TestProxy_2_3_2_Update_LogicalDeviceFlows(t *testing.T) {
// Get a device proxy and update a specific port
- ldFlowsProxy := TestProxy_Root.node.CreateProxy("/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
- flows := ldFlowsProxy.Get("/", 0, false, "")
+ ldFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
+ flows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
flows.(*openflow_13.Flows).Items[0].TableId = rand.Uint32()
t.Logf("before updated flows: %+v", flows)
@@ -557,26 +558,26 @@
commonCallback2,
)
- kvFlows := ldFlowsProxy.Get("/", 0, false, "")
+ kvFlows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
if reflect.DeepEqual(flows, kvFlows) {
t.Errorf("Local changes have changed the KV store contents - local:%+v, kv: %+v", flows, kvFlows)
}
- if updated := ldFlowsProxy.Update("/", flows.(*openflow_13.Flows), false, ""); updated == nil {
+ if updated := ldFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
t.Error("Failed to update logical device flows")
} else {
t.Logf("Updated logical device flows : %+v", updated)
}
- if d := ldFlowsProxy.Get("/", 0, false, ""); d == nil {
+ if d := ldFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
t.Error("Failed to find updated logical device flows (flows proxy)")
} else {
djson, _ := json.Marshal(d)
t.Logf("Found flows (flows proxy): %s", string(djson))
}
- if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
+ if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated logical device flows (root proxy)")
} else {
@@ -601,7 +602,7 @@
"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
)
- if removed := TestProxy_Root_LogicalDevice.Remove("/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
+ if removed := TestProxy_Root_LogicalDevice.Remove(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
t.Error("Failed to remove logical device")
} else {
t.Logf("Removed device : %+v", removed)
@@ -614,7 +615,7 @@
t.Error("POST_REMOVE callback was not executed")
}
- if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
+ if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
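Several tests in this file register PRE_/POST_ callbacks that signal completion over a chan struct{} and then wait for that signal (through the verifyGotResponse helper, whose body is outside this patch). A small helper in the same spirit, with an explicit timeout so a missed callback fails fast instead of hanging the test:

    // waitForCallback reports whether the callback signalled on ch within d.
    func waitForCallback(ch <-chan struct{}, d time.Duration) bool {
        select {
        case <-ch:
            return true
        case <-time.After(d):
            return false
        }
    }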
diff --git a/db/model/revision.go b/db/model/revision.go
index cd4c5df..6f52248 100644
--- a/db/model/revision.go
+++ b/db/model/revision.go
@@ -16,6 +16,7 @@
package model
import (
+ "context"
"github.com/opencord/voltha-go/db/kvstore"
"time"
)
@@ -34,6 +35,7 @@
SetHash(hash string)
GetHash() string
ClearHash()
+ getVersion() int64
SetupWatch(key string)
SetName(name string)
GetName() string
@@ -42,10 +44,10 @@
Get(int) interface{}
GetData() interface{}
GetNode() *node
- LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
SetLastUpdate(ts ...time.Time)
GetLastUpdate() time.Time
- UpdateData(data interface{}, branch *Branch) Revision
- UpdateChildren(name string, children []Revision, branch *Branch) Revision
+ LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
+ UpdateData(ctx context.Context, data interface{}, branch *Branch) Revision
+ UpdateChildren(ctx context.Context, name string, children []Revision, branch *Branch) Revision
UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
}
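With LoadFromPersistence, UpdateData and UpdateChildren now receiving a context.Context, an implementation can observe cancellation before doing any work. A generic, illustrative check (the concrete revision implementations are not part of this hunk):

    // ctxAlive reports whether the caller's context is still live; a revision
    // method could bail out early when it returns false.
    func ctxAlive(ctx context.Context) bool {
        select {
        case <-ctx.Done():
            return false
        default:
            return true
        }
    }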
diff --git a/db/model/root.go b/db/model/root.go
index 5036ce1..8331e11 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -17,6 +17,7 @@
package model
import (
+ "context"
"encoding/hex"
"encoding/json"
"github.com/golang/protobuf/proto"
@@ -103,7 +104,7 @@
r.DeleteTxBranch(txid)
} else {
r.node.MergeBranch(txid, false)
- r.ExecuteCallbacks()
+ r.node.GetRoot().ExecuteCallbacks()
r.DeleteTxBranch(txid)
}
}
@@ -162,7 +163,7 @@
}
func (r *root) syncParent(childRev Revision, txid string) {
- data := proto.Clone(r.Proxy.ParentNode.Latest().GetData().(proto.Message))
+ data := proto.Clone(r.GetProxy().ParentNode.Latest().GetData().(proto.Message))
for fieldName, _ := range ChildrenFields(data) {
childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
@@ -172,12 +173,12 @@
}
}
- r.Proxy.ParentNode.Latest().SetConfig(NewDataRevision(r.Proxy.ParentNode.Root, data))
- r.Proxy.ParentNode.Latest(txid).Finalize(false)
+ r.GetProxy().ParentNode.Latest().SetConfig(NewDataRevision(r.GetProxy().ParentNode.GetRoot(), data))
+ r.GetProxy().ParentNode.Latest(txid).Finalize(false)
}
// Update modifies the content of an object at a given path with the provided data
-func (r *root) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
if makeBranch != nil {
@@ -193,13 +194,13 @@
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
return node.MakeBranch(txid)
}
- result = r.node.Update(path, data, strict, txid, trackDirty)
+ result = r.node.Update(ctx, path, data, strict, txid, trackDirty)
} else {
- result = r.node.Update(path, data, strict, "", nil)
+ result = r.node.Update(ctx, path, data, strict, "", nil)
}
if result != nil {
- if r.Proxy.FullPath != r.Proxy.Path {
+ if r.GetProxy().FullPath != r.GetProxy().Path {
r.syncParent(result, txid)
} else {
result.Finalize(false)
@@ -212,7 +213,7 @@
}
// Add creates a new object at the given path with the provided data
-func (r *root) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
if makeBranch != nil {
@@ -228,9 +229,9 @@
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
return node.MakeBranch(txid)
}
- result = r.node.Add(path, data, txid, trackDirty)
+ result = r.node.Add(ctx, path, data, txid, trackDirty)
} else {
- result = r.node.Add(path, data, "", nil)
+ result = r.node.Add(ctx, path, data, "", nil)
}
if result != nil {
@@ -241,7 +242,7 @@
}
// Remove discards an object at a given path
-func (r *root) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+func (r *root) Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
if makeBranch != nil {
@@ -257,9 +258,9 @@
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
return node.MakeBranch(txid)
}
- result = r.node.Remove(path, txid, trackDirty)
+ result = r.node.Remove(ctx, path, txid, trackDirty)
} else {
- result = r.node.Remove(path, "", nil)
+ result = r.node.Remove(ctx, path, "", nil)
}
r.node.GetRoot().ExecuteCallbacks()
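The root's Update, Add and Remove entry points now take the context plus an optional txid and MakeBranchFunction; an empty txid with a nil makeBranch selects the direct, non-transactional branch of each function. A sketch of a direct call, written inside package model:

    // directUpdate performs a non-transactional update: no txid and no
    // branch-tracking callback, so the change lands on the default branch.
    func directUpdate(ctx context.Context, r *root, path string, data interface{}) Revision {
        return r.Update(ctx, path, data, false, "", nil)
    }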
diff --git a/db/model/transaction.go b/db/model/transaction.go
index fa8de1d..7529ff2 100644
--- a/db/model/transaction.go
+++ b/db/model/transaction.go
@@ -16,6 +16,7 @@
package model
import (
+ "context"
"github.com/opencord/voltha-go/common/log"
)
@@ -31,34 +32,34 @@
}
return tx
}
-func (t *Transaction) Get(path string, depth int, deep bool) interface{} {
+func (t *Transaction) Get(ctx context.Context, path string, depth int, deep bool) interface{} {
if t.txid == "" {
log.Errorf("closed transaction")
return nil
}
// TODO: need to review the return values at the different layers!!!!!
- return t.proxy.Get(path, depth, deep, t.txid)
+ return t.proxy.Get(ctx, path, depth, deep, t.txid)
}
-func (t *Transaction) Update(path string, data interface{}, strict bool) interface{} {
+func (t *Transaction) Update(ctx context.Context, path string, data interface{}, strict bool) interface{} {
if t.txid == "" {
log.Errorf("closed transaction")
return nil
}
- return t.proxy.Update(path, data, strict, t.txid)
+ return t.proxy.Update(ctx, path, data, strict, t.txid)
}
-func (t *Transaction) Add(path string, data interface{}) interface{} {
+func (t *Transaction) Add(ctx context.Context, path string, data interface{}) interface{} {
if t.txid == "" {
log.Errorf("closed transaction")
return nil
}
- return t.proxy.Add(path, data, t.txid)
+ return t.proxy.Add(ctx, path, data, t.txid)
}
-func (t *Transaction) Remove(path string) interface{} {
+func (t *Transaction) Remove(ctx context.Context, path string) interface{} {
if t.txid == "" {
log.Errorf("closed transaction")
return nil
}
- return t.proxy.Remove(path, t.txid)
+ return t.proxy.Remove(ctx, path, t.txid)
}
func (t *Transaction) Cancel() {
t.proxy.cancelTransaction(t.txid)
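Transactions now thread the caller's context through Get, Update, Add and Remove, while OpenTransaction, Commit and Cancel stay context-free, as the test changes below illustrate. A compact usage sketch under those assumptions:

    // addDeviceInTx opens a transaction on the proxy, adds a device under the
    // caller's context, then commits on success or cancels on failure.
    func addDeviceInTx(ctx context.Context, p *Proxy, device *voltha.Device) interface{} {
        tx := p.OpenTransaction()
        if added := tx.Add(ctx, "/devices", device); added != nil {
            tx.Commit()
            return added
        }
        tx.Cancel()
        return nil
    }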
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index bc53791..3660a86 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -17,6 +17,7 @@
package model
import (
+ "context"
"encoding/hex"
"github.com/google/uuid"
"github.com/opencord/voltha-protos/go/common"
@@ -34,7 +35,7 @@
func init() {
TestTransaction_Root = NewRoot(&voltha.Voltha{}, nil)
- TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy("/", false)
+ TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy(context.Background(), "/", false)
}
//func TestTransaction_1_GetDevices(t *testing.T) {
@@ -79,7 +80,7 @@
addTx := TestTransaction_RootProxy.OpenTransaction()
- if added := addTx.Add("/devices", device); added == nil {
+ if added := addTx.Add(context.Background(), "/devices", device); added == nil {
t.Error("Failed to add device")
} else {
TestTransaction_TargetDeviceId = added.(*voltha.Device).Id
@@ -93,12 +94,12 @@
basePath := "/devices/" + TestTransaction_DeviceId
getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
- device1 := getDevWithPortsTx.Get(basePath+"/ports", 1, false)
+ device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
t.Logf("retrieved device with ports: %+v", device1)
getDevWithPortsTx.Commit()
getDevTx := TestTransaction_RootProxy.OpenTransaction()
- device2 := getDevTx.Get(basePath, 0, false)
+ device2 := getDevTx.Get(context.Background(), basePath, 0, false)
t.Logf("retrieved device: %+v", device2)
getDevTx.Commit()
@@ -106,7 +107,7 @@
func TestTransaction_4_UpdateDevice(t *testing.T) {
updateTx := TestTransaction_RootProxy.OpenTransaction()
- if retrieved := updateTx.Get("/devices/"+TestTransaction_TargetDeviceId, 1, false); retrieved == nil {
+ if retrieved := updateTx.Get(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, 1, false); retrieved == nil {
t.Error("Failed to get device")
} else {
var fwVersion int
@@ -122,7 +123,7 @@
t.Logf("Before update : %+v", retrieved)
// FIXME: The makeBranch passed in function is nil or not being executed properly!!!!!
- if afterUpdate := updateTx.Update("/devices/"+TestTransaction_TargetDeviceId, retrieved, false); afterUpdate == nil {
+ if afterUpdate := updateTx.Update(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, retrieved, false); afterUpdate == nil {
t.Error("Failed to update device")
} else {
t.Logf("Updated device : %+v", afterUpdate)
@@ -136,12 +137,12 @@
basePath := "/devices/" + TestTransaction_DeviceId
getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
- device1 := getDevWithPortsTx.Get(basePath+"/ports", 1, false)
+ device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
t.Logf("retrieved device with ports: %+v", device1)
getDevWithPortsTx.Commit()
getDevTx := TestTransaction_RootProxy.OpenTransaction()
- device2 := getDevTx.Get(basePath, 0, false)
+ device2 := getDevTx.Get(context.Background(), basePath, 0, false)
t.Logf("retrieved device: %+v", device2)
getDevTx.Commit()
@@ -149,7 +150,7 @@
func TestTransaction_6_RemoveDevice(t *testing.T) {
removeTx := TestTransaction_RootProxy.OpenTransaction()
- if removed := removeTx.Remove("/devices/" + TestTransaction_DeviceId); removed == nil {
+ if removed := removeTx.Remove(context.Background(), "/devices/"+TestTransaction_DeviceId); removed == nil {
t.Error("Failed to remove device")
} else {
t.Logf("Removed device : %+v", removed)
@@ -162,7 +163,7 @@
basePath := "/devices/" + TestTransaction_DeviceId
getDevTx := TestTransaction_RootProxy.OpenTransaction()
- device := TestTransaction_RootProxy.Get(basePath, 0, false, "")
+ device := TestTransaction_RootProxy.Get(context.Background(), basePath, 0, false, "")
t.Logf("retrieved device: %+v", device)
getDevTx.Commit()