VOL-1900: lint warning fixes for the db package
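
Clean up golint findings across db/model and add the cli and db packages to
the sca lint target: ALL_CAPS constants are renamed to CamelCase, internal
helpers are unexported (NewNode, GetRevCache, GetNode, GetRoot, Watches),
stdlib and third-party imports are grouped, previously ignored error returns
(log.AddPackage, gzip Write, Reserve/ReleaseReservation, LoadFromPersistence)
are now checked, and dead code is removed.

A minimal before/after sketch of the recurring rename pattern, using the
callback type constants from db/model/callback_type.go in the diff below
(the CallbackType declaration itself is not shown in this patch; its
underlying type is assumed to be uint8 here):

    // Enum type as declared in callback_type.go (underlying type assumed).
    type CallbackType uint8

    // Before: golint flags ALL_CAPS identifiers in Go code.
    const (
        GET CallbackType = iota
        PRE_UPDATE
        POST_UPDATE
    )

    // After: idiomatic CamelCase names, same iota ordering and values.
    const (
        Get CallbackType = iota
        PreUpdate
        PostUpdate
    )
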
Change-Id: Iaa4e5c271c9e1d7c8ebce1e13c7e723ea4762304
diff --git a/Makefile b/Makefile
index c31e0e5..c486732 100644
--- a/Makefile
+++ b/Makefile
@@ -200,7 +200,7 @@
sca: golangci_lint_tool_install
rm -rf ./sca-report
@mkdir -p ./sca-report
- $(GOLANGCI_LINT_TOOL) run -E golint -D structcheck --out-format junit-xml ./rw_core/... ./ro_core/... ./tests/... ./common/... 2>&1 | tee ./sca-report/sca-report.xml
+ $(GOLANGCI_LINT_TOOL) run -E golint --out-format junit-xml ./cli/... ./rw_core/... ./ro_core/... ./tests/... ./common/... ./db/... 2>&1 | tee ./sca-report/sca-report.xml
test: go_junit_install gocover_cobertura_install local-lib-go
@mkdir -p ./tests/results
diff --git a/db/model/base_test.go b/db/model/base_test.go
index 45a6cdc..0fd9446 100644
--- a/db/model/base_test.go
+++ b/db/model/base_test.go
@@ -16,23 +16,12 @@
package model
import (
- "github.com/opencord/voltha-lib-go/v2/pkg/db"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
- "github.com/opencord/voltha-protos/v2/go/voltha"
"runtime/debug"
"sync"
-)
-type ModelTestConfig struct {
- Root *root
- Backend *db.Backend
- RootProxy *Proxy
- DbPrefix string
- DbType string
- DbHost string
- DbPort int
- DbTimeout int
-}
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+ "github.com/opencord/voltha-protos/v2/go/voltha"
+)
var callbackMutex sync.Mutex
diff --git a/db/model/branch.go b/db/model/branch.go
index 957e0ca..21669a1 100644
--- a/db/model/branch.go
+++ b/db/model/branch.go
@@ -17,8 +17,9 @@
package model
import (
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"sync"
+
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
)
// TODO: implement weak references or something equivalent
diff --git a/db/model/branch_test.go b/db/model/branch_test.go
index cf8406c..d91d15e 100644
--- a/db/model/branch_test.go
+++ b/db/model/branch_test.go
@@ -22,8 +22,8 @@
)
var (
- TestBranch_BRANCH *Branch
- TestBranch_HASH string
+ TestBranchBranch *Branch
+ TestBranchHash string
)
// Create a new branch and ensure that fields are populated
@@ -38,72 +38,72 @@
}
txid := fmt.Sprintf("%x", md5.Sum([]byte("branch_transaction_id")))
- TestBranch_BRANCH = NewBranch(node, txid, origin, true)
- t.Logf("New Branch(txid:%s) created: %+v\n", txid, TestBranch_BRANCH)
+ TestBranchBranch = NewBranch(node, txid, origin, true)
+ t.Logf("New Branch(txid:%s) created: %+v\n", txid, TestBranchBranch)
- if TestBranch_BRANCH.Latest == nil {
+ if TestBranchBranch.Latest == nil {
t.Errorf("Branch latest pointer is nil")
- } else if TestBranch_BRANCH.Origin == nil {
+ } else if TestBranchBranch.Origin == nil {
t.Errorf("Branch origin pointer is nil")
- } else if TestBranch_BRANCH.Node == nil {
+ } else if TestBranchBranch.Node == nil {
t.Errorf("Branch node pointer is nil")
- } else if TestBranch_BRANCH.Revisions == nil {
+ } else if TestBranchBranch.Revisions == nil {
t.Errorf("Branch revisions map is nil")
- } else if TestBranch_BRANCH.Txid == "" {
+ } else if TestBranchBranch.Txid == "" {
t.Errorf("Branch transaction id is empty")
}
}
// Add a new revision to the branch
func TestBranch_AddRevision(t *testing.T) {
- TestBranch_HASH = fmt.Sprintf("%x", md5.Sum([]byte("revision_hash")))
+ TestBranchHash = fmt.Sprintf("%x", md5.Sum([]byte("revision_hash")))
rev := &NonPersistedRevision{
Config: &DataRevision{},
Children: make(map[string][]Revision),
- Hash: TestBranch_HASH,
+ Hash: TestBranchHash,
Branch: &Branch{},
}
- TestBranch_BRANCH.AddRevision(rev)
+ TestBranchBranch.AddRevision(rev)
t.Logf("Added revision: %+v\n", rev)
- if len(TestBranch_BRANCH.Revisions) == 0 {
+ if len(TestBranchBranch.Revisions) == 0 {
t.Errorf("Branch revisions map is empty")
}
}
// Ensure that the added revision can be retrieved
func TestBranch_GetRevision(t *testing.T) {
- if rev := TestBranch_BRANCH.GetRevision(TestBranch_HASH); rev == nil {
- t.Errorf("Unable to retrieve revision for hash:%s", TestBranch_HASH)
+ if rev := TestBranchBranch.GetRevision(TestBranchHash); rev == nil {
+ t.Errorf("Unable to retrieve revision for hash:%s", TestBranchHash)
} else {
- t.Logf("Got revision for hash:%s rev:%+v\n", TestBranch_HASH, rev)
+ t.Logf("Got revision for hash:%s rev:%+v\n", TestBranchHash, rev)
}
}
// Set the added revision as the latest
func TestBranch_LatestRevision(t *testing.T) {
- addedRevision := TestBranch_BRANCH.GetRevision(TestBranch_HASH)
- TestBranch_BRANCH.SetLatest(addedRevision)
+ addedRevision := TestBranchBranch.GetRevision(TestBranchHash)
+ TestBranchBranch.SetLatest(addedRevision)
- rev := TestBranch_BRANCH.GetLatest()
+ rev := TestBranchBranch.GetLatest()
t.Logf("Retrieved latest revision :%+v", rev)
if rev == nil {
t.Error("Unable to retrieve latest revision")
- } else if rev.GetHash() != TestBranch_HASH {
- t.Errorf("Latest revision does not match hash: %s", TestBranch_HASH)
+ } else if rev.GetHash() != TestBranchHash {
+ t.Errorf("Latest revision does not match hash: %s", TestBranchHash)
}
}
// Ensure that the origin revision remains and differs from subsequent revisions
func TestBranch_OriginRevision(t *testing.T) {
- rev := TestBranch_BRANCH.Origin
+ rev := TestBranchBranch.Origin
t.Logf("Retrieved origin revision :%+v", rev)
if rev == nil {
t.Error("Unable to retrieve origin revision")
- } else if rev.GetHash() == TestBranch_HASH {
- t.Errorf("Origin revision should differ from added revision: %s", TestBranch_HASH)
+ } else if rev.GetHash() == TestBranchHash {
+ t.Errorf("Origin revision should differ from added revision: %s", TestBranchHash)
}
}
diff --git a/db/model/callback_type.go b/db/model/callback_type.go
index b530dee..796a6ce 100644
--- a/db/model/callback_type.go
+++ b/db/model/callback_type.go
@@ -21,14 +21,14 @@
// Enumerated list of callback types
const (
- GET CallbackType = iota
- PRE_UPDATE
- POST_UPDATE
- PRE_ADD
- POST_ADD
- PRE_REMOVE
- POST_REMOVE
- POST_LISTCHANGE
+ Get CallbackType = iota
+ PreUpdate
+ PostUpdate
+ PreAdd
+ PostAdd
+ PreRemove
+ PostRemove
+ PostListchange
)
var enumCallbackTypes = []string{
diff --git a/db/model/data_revision.go b/db/model/data_revision.go
index 35f5958..1b1fc39 100644
--- a/db/model/data_revision.go
+++ b/db/model/data_revision.go
@@ -21,9 +21,10 @@
"crypto/md5"
"encoding/json"
"fmt"
+ "reflect"
+
"github.com/golang/protobuf/proto"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
- "reflect"
)
// DataRevision stores the data associated to a revision along with its calculated checksum hash value
diff --git a/db/model/data_revision_test.go b/db/model/data_revision_test.go
index 76b8923..635525f 100644
--- a/db/model/data_revision_test.go
+++ b/db/model/data_revision_test.go
@@ -17,13 +17,14 @@
package model
import (
+ "reflect"
+ "testing"
+
"github.com/golang/protobuf/ptypes/any"
"github.com/opencord/voltha-protos/v2/go/common"
"github.com/opencord/voltha-protos/v2/go/openflow_13"
"github.com/opencord/voltha-protos/v2/go/voltha"
"github.com/stretchr/testify/assert"
- "reflect"
- "testing"
)
var (
@@ -88,9 +89,9 @@
}
func TestNoDataRevision(t *testing.T) {
- TestNode_Data = nil
- TestNode_Root = &root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}
- rev := NewDataRevision(TestNode_Root, TestNode_Data)
+ TestNodeData = nil
+ TestNodeRoot = &root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}
+ rev := NewDataRevision(TestNodeRoot, TestNodeData)
assert.Nil(t, rev.Data, "Problem to marshal data when data is nil")
}
diff --git a/db/model/event_bus.go b/db/model/event_bus.go
index d0a21f1..2a2c21e 100644
--- a/db/model/event_bus.go
+++ b/db/model/event_bus.go
@@ -32,11 +32,11 @@
// ignoredCallbacks keeps a list of callbacks that should not be advertised on the event bus
var (
ignoredCallbacks = map[CallbackType]struct{}{
- PRE_ADD: {},
- GET: {},
- POST_LISTCHANGE: {},
- PRE_REMOVE: {},
- PRE_UPDATE: {},
+ PreAdd: {},
+ Get: {},
+ PostListchange: {},
+ PreRemove: {},
+ PreUpdate: {},
}
)
@@ -60,9 +60,9 @@
}
var kind voltha.ConfigEventType_ConfigEventType
switch eventType {
- case POST_ADD:
+ case PostAdd:
kind = voltha.ConfigEventType_add
- case POST_REMOVE:
+ case PostRemove:
kind = voltha.ConfigEventType_remove
default:
kind = voltha.ConfigEventType_update
diff --git a/db/model/merge.go b/db/model/merge.go
index 07ae9b9..a1cb2db 100644
--- a/db/model/merge.go
+++ b/db/model/merge.go
@@ -136,7 +136,7 @@
}
if field.IsContainer {
changes = append(
- changes, ChangeTuple{POST_LISTCHANGE,
+ changes, ChangeTuple{PostListchange,
NewOperationContext("", nil, fieldName, ""), nil},
)
}
@@ -160,13 +160,13 @@
// FIXME: newRev may come back as nil... exclude those entries for now
if newRev != nil {
newList[idx] = newRev
- changes = append(changes, ChangeTuple{POST_ADD, newList[idx].GetData(), newRev.GetData()})
+ changes = append(changes, ChangeTuple{PostAdd, newList[idx].GetData(), newRev.GetData()})
}
}
for key := range src.RemovedKeys {
oldRev := forkList[src.KeyMap1[key]]
revsToDiscard = append(revsToDiscard, oldRev)
- changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+ changes = append(changes, ChangeTuple{PostRemove, oldRev.GetData(), nil})
}
for key := range src.ChangedKeys {
idx := src.KeyMap2[key]
@@ -200,7 +200,7 @@
} else {
newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
newList = append(newList, newRev)
- changes = append(changes, ChangeTuple{POST_ADD, srcList[src.KeyMap2[key]], newRev.GetData()})
+ changes = append(changes, ChangeTuple{PostAdd, srcList[src.KeyMap2[key]], newRev.GetData()})
}
}
for key := range src.ChangedKeys {
@@ -237,7 +237,7 @@
newList[len(newList)-1] = nil
newList = newList[:len(newList)-1]
- changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+ changes = append(changes, ChangeTuple{PostRemove, oldRev.GetData(), nil})
}
}
@@ -264,7 +264,7 @@
rev = rev.UpdateAllChildren(newChildren, dstRev.GetBranch())
if configChanged {
- changes = append(changes, ChangeTuple{POST_UPDATE, dstRev.GetBranch().GetLatest().GetData(), rev.GetData()})
+ changes = append(changes, ChangeTuple{PostUpdate, dstRev.GetBranch().GetLatest().GetData(), rev.GetData()})
}
return rev, changes
}
diff --git a/db/model/model.go b/db/model/model.go
index ba4a9b1..34d2192 100644
--- a/db/model/model.go
+++ b/db/model/model.go
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package model
import (
@@ -20,18 +21,19 @@
)
func init() {
- log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
- defer log.CleanUp()
+ if _, err := log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"}); err != nil {
+ log.Errorw("Unable to register package to the log map", log.Fields{"error": err})
+ }
}
const (
- // period to determine when data requires a refresh (in milliseconds)
+ // DataRefreshPeriod is the period used to determine when data requires a refresh (in milliseconds)
// TODO: make this configurable?
DataRefreshPeriod int64 = 5000
- // Attribute used to store a timestamp in the context object
+ // RequestTimestamp attribute used to store a timestamp in the context object
RequestTimestamp = "request-timestamp"
- // Time limit for a KV path reservation (in seconds)
+ // ReservationTTL is the time limit for a KV path reservation (in seconds)
ReservationTTL int64 = 180
)
diff --git a/db/model/node.go b/db/model/node.go
index 7e703ff..57f67d3 100644
--- a/db/model/node.go
+++ b/db/model/node.go
@@ -22,12 +22,13 @@
import (
"context"
"fmt"
- "github.com/golang/protobuf/proto"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"reflect"
"strings"
"sync"
"time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
)
// When a branch has no transaction id, everything gets stored in NONE
@@ -76,8 +77,8 @@
LatestData interface{}
}
-// NewNode creates a new instance of the node data structure
-func NewNode(root *root, initialData interface{}, autoPrune bool, txid string) *node {
+// newNode creates a new instance of the node data structure
+func newNode(root *root, initialData interface{}, autoPrune bool, txid string) *node {
n := &node{}
n.Root = root
@@ -106,7 +107,7 @@
// MakeNode creates a new node in the tree
func (n *node) MakeNode(data interface{}, txid string) *node {
- return NewNode(n.Root, data, true, txid)
+ return newNode(n.Root, data, true, txid)
}
// MakeRevision create a new revision of the node in the tree
@@ -130,7 +131,7 @@
log.Debugw("saving-latest-data", log.Fields{"hash": revision.GetHash(), "data": revision.GetData()})
// Tag a timestamp to that revision
revision.SetLastUpdate()
- GetRevCache().Set(revision.GetName(), revision)
+ getRevCache().Set(revision.GetName(), revision)
}
branch.SetLatest(revision)
}
@@ -255,9 +256,6 @@
defer n.mutex.Unlock()
log.Debugw("node-list-request", log.Fields{"path": path, "hash": hash, "depth": depth, "deep": deep, "txid": txid})
- if deep {
- depth = -1
- }
for strings.HasPrefix(path, "/") {
path = path[1:]
@@ -325,8 +323,8 @@
// 1. Start with the cache which stores revisions by watch names
// 2. Then look in the revision tree, especially if it's a sub-path such as /devices/1234/flows
// 3. Move on to the KV store if that path cannot be found or if the entry has expired
- if entry, exists := GetRevCache().Get(path); exists && entry.(Revision) != nil {
- entryAge := time.Now().Sub(entry.(Revision).GetLastUpdate()).Nanoseconds() / int64(time.Millisecond)
+ if entry, exists := getRevCache().Get(path); exists && entry.(Revision) != nil {
+ entryAge := time.Since(entry.(Revision).GetLastUpdate()).Nanoseconds() / int64(time.Millisecond)
if entryAge < DataRefreshPeriod {
log.Debugw("using-cache-entry", log.Fields{
"path": path,
@@ -334,9 +332,8 @@
"age": entryAge,
})
return proto.Clone(entry.(Revision).GetData().(proto.Message)), nil
- } else {
- log.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
}
+ log.Debugw("cache-entry-expired", log.Fields{"path": path, "hash": hash, "age": entryAge})
} else if result = n.getPath(ctx, rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
log.Debugw("using-rev-tree-entry", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
return result, nil
@@ -406,37 +403,35 @@
key := partition[0]
path = ""
keyValue := field.KeyFromStr(key)
- if _, childRev := n.findRevByKey(children, field.Key, keyValue); childRev == nil {
+ _, childRev := n.findRevByKey(children, field.Key, keyValue)
+ if childRev == nil {
return nil
- } else {
- childNode := childRev.GetNode()
- return childNode.getPath(ctx, childRev, path, depth)
}
- } else {
- var response []interface{}
- for _, childRev := range children {
- childNode := childRev.GetNode()
- value := childNode.getData(childRev, depth)
- response = append(response, value)
- }
- return response
+ childNode := childRev.getNode()
+ return childNode.getPath(ctx, childRev, path, depth)
}
- } else {
var response []interface{}
- if path != "" {
- // TODO: raise error
- return response
- }
for _, childRev := range children {
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
value := childNode.getData(childRev, depth)
response = append(response, value)
}
return response
}
- } else if children := rev.GetChildren(name); children != nil && len(children) > 0 {
+ var response []interface{}
+ if path != "" {
+ // TODO: raise error
+ return response
+ }
+ for _, childRev := range children {
+ childNode := childRev.getNode()
+ value := childNode.getData(childRev, depth)
+ response = append(response, value)
+ }
+ return response
+ } else if children := rev.GetChildren(name); children != nil {
childRev := children[0]
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
return childNode.getPath(ctx, childRev, path, depth)
}
@@ -450,7 +445,7 @@
if n.GetProxy() != nil {
log.Debugw("invoking-get-callbacks", log.Fields{"data": msg})
- if modifiedMsg = n.GetProxy().InvokeCallbacks(GET, false, msg); modifiedMsg != nil {
+ if modifiedMsg = n.GetProxy().InvokeCallbacks(Get, false, msg); modifiedMsg != nil {
msg = modifiedMsg
}
@@ -525,7 +520,7 @@
return branch.GetLatest()
}
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
// Save proxy in child node to ensure callbacks are called later on
// only assign in cases of non sub-folder proxies, i.e. "/"
@@ -546,7 +541,7 @@
_, newKey := GetAttributeValue(newChildRev.GetData(), field.Key, 0)
- _newKeyType := fmt.Sprintf("%s", newKey)
+ _newKeyType := newKey.String()
_keyValueType := fmt.Sprintf("%s", keyValue)
if _newKeyType != _keyValueType {
@@ -577,7 +572,7 @@
}
} else {
childRev := rev.GetChildren(name)[0]
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
newChildRev := childNode.Update(ctx, path, data, strict, txid, makeBranch)
branch.LatestLock.Lock()
@@ -609,8 +604,8 @@
//}
if n.GetProxy() != nil {
- log.Debug("invoking proxy PRE_UPDATE Callbacks")
- n.GetProxy().InvokeCallbacks(PRE_UPDATE, false, branch.GetLatest(), data)
+ log.Debug("invoking proxy PreUpdate Callbacks")
+ n.GetProxy().InvokeCallbacks(PreUpdate, false, branch.GetLatest(), data)
}
if branch.GetLatest().GetData().(proto.Message).String() != data.(proto.Message).String() {
@@ -620,7 +615,7 @@
}
rev := branch.GetLatest().UpdateData(ctx, data, branch)
- changes := []ChangeTuple{{POST_UPDATE, branch.GetLatest().GetData(), rev.GetData()}}
+ changes := []ChangeTuple{{PostUpdate, branch.GetLatest().GetData(), rev.GetData()}}
n.makeLatest(branch, rev, changes)
return rev
@@ -670,8 +665,8 @@
if path == "" {
if field.Key != "" {
if n.GetProxy() != nil {
- log.Debug("invoking proxy PRE_ADD Callbacks")
- n.GetProxy().InvokeCallbacks(PRE_ADD, false, data)
+ log.Debug("invoking proxy PreAdd Callbacks")
+ n.GetProxy().InvokeCallbacks(PreAdd, false, data)
}
children = make([]Revision, len(rev.GetChildren(name)))
@@ -695,7 +690,7 @@
children = append(children, childRev)
updatedRev := rev.UpdateChildren(ctx, name, children, branch)
- changes := []ChangeTuple{{POST_ADD, nil, childRev.GetData()}}
+ changes := []ChangeTuple{{PostAdd, nil, childRev.GetData()}}
childRev.SetupWatch(childRev.GetName())
n.makeLatest(branch, updatedRev, changes)
@@ -723,7 +718,7 @@
return branch.GetLatest()
}
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
newChildRev := childNode.Add(ctx, path, data, txid, makeBranch)
// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
@@ -807,7 +802,7 @@
if path != "" {
if idx, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil {
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
if childNode.Proxy == nil {
childNode.Proxy = n.Proxy
}
@@ -829,17 +824,18 @@
return branch.GetLatest()
}
- if idx, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil && idx >= 0 {
+ idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+ if childRev != nil && idx >= 0 {
if n.GetProxy() != nil {
data := childRev.GetData()
- n.GetProxy().InvokeCallbacks(PRE_REMOVE, false, data)
- postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, data, nil})
+ n.GetProxy().InvokeCallbacks(PreRemove, false, data)
+ postAnnouncement = append(postAnnouncement, ChangeTuple{PostRemove, data, nil})
} else {
- postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, childRev.GetData(), nil})
+ postAnnouncement = append(postAnnouncement, ChangeTuple{PostRemove, childRev.GetData(), nil})
}
childRev.StorageDrop(txid, true)
- GetRevCache().Delete(childRev.GetName())
+ getRevCache().Delete(childRev.GetName())
branch.LatestLock.Lock()
defer branch.LatestLock.Unlock()
@@ -851,9 +847,8 @@
n.makeLatest(branch, rev, postAnnouncement)
return rev
- } else {
- log.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
}
+ log.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
}
log.Errorw("cannot-add-to-non-keyed-container", log.Fields{"name": name, "path": path, "fieldKey": field.Key})
@@ -918,43 +913,6 @@
return rev, nil
}
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diff utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-//func (n *node) diff(hash1, hash2, txid string) {
-// branch := n.Branches[txid]
-// rev1 := branch.GetHash(hash1)
-// rev2 := branch.GetHash(hash2)
-//
-// if rev1.GetHash() == rev2.GetHash() {
-// // empty patch
-// } else {
-// // translate data to json and generate patch
-// patch, err := jsonpatch.MakePatch(rev1.GetData(), rev2.GetData())
-// patch.
-// }
-//}
-
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tag utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-// TODO: is tag mgmt used in the python implementation? Need to validate
-
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Internals ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-func (n *node) hasChildren(data interface{}) bool {
- for fieldName, field := range ChildrenFields(n.Type) {
- _, fieldValue := GetAttributeValue(data, fieldName, 0)
-
- if (field.IsContainer && fieldValue.Len() > 0) || !fieldValue.IsNil() {
- log.Error("cannot update external children")
- return true
- }
- }
-
- return false
-}
-
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ node Proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
// CreateProxy returns a reference to a sub-tree of the data model
func (n *node) CreateProxy(ctx context.Context, path string, exclusive bool) (*Proxy, error) {
return n.createProxy(ctx, path, path, n, exclusive)
@@ -1021,8 +979,7 @@
path = partition[1]
}
keyValue := field.KeyFromStr(key)
- var children []Revision
- children = make([]Revision, len(rev.GetChildren(name)))
+ children := make([]Revision, len(rev.GetChildren(name)))
copy(children, rev.GetChildren(name))
var childRev Revision
@@ -1036,7 +993,7 @@
} else if revs, err := n.GetBranch(NONE).GetLatest().LoadFromPersistence(ctx, fullPath, "", nil); err != nil {
log.Errorf("failed-to-load-from-persistence")
return nil, err
- } else if revs != nil && len(revs) > 0 {
+ } else if len(revs) > 0 {
log.Debugw("found-revision-matching-key-in-db", log.Fields{
"node-type": reflect.ValueOf(n.Type).Type(),
"parent-node-type": reflect.ValueOf(parentNode.Type).Type(),
@@ -1053,7 +1010,7 @@
})
}
if childRev != nil {
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
return childNode.createProxy(ctx, path, fullPath, n, exclusive)
}
} else {
@@ -1072,7 +1029,7 @@
"name": name,
})
childRev := rev.GetChildren(name)[0]
- childNode := childRev.GetNode()
+ childNode := childRev.getNode()
return childNode.createProxy(ctx, path, fullPath, n, exclusive)
}
} else {
@@ -1135,13 +1092,6 @@
return n.Proxy
}
-func (n *node) makeEventBus() *EventBus {
- if n.EventBus == nil {
- n.EventBus = NewEventBus()
- }
- return n.EventBus
-}
-
func (n *node) SetProxy(proxy *Proxy) {
n.Proxy = proxy
}
diff --git a/db/model/node_test.go b/db/model/node_test.go
index 7e0a3ce..c54e35c 100644
--- a/db/model/node_test.go
+++ b/db/model/node_test.go
@@ -18,28 +18,17 @@
import (
"crypto/md5"
"fmt"
+ "reflect"
+ "testing"
+
"github.com/golang/protobuf/ptypes/any"
"github.com/opencord/voltha-protos/v2/go/common"
"github.com/opencord/voltha-protos/v2/go/openflow_13"
"github.com/opencord/voltha-protos/v2/go/voltha"
- "reflect"
- "testing"
)
var (
- TestNode_Port = []*voltha.Port{
- {
- PortNo: 123,
- Label: "test-etcd_port-0",
- Type: voltha.Port_PON_OLT,
- AdminState: common.AdminState_ENABLED,
- OperStatus: common.OperStatus_ACTIVE,
- DeviceId: "etcd_port-0-device-id",
- Peers: []*voltha.Port_PeerPort{},
- },
- }
-
- TestNode_Device = &voltha.Device{
+ TestNodeDevice = &voltha.Device{
Id: "Config-SomeNode-01-new-test",
Type: "simulated_olt",
Root: true,
@@ -62,32 +51,30 @@
Reason: "",
ConnectStatus: common.ConnectStatus_REACHABLE,
Custom: &any.Any{},
- Ports: TestNode_Port,
+ Ports: TestNodePort,
Flows: &openflow_13.Flows{},
FlowGroups: &openflow_13.FlowGroups{},
PmConfigs: &voltha.PmConfigs{},
ImageDownloads: []*voltha.ImageDownload{},
}
- TestNode_Data = TestNode_Device
-
- TestNode_Txid = fmt.Sprintf("%x", md5.Sum([]byte("node_transaction_id")))
- TestNode_Root = &root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}
+ TestNodeTxid = fmt.Sprintf("%x", md5.Sum([]byte("node_transaction_id")))
+ TestNodeRoot = &root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}
)
// Exercise node creation code
// This test will
func TestNode_01_NewNode(t *testing.T) {
- node := NewNode(TestNode_Root, TestNode_Data, false, TestNode_Txid)
+ node := newNode(TestNodeRoot, TestNodeDevice, false, TestNodeTxid)
- if reflect.ValueOf(node.Type).Type() != reflect.TypeOf(TestNode_Data) {
+ if reflect.ValueOf(node.Type).Type() != reflect.TypeOf(TestNodeDevice) {
t.Errorf("Node type does not match original data type: %+v", reflect.ValueOf(node.Type).Type())
- } else if node.GetBranch(TestNode_Txid) == nil || node.GetBranch(TestNode_Txid).Latest == nil {
- t.Errorf("No branch associated to txid: %s", TestNode_Txid)
- } else if node.GetBranch(TestNode_Txid).Latest == nil {
- t.Errorf("Branch has no latest revision : %s", TestNode_Txid)
- } else if node.GetBranch(TestNode_Txid).GetLatest().GetConfig() == nil {
- t.Errorf("Latest revision has no assigned data: %+v", node.GetBranch(TestNode_Txid).GetLatest())
+ } else if node.GetBranch(TestNodeTxid) == nil || node.GetBranch(TestNodeTxid).Latest == nil {
+ t.Errorf("No branch associated to txid: %s", TestNodeTxid)
+ } else if node.GetBranch(TestNodeTxid).Latest == nil {
+ t.Errorf("Branch has no latest revision : %s", TestNodeTxid)
+ } else if node.GetBranch(TestNodeTxid).GetLatest().GetConfig() == nil {
+ t.Errorf("Latest revision has no assigned data: %+v", node.GetBranch(TestNodeTxid).GetLatest())
}
t.Logf("Created new node successfully : %+v\n", node)
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index c7fb6ea..b796217 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package model
import (
@@ -20,14 +21,15 @@
"context"
"crypto/md5"
"fmt"
- "github.com/golang/protobuf/proto"
- "github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"reflect"
"sort"
"strings"
"sync"
"time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
)
// TODO: Cache logic will have to be revisited to cleanup unused entries in memory (disabled for now)
@@ -50,13 +52,14 @@
var revCacheInstance *revCacheSingleton
var revCacheOnce sync.Once
-func GetRevCache() *revCacheSingleton {
+func getRevCache() *revCacheSingleton {
revCacheOnce.Do(func() {
revCacheInstance = &revCacheSingleton{Cache: sync.Map{}}
})
return revCacheInstance
}
+// NonPersistedRevision -
type NonPersistedRevision struct {
mutex sync.RWMutex
Root *root
@@ -70,6 +73,7 @@
lastUpdate time.Time
}
+// NewNonPersistedRevision -
func NewNonPersistedRevision(root *root, branch *Branch, data interface{}, children map[string][]Revision) Revision {
r := &NonPersistedRevision{}
r.Root = root
@@ -80,18 +84,21 @@
return r
}
+// SetConfig -
func (npr *NonPersistedRevision) SetConfig(config *DataRevision) {
npr.mutex.Lock()
defer npr.mutex.Unlock()
npr.Config = config
}
+// GetConfig -
func (npr *NonPersistedRevision) GetConfig() *DataRevision {
npr.mutex.Lock()
defer npr.mutex.Unlock()
return npr.Config
}
+// SetAllChildren -
func (npr *NonPersistedRevision) SetAllChildren(children map[string][]Revision) {
npr.childrenLock.Lock()
defer npr.childrenLock.Unlock()
@@ -103,6 +110,7 @@
}
}
+// SetChildren -
func (npr *NonPersistedRevision) SetChildren(name string, children []Revision) {
npr.childrenLock.Lock()
defer npr.childrenLock.Unlock()
@@ -111,6 +119,7 @@
copy(npr.Children[name], children)
}
+// GetAllChildren -
func (npr *NonPersistedRevision) GetAllChildren() map[string][]Revision {
npr.childrenLock.Lock()
defer npr.childrenLock.Unlock()
@@ -118,6 +127,7 @@
return npr.Children
}
+// GetChildren -
func (npr *NonPersistedRevision) GetChildren(name string) []Revision {
npr.childrenLock.Lock()
defer npr.childrenLock.Unlock()
@@ -128,47 +138,56 @@
return nil
}
+// SetHash -
func (npr *NonPersistedRevision) SetHash(hash string) {
npr.mutex.Lock()
defer npr.mutex.Unlock()
npr.Hash = hash
}
+// GetHash -
func (npr *NonPersistedRevision) GetHash() string {
//npr.mutex.Lock()
//defer npr.mutex.Unlock()
return npr.Hash
}
+// ClearHash -
func (npr *NonPersistedRevision) ClearHash() {
npr.mutex.Lock()
defer npr.mutex.Unlock()
npr.Hash = ""
}
+// GetName -
func (npr *NonPersistedRevision) GetName() string {
//npr.mutex.Lock()
//defer npr.mutex.Unlock()
return npr.Name
}
+// SetName -
func (npr *NonPersistedRevision) SetName(name string) {
//npr.mutex.Lock()
//defer npr.mutex.Unlock()
npr.Name = name
}
+
+// SetBranch -
func (npr *NonPersistedRevision) SetBranch(branch *Branch) {
npr.mutex.Lock()
defer npr.mutex.Unlock()
npr.Branch = branch
}
+// GetBranch -
func (npr *NonPersistedRevision) GetBranch() *Branch {
npr.mutex.Lock()
defer npr.mutex.Unlock()
return npr.Branch
}
+// GetData -
func (npr *NonPersistedRevision) GetData() interface{} {
npr.mutex.Lock()
defer npr.mutex.Unlock()
@@ -178,12 +197,13 @@
return npr.Config.Data
}
-func (npr *NonPersistedRevision) GetNode() *node {
+func (npr *NonPersistedRevision) getNode() *node {
npr.mutex.Lock()
defer npr.mutex.Unlock()
return npr.Branch.Node
}
+// Finalize -
func (npr *NonPersistedRevision) Finalize(skipOnExist bool) {
npr.Hash = npr.hashContent()
}
@@ -243,8 +263,8 @@
childData := rev.Get(depth - 1)
foundEntry := false
for i := 0; i < childDataHolder.Len(); i++ {
- cdh_if := childDataHolder.Index(i).Interface()
- if cdh_if.(proto.Message).String() == childData.(proto.Message).String() {
+ cdhIf := childDataHolder.Index(i).Interface()
+ if cdhIf.(proto.Message).String() == childData.(proto.Message).String() {
foundEntry = true
break
}
@@ -255,7 +275,7 @@
}
}
} else {
- if revs := npr.GetBranch().GetLatest().GetChildren(fieldName); revs != nil && len(revs) > 0 {
+ if revs := npr.GetBranch().GetLatest().GetChildren(fieldName); len(revs) > 0 {
rev := revs[0]
if rev != nil {
childData := rev.Get(depth - 1)
@@ -374,7 +394,7 @@
})
// replace entry
- newChild.GetNode().SetRoot(existingChildren[nameIndex].GetNode().GetRoot())
+ newChild.getNode().SetRoot(existingChildren[nameIndex].getNode().GetRoot())
updatedChildren = append(updatedChildren, newChild)
} else {
log.Debugw("keeping-existing-child", log.Fields{
@@ -459,7 +479,7 @@
}
}
-/// ChildDropByName will remove a child entry matching the type and name
+// ChildDropByName will remove a child entry matching the type and name
func (npr *NonPersistedRevision) ChildDropByName(childName string) {
// Extract device type
parts := strings.SplitN(childName, "/", 2)
@@ -478,17 +498,19 @@
}
}
+// SetLastUpdate -
func (npr *NonPersistedRevision) SetLastUpdate(ts ...time.Time) {
npr.mutex.Lock()
defer npr.mutex.Unlock()
- if ts != nil && len(ts) > 0 {
+ if len(ts) > 0 {
npr.lastUpdate = ts[0]
} else {
npr.lastUpdate = time.Now()
}
}
+// GetLastUpdate -
func (npr *NonPersistedRevision) GetLastUpdate() time.Time {
npr.mutex.RLock()
defer npr.mutex.RUnlock()
@@ -501,10 +523,12 @@
return nil, nil
}
+// SetupWatch -
func (npr *NonPersistedRevision) SetupWatch(key string) {
// stub ... required by interface
}
+// StorageDrop -
func (npr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
// stub ... required by interface
}
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index e0fcb10..3214054 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -20,14 +20,15 @@
"bytes"
"compress/gzip"
"context"
+ "reflect"
+ "strings"
+ "sync"
+
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
"github.com/opencord/voltha-lib-go/v2/pkg/db"
"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
- "reflect"
- "strings"
- "sync"
)
// PersistedRevision holds information of revision meant to be saved in a persistent storage
@@ -51,7 +52,7 @@
var watchCacheInstance *watchCache
var watchCacheOne sync.Once
-func Watches() *watchCache {
+func watches() *watchCache {
watchCacheOne.Do(func() {
watchCacheInstance = &watchCache{Cache: sync.Map{}}
})
@@ -101,12 +102,14 @@
if pr.Compress {
var b bytes.Buffer
w := gzip.NewWriter(&b)
- w.Write(blob)
+ if _, err := w.Write(blob); err != nil {
+ log.Errorw("Unable to write a compressed form of p to the underlying io.Writer.", log.Fields{"error": err})
+ }
w.Close()
blob = b.Bytes()
}
- GetRevCache().Set(pr.GetName(), pr)
+ getRevCache().Set(pr.GetName(), pr)
if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
} else {
@@ -116,13 +119,14 @@
}
}
+// SetupWatch -
func (pr *PersistedRevision) SetupWatch(key string) {
if key == "" {
log.Debugw("ignoring-watch", log.Fields{"key": key, "revision-hash": pr.GetHash()})
return
}
- if _, exists := Watches().Cache.LoadOrStore(key+"-"+pr.GetHash(), struct{}{}); exists {
+ if _, exists := watches().Cache.LoadOrStore(key+"-"+pr.GetHash(), struct{}{}); exists {
return
}
@@ -151,111 +155,114 @@
StopWatchLoop:
for {
latestRev := pr.GetBranch().GetLatest()
+ event, ok := <-pr.events
+ if !ok {
+ log.Errorw("event-channel-failure: stopping watch loop", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+ break StopWatchLoop
+ }
+ log.Debugw("received-event", log.Fields{"type": event.EventType, "watch": latestRev.GetName()})
- select {
- case event, ok := <-pr.events:
- if !ok {
- log.Errorw("event-channel-failure: stopping watch loop", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
- break StopWatchLoop
+ switch event.EventType {
+ case kvstore.DELETE:
+ log.Debugw("delete-from-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+
+ // Remove reference from cache
+ getRevCache().Delete(latestRev.GetName())
+
+ // Remove reference from parent
+ parent := pr.GetBranch().Node.GetRoot()
+ parent.GetBranch(NONE).Latest.ChildDropByName(latestRev.GetName())
+
+ break StopWatchLoop
+
+ case kvstore.PUT:
+ log.Debugw("update-in-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+ if latestRev.getVersion() >= event.Version {
+ log.Debugw("skipping-matching-or-older-revision", log.Fields{
+ "watch": latestRev.GetName(),
+ "watch-version": event.Version,
+ "latest-version": latestRev.getVersion(),
+ })
+ continue
+ } else {
+ log.Debugw("watch-revision-is-newer", log.Fields{
+ "watch": latestRev.GetName(),
+ "watch-version": event.Version,
+ "latest-version": latestRev.getVersion(),
+ })
}
- log.Debugw("received-event", log.Fields{"type": event.EventType, "watch": latestRev.GetName()})
- switch event.EventType {
- case kvstore.DELETE:
- log.Debugw("delete-from-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
+ data := reflect.New(reflect.TypeOf(latestRev.GetData()).Elem())
- // Remove reference from cache
- GetRevCache().Delete(latestRev.GetName())
+ if err := proto.Unmarshal(event.Value.([]byte), data.Interface().(proto.Message)); err != nil {
+ log.Errorw("failed-to-unmarshal-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "error": err})
+ } else {
+ log.Debugw("un-marshaled-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "data": data.Interface()})
- // Remove reference from parent
- parent := pr.GetBranch().Node.GetRoot()
- parent.GetBranch(NONE).Latest.ChildDropByName(latestRev.GetName())
+ var pathLock string
- break StopWatchLoop
-
- case kvstore.PUT:
- log.Debugw("update-in-memory", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
- if latestRev.getVersion() >= event.Version {
- log.Debugw("skipping-matching-or-older-revision", log.Fields{
- "watch": latestRev.GetName(),
- "watch-version": event.Version,
- "latest-version": latestRev.getVersion(),
- })
- continue
- } else {
- log.Debugw("watch-revision-is-newer", log.Fields{
- "watch": latestRev.GetName(),
- "watch-version": event.Version,
- "latest-version": latestRev.getVersion(),
- })
+ // The watch reported new persistence data.
+ // Construct an object that will be used to update the memory
+ blobs := make(map[string]*kvstore.KVPair)
+ key, _ := kvstore.ToString(event.Key)
+ blobs[key] = &kvstore.KVPair{
+ Key: key,
+ Value: event.Value,
+ Session: "",
+ Lease: 0,
+ Version: event.Version,
}
- data := reflect.New(reflect.TypeOf(latestRev.GetData()).Elem())
+ if latestRev.getNode().GetProxy() != nil {
+ //
+ // If a proxy exists for this revision, use it to lock access to the path
+ // and prevent simultaneous updates to the object in memory
+ //
- if err := proto.Unmarshal(event.Value.([]byte), data.Interface().(proto.Message)); err != nil {
- log.Errorw("failed-to-unmarshal-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "error": err})
- } else {
- log.Debugw("un-marshaled-watch-data", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "data": data.Interface()})
-
- var pathLock string
- var blobs map[string]*kvstore.KVPair
-
- // The watch reported new persistence data.
- // Construct an object that will be used to update the memory
- blobs = make(map[string]*kvstore.KVPair)
- key, _ := kvstore.ToString(event.Key)
- blobs[key] = &kvstore.KVPair{
- Key: key,
- Value: event.Value,
- Session: "",
- Lease: 0,
- Version: event.Version,
+ //If the proxy already has a request in progress, then there is no need to process the watch
+ if latestRev.getNode().GetProxy().GetOperation() != ProxyNone {
+ log.Debugw("operation-in-progress", log.Fields{
+ "key": latestRev.GetHash(),
+ "path": latestRev.getNode().GetProxy().getFullPath(),
+ "operation": latestRev.getNode().GetProxy().operation.String(),
+ })
+ continue
}
- if latestRev.GetNode().GetProxy() != nil {
- //
- // If a proxy exists for this revision, use it to lock access to the path
- // and prevent simultaneous updates to the object in memory
- //
+ pathLock, _ = latestRev.getNode().GetProxy().parseForControlledPath(latestRev.getNode().GetProxy().getFullPath())
- //If the proxy already has a request in progress, then there is no need to process the watch
- if latestRev.GetNode().GetProxy().GetOperation() != PROXY_NONE {
- log.Debugw("operation-in-progress", log.Fields{
- "key": latestRev.GetHash(),
- "path": latestRev.GetNode().GetProxy().getFullPath(),
- "operation": latestRev.GetNode().GetProxy().operation.String(),
- })
- continue
- }
+ // Reserve the path to prevent others from modifying it while we reload from persistence
+ if _, err = latestRev.getNode().GetProxy().getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ log.Errorw("Unable to acquire a key and set it to a given value", log.Fields{"error": err})
+ }
+ latestRev.getNode().GetProxy().SetOperation(ProxyWatch)
- pathLock, _ = latestRev.GetNode().GetProxy().parseForControlledPath(latestRev.GetNode().GetProxy().getFullPath())
+ // Load changes and apply to memory
+ if _, err = latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs); err != nil {
+ log.Errorw("Unable to refresh the memory by adding missing entries", log.Fields{"error": err})
+ }
- // Reserve the path to prevent others to modify while we reload from persistence
- latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL)
- latestRev.GetNode().GetProxy().SetOperation(PROXY_WATCH)
+ // Release path
+ if err = latestRev.getNode().GetProxy().getRoot().KvStore.Client.ReleaseReservation(pathLock + "_"); err != nil {
+ log.Errorw("Unable to release reservation for a specific key", log.Fields{"error": err})
+ }
+ } else {
+ // This block should be reached only if coming from a non-proxied request
+ log.Debugw("revision-with-no-proxy", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
- // Load changes and apply to memory
- latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
-
- // Release path
- latestRev.GetNode().GetProxy().GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
-
- } else {
- // This block should be reached only if coming from a non-proxied request
- log.Debugw("revision-with-no-proxy", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName()})
-
- // Load changes and apply to memory
- latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs)
+ // Load changes and apply to memory
+ if _, err = latestRev.LoadFromPersistence(context.Background(), latestRev.GetName(), "", blobs); err != nil {
+ log.Errorw("Unable to refresh the memory by adding missing entries", log.Fields{"error": err})
}
}
-
- default:
- log.Debugw("unhandled-event", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "type": event.EventType})
}
+
+ default:
+ log.Debugw("unhandled-event", log.Fields{"key": latestRev.GetHash(), "watch": latestRev.GetName(), "type": event.EventType})
}
}
- Watches().Cache.Delete(pr.GetName() + "-" + pr.GetHash())
+ watches().Cache.Delete(pr.GetName() + "-" + pr.GetHash())
log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
}
@@ -342,7 +349,7 @@
pr.Revision.Drop(txid, includeConfig)
}
-// Drop takes care of eliminating a revision hash that is no longer needed
+// StorageDrop takes care of eliminating a revision hash that is no longer needed
// and its associated config when required
func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
log.Debugw("dropping-revision", log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "key": pr.GetName(), "isStored": pr.isStored})
@@ -381,7 +388,7 @@
copy(children, parent.GetBranch(NONE).Latest.GetChildren(typeName))
// Verify if a child with the provided key value can be found
- if childIdx, childRev := pr.GetNode().findRevByKey(children, keyName, keyValue); childRev != nil {
+ if childIdx, childRev := pr.getNode().findRevByKey(children, keyName, keyValue); childRev != nil {
// A child matching the provided key exists in memory
// Verify if the data differs from what was retrieved from persistence
// Also check if we are treating a newer revision of the data or not
@@ -404,13 +411,13 @@
// Update child
updatedChildRev := childRev.UpdateData(ctx, data, childRev.GetBranch())
- updatedChildRev.GetNode().SetProxy(childRev.GetNode().GetProxy())
+ updatedChildRev.getNode().SetProxy(childRev.getNode().GetProxy())
updatedChildRev.SetupWatch(updatedChildRev.GetName())
updatedChildRev.SetLastUpdate()
updatedChildRev.(*PersistedRevision).setVersion(version)
// Update cache
- GetRevCache().Set(updatedChildRev.GetName(), updatedChildRev)
+ getRevCache().Set(updatedChildRev.GetName(), updatedChildRev)
childRev.Drop(txid, false)
childRev.GetBranch().LatestLock.Unlock()
@@ -440,23 +447,21 @@
response = updatedChildRev
}
} else {
- if childRev != nil {
- log.Debugw("keeping-revision-data", log.Fields{
- "key": childRev.GetHash(),
- "name": childRev.GetName(),
- "data": childRev.GetData(),
- "in-memory-version": childRev.getVersion(),
- "persistence-version": version,
- })
+ log.Debugw("keeping-revision-data", log.Fields{
+ "key": childRev.GetHash(),
+ "name": childRev.GetName(),
+ "data": childRev.GetData(),
+ "in-memory-version": childRev.getVersion(),
+ "persistence-version": version,
+ })
- // Update timestamp to reflect when it was last read and to reset tracked timeout
- childRev.SetLastUpdate()
- if childRev.getVersion() < version {
- childRev.(*PersistedRevision).setVersion(version)
- }
- GetRevCache().Set(childRev.GetName(), childRev)
- response = childRev
+ // Update timestamp to reflect when it was last read and to reset tracked timeout
+ childRev.SetLastUpdate()
+ if childRev.getVersion() < version {
+ childRev.(*PersistedRevision).setVersion(version)
}
+ getRevCache().Set(childRev.GetName(), childRev)
+ response = childRev
}
} else {
@@ -481,7 +486,7 @@
childRev.(*PersistedRevision).setVersion(version)
// Add entry to cache
- GetRevCache().Set(childRev.GetName(), childRev)
+ getRevCache().Set(childRev.GetName(), childRev)
pr.GetBranch().LatestLock.Unlock()
// END child lock
@@ -494,7 +499,7 @@
parent.GetBranch(NONE).LatestLock.Lock()
children = append(children, childRev)
updatedRev := parent.GetBranch(NONE).GetLatest().UpdateChildren(ctx, typeName, children, parent.GetBranch(NONE))
- updatedRev.GetNode().SetProxy(parent.GetBranch(NONE).Node.GetProxy())
+ updatedRev.getNode().SetProxy(parent.GetBranch(NONE).Node.GetProxy())
parent.GetBranch(NONE).Node.makeLatest(parent.GetBranch(NONE), updatedRev, nil)
parent.GetBranch(NONE).LatestLock.Unlock()
// END parent lock
@@ -529,7 +534,7 @@
}
if pr.kvStore != nil && path != "" {
- if blobs == nil || len(blobs) == 0 {
+ if len(blobs) == 0 {
log.Debugw("retrieve-from-kv", log.Fields{"path": path, "txid": txid})
if blobs, err = pr.kvStore.List(path); err != nil {
diff --git a/db/model/profiling.go b/db/model/profiling.go
index f8e9f7a..702f295 100644
--- a/db/model/profiling.go
+++ b/db/model/profiling.go
@@ -17,12 +17,13 @@
package model
import (
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"sync"
+
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
)
// Profiling is used to store performance details collected at runtime
-type profiling struct {
+type Profiling struct {
sync.RWMutex
DatabaseRetrieveTime float64
DatabaseRetrieveCount int
@@ -34,19 +35,19 @@
InMemoryLockCount int
}
-var profilingInstance *profiling
+var profilingInstance *Profiling
var profilingOnce sync.Once
// GetProfiling returns a singleton instance of the Profiling structure
-func GetProfiling() *profiling {
+func GetProfiling() *Profiling {
profilingOnce.Do(func() {
- profilingInstance = &profiling{}
+ profilingInstance = &Profiling{}
})
return profilingInstance
}
// AddToDatabaseRetrieveTime appends a time period to retrieve data from the database
-func (p *profiling) AddToDatabaseRetrieveTime(period float64) {
+func (p *Profiling) AddToDatabaseRetrieveTime(period float64) {
p.Lock()
defer p.Unlock()
@@ -55,7 +56,7 @@
}
// AddToInMemoryModelTime appends a time period to construct/deconstruct data in memory
-func (p *profiling) AddToInMemoryModelTime(period float64) {
+func (p *Profiling) AddToInMemoryModelTime(period float64) {
p.Lock()
defer p.Unlock()
@@ -64,7 +65,7 @@
}
// AddToInMemoryProcessTime appends a time period to process data
-func (p *profiling) AddToInMemoryProcessTime(period float64) {
+func (p *Profiling) AddToInMemoryProcessTime(period float64) {
p.Lock()
defer p.Unlock()
@@ -72,7 +73,7 @@
}
// AddToDatabaseStoreTime appends a time period to store data in the database
-func (p *profiling) AddToDatabaseStoreTime(period float64) {
+func (p *Profiling) AddToDatabaseStoreTime(period float64) {
p.Lock()
defer p.Unlock()
@@ -80,7 +81,7 @@
}
// AddToInMemoryLockTime appends a time period when a code block was locked
-func (p *profiling) AddToInMemoryLockTime(period float64) {
+func (p *Profiling) AddToInMemoryLockTime(period float64) {
p.Lock()
defer p.Unlock()
@@ -89,7 +90,7 @@
}
// Reset initializes the profile counters
-func (p *profiling) Reset() {
+func (p *Profiling) Reset() {
p.Lock()
defer p.Unlock()
@@ -104,7 +105,7 @@
}
// Report will provide the current profile counter status
-func (p *profiling) Report() {
+func (p *Profiling) Report() {
p.Lock()
defer p.Unlock()
diff --git a/db/model/profiling_test.go b/db/model/profiling_test.go
index e73ccf6..ebdd09c 100644
--- a/db/model/profiling_test.go
+++ b/db/model/profiling_test.go
@@ -16,14 +16,15 @@
package model
import (
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
- "github.com/stretchr/testify/assert"
"reflect"
"testing"
+
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+ "github.com/stretchr/testify/assert"
)
func TestProfiling(t *testing.T) {
- want := &profiling{}
+ want := &Profiling{}
result := GetProfiling()
if reflect.TypeOf(result) != reflect.TypeOf(want) {
t.Errorf("GetProfiling() = result: %v, want: %v", result, want)
diff --git a/db/model/proxy.go b/db/model/proxy.go
index a7eedda..f4c8bbc 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -21,12 +21,13 @@
"crypto/md5"
"errors"
"fmt"
- "github.com/google/uuid"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"reflect"
"runtime"
"strings"
"sync"
+
+ "github.com/google/uuid"
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
)
// OperationContext holds details on the information used during an operation
@@ -85,8 +86,8 @@
return p
}
-// GetRoot returns the root attribute of the proxy
-func (p *Proxy) GetRoot() *root {
+// getRoot returns the root attribute of the proxy
+func (p *Proxy) getRoot() *root {
return p.Root
}
@@ -146,19 +147,19 @@
delete(p.Callbacks[callbackType], funcHash)
}
-// CallbackType is an enumerated value to express when a callback should be executed
+// ProxyOperation is an enumerated value used to identify the operation being performed through a proxy
type ProxyOperation uint8
// Enumerated list of callback types
const (
- PROXY_NONE ProxyOperation = iota
- PROXY_GET
- PROXY_LIST
- PROXY_ADD
- PROXY_UPDATE
- PROXY_REMOVE
- PROXY_CREATE
- PROXY_WATCH
+ ProxyNone ProxyOperation = iota
+ ProxyGet
+ ProxyList
+ ProxyAdd
+ ProxyUpdate
+ ProxyRemove
+ ProxyCreate
+ ProxyWatch
)
var proxyOperationTypes = []string{
@@ -176,12 +177,14 @@
return proxyOperationTypes[t]
}
+// GetOperation -
func (p *Proxy) GetOperation() ProxyOperation {
p.mutex.RLock()
defer p.mutex.RUnlock()
return p.operation
}
+// SetOperation -
func (p *Proxy) SetOperation(operation ProxyOperation) {
p.mutex.Lock()
defer p.mutex.Unlock()
@@ -201,7 +204,6 @@
case 2:
controlled = false
pathLock = ""
- break
case 3:
fallthrough
default:
@@ -224,8 +226,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_LIST)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyList)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-list", log.Fields{
"path": path,
@@ -234,7 +236,7 @@
"controlled": controlled,
"operation": p.GetOperation(),
})
- return p.GetRoot().List(ctx, path, "", depth, deep, txid)
+ return p.getRoot().List(ctx, path, "", depth, deep, txid)
}
// Get will retrieve information from the data model at the specified path location
@@ -248,8 +250,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_GET)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyGet)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-get", log.Fields{
"path": path,
@@ -259,7 +261,7 @@
"operation": p.GetOperation(),
})
- return p.GetRoot().Get(ctx, path, "", depth, deep, txid)
+ return p.getRoot().Get(ctx, path, "", depth, deep, txid)
}
// Update will modify information in the data model at the specified location with the provided data
@@ -280,8 +282,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_UPDATE)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyUpdate)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-update", log.Fields{
"path": path,
@@ -292,15 +294,20 @@
"operation": p.GetOperation(),
})
- if p.GetRoot().KvStore != nil {
- if _, err := p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ if p.getRoot().KvStore != nil {
+ if _, err := p.getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
log.Errorw("unable-to-acquire-key-from-kvstore", log.Fields{"error": err})
return nil, err
}
- defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ defer func() {
+ err := p.getRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ if err != nil {
+ log.Errorw("Unable to release reservation for key", log.Fields{"error": err})
+ }
+ }()
}
- result := p.GetRoot().Update(ctx, fullPath, data, strict, txid, nil)
+ result := p.getRoot().Update(ctx, fullPath, data, strict, txid, nil)
if result != nil {
return result.GetData(), nil
@@ -329,8 +336,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_ADD)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyAdd)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-add-with-id", log.Fields{
"path": path,
@@ -341,15 +348,20 @@
"operation": p.GetOperation(),
})
- if p.GetRoot().KvStore != nil {
- if _, err := p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ if p.getRoot().KvStore != nil {
+ if _, err := p.getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
log.Errorw("unable-to-acquire-key-from-kvstore", log.Fields{"error": err})
return nil, err
}
- defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ defer func() {
+ err := p.getRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ if err != nil {
+ log.Errorw("Unable to release reservation for key", log.Fields{"error": err})
+ }
+ }()
}
- result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
+ result := p.getRoot().Add(ctx, fullPath, data, txid, nil)
if result != nil {
return result.GetData(), nil
@@ -376,8 +388,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_ADD)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyAdd)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-add", log.Fields{
"path": path,
@@ -388,15 +400,20 @@
"operation": p.GetOperation(),
})
- if p.GetRoot().KvStore != nil {
- if _, err := p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ if p.getRoot().KvStore != nil {
+ if _, err := p.getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
log.Errorw("unable-to-acquire-key-from-kvstore", log.Fields{"error": err})
return nil, err
}
- defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ defer func() {
+ err := p.getRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ if err != nil {
+ log.Errorw("Unable to release reservation for key", log.Fields{"error": err})
+ }
+ }()
}
- result := p.GetRoot().Add(ctx, fullPath, data, txid, nil)
+ result := p.getRoot().Add(ctx, fullPath, data, txid, nil)
if result != nil {
return result.GetData(), nil
@@ -423,8 +440,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_REMOVE)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyRemove)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-remove", log.Fields{
"path": path,
@@ -435,15 +452,20 @@
"operation": p.GetOperation(),
})
- if p.GetRoot().KvStore != nil {
- if _, err := p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ if p.getRoot().KvStore != nil {
+ if _, err := p.getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
log.Errorw("unable-to-acquire-key-from-kvstore", log.Fields{"error": err})
return nil, err
}
- defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ defer func() {
+ err := p.getRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ if err != nil {
+ log.Errorw("Unable to release reservation for key", log.Fields{"error": err})
+ }
+ }()
}
- result := p.GetRoot().Remove(ctx, fullPath, txid, nil)
+ result := p.getRoot().Remove(ctx, fullPath, txid, nil)
if result != nil {
return result.GetData(), nil
@@ -471,8 +493,8 @@
pathLock, controlled := p.parseForControlledPath(effectivePath)
- p.SetOperation(PROXY_CREATE)
- defer p.SetOperation(PROXY_NONE)
+ p.SetOperation(ProxyCreate)
+ defer p.SetOperation(ProxyNone)
log.Debugw("proxy-create", log.Fields{
"path": path,
@@ -483,30 +505,35 @@
"operation": p.GetOperation(),
})
- if p.GetRoot().KvStore != nil {
- if _, err := p.GetRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
+ if p.getRoot().KvStore != nil {
+ if _, err := p.getRoot().KvStore.Client.Reserve(pathLock+"_", uuid.New().String(), ReservationTTL); err != nil {
log.Errorw("unable-to-acquire-key-from-kvstore", log.Fields{"error": err})
return nil, err
}
- defer p.GetRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ defer func() {
+ err := p.getRoot().KvStore.Client.ReleaseReservation(pathLock + "_")
+ if err != nil {
+ log.Errorw("Unable to release reservation for key", log.Fields{"error": err})
+ }
+ }()
}
- return p.GetRoot().CreateProxy(ctx, fullPath, exclusive)
+ return p.getRoot().CreateProxy(ctx, fullPath, exclusive)
}
// OpenTransaction creates a new transaction branch to isolate operations made to the data model
func (p *Proxy) OpenTransaction() *Transaction {
- txid := p.GetRoot().MakeTxBranch()
+ txid := p.getRoot().MakeTxBranch()
return NewTransaction(p, txid)
}
// commitTransaction will apply and merge modifications made in the transaction branch to the data model
func (p *Proxy) commitTransaction(txid string) {
- p.GetRoot().FoldTxBranch(txid)
+ p.getRoot().FoldTxBranch(txid)
}
// cancelTransaction will terminate a transaction branch along will all changes within it
func (p *Proxy) cancelTransaction(txid string) {
- p.GetRoot().DeleteTxBranch(txid)
+ p.getRoot().DeleteTxBranch(txid)
}
// CallbackFunction is a type used to define callback functions
@@ -522,15 +549,9 @@
func (tuple *CallbackTuple) Execute(contextArgs []interface{}) interface{} {
args := []interface{}{}
- for _, ta := range tuple.args {
- args = append(args, ta)
- }
+ args = append(args, tuple.args...)
- if contextArgs != nil {
- for _, ca := range contextArgs {
- args = append(args, ca)
- }
- }
+ args = append(args, contextArgs...)
return tuple.callback(args...)
}
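
The wrapped defer above is the errcheck-style fix this commit applies in three places in proxy.go: a bare defer of ReleaseReservation(...) throws away the returned error, so the release call moves into a closure that logs it. A minimal, self-contained sketch of the same shape; the reservationClient interface, fakeClient and withReservation helper are invented for illustration and are not the voltha-lib-go kvstore API:

package main

import "log"

// reservationClient is an illustrative stand-in for the kvstore client used in
// proxy.go; only the two calls touched by this diff are modelled.
type reservationClient interface {
	Reserve(key, value string, ttl int64) (interface{}, error)
	ReleaseReservation(key string) error
}

// withReservation mirrors the shape of the change: the release is wrapped in a
// deferred closure so its error is logged rather than silently dropped.
func withReservation(c reservationClient, key, value string, ttl int64, work func() error) error {
	if _, err := c.Reserve(key, value, ttl); err != nil {
		return err
	}
	defer func() {
		if err := c.ReleaseReservation(key); err != nil {
			log.Printf("unable to release reservation for key %s: %v", key, err)
		}
	}()
	return work()
}

// fakeClient lets the sketch run without a real KV store.
type fakeClient struct{}

func (fakeClient) Reserve(key, value string, ttl int64) (interface{}, error) { return value, nil }
func (fakeClient) ReleaseReservation(key string) error                       { return nil }

func main() {
	_ = withReservation(fakeClient{}, "path-lock_", "owner-id", 10, func() error {
		log.Println("critical section runs while the reservation is held")
		return nil
	})
}
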
diff --git a/db/model/proxy_load_test.go b/db/model/proxy_load_test.go
index e5ae1c1..9c264e4 100644
--- a/db/model/proxy_load_test.go
+++ b/db/model/proxy_load_test.go
@@ -18,24 +18,25 @@
import (
"context"
"encoding/hex"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "sync"
+ "testing"
+
"github.com/google/uuid"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
"github.com/opencord/voltha-protos/v2/go/common"
"github.com/opencord/voltha-protos/v2/go/openflow_13"
"github.com/opencord/voltha-protos/v2/go/voltha"
"github.com/stretchr/testify/assert"
- "math/rand"
- "reflect"
- "strconv"
- "sync"
- "testing"
)
var (
- BenchmarkProxy_Root *root
- BenchmarkProxy_DeviceProxy *Proxy
- BenchmarkProxy_PLT *proxyLoadTest
- BenchmarkProxy_Logger log.Logger
+ BenchmarkProxyRoot Root
+ BenchmarkProxyDeviceProxy *Proxy
+ BenchmarkProxyPLT *proxyLoadTest
+ BenchmarkProxyLogger log.Logger
)
type proxyLoadChanges struct {
@@ -83,9 +84,9 @@
func init() {
var err error
- BenchmarkProxy_Root = NewRoot(&voltha.Voltha{}, nil)
+ BenchmarkProxyRoot = NewRoot(&voltha.Voltha{}, nil)
- BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
+ BenchmarkProxyLogger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
//Setup default logger - applies for packages that do not have specific logger set
if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"}); err != nil {
@@ -98,18 +99,18 @@
}
log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
- if BenchmarkProxy_DeviceProxy, err = BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false); err != nil {
+ if BenchmarkProxyDeviceProxy, err = BenchmarkProxyRoot.CreateProxy(context.Background(), "/", false); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create benchmark proxy")
}
// Register ADD instructions callbacks
- BenchmarkProxy_PLT = &proxyLoadTest{}
+ BenchmarkProxyPLT = &proxyLoadTest{}
- BenchmarkProxy_DeviceProxy.RegisterCallback(PRE_ADD, commonCallbackFunc, "PRE_ADD", BenchmarkProxy_PLT.SetPreAddExecuted)
- BenchmarkProxy_DeviceProxy.RegisterCallback(POST_ADD, commonCallbackFunc, "POST_ADD", BenchmarkProxy_PLT.SetPostAddExecuted)
+ BenchmarkProxyDeviceProxy.RegisterCallback(PreAdd, commonCallbackFunc, "PreAdd", BenchmarkProxyPLT.SetPreAddExecuted)
+ BenchmarkProxyDeviceProxy.RegisterCallback(PostAdd, commonCallbackFunc, "PostAdd", BenchmarkProxyPLT.SetPostAddExecuted)
//// Register UPDATE instructions callbacks
- BenchmarkProxy_DeviceProxy.RegisterCallback(PRE_UPDATE, commonCallbackFunc, "PRE_UPDATE", BenchmarkProxy_PLT.SetPreUpdateExecuted)
- BenchmarkProxy_DeviceProxy.RegisterCallback(POST_UPDATE, commonCallbackFunc, "POST_UPDATE", BenchmarkProxy_PLT.SetPostUpdateExecuted)
+ BenchmarkProxyDeviceProxy.RegisterCallback(PreUpdate, commonCallbackFunc, "PreUpdate", BenchmarkProxyPLT.SetPreUpdateExecuted)
+ BenchmarkProxyDeviceProxy.RegisterCallback(PostUpdate, commonCallbackFunc, "PostUpdate", BenchmarkProxyPLT.SetPostUpdateExecuted)
}
@@ -150,28 +151,28 @@
ltDevID := "0001" + hex.EncodeToString(ltDevIDBin)[:12]
ltDevice.Id = ltDevID
- BenchmarkProxy_PLT.SetPreAddExecuted(false)
- BenchmarkProxy_PLT.SetPostAddExecuted(false)
+ BenchmarkProxyPLT.SetPreAddExecuted(false)
+ BenchmarkProxyPLT.SetPostAddExecuted(false)
var added interface{}
// Add the device
- if added, err = BenchmarkProxy_DeviceProxy.AddWithID(context.Background(), "/devices", ltDevID, ltDevice, ""); err != nil {
+ if added, err = BenchmarkProxyDeviceProxy.AddWithID(context.Background(), "/devices", ltDevID, ltDevice, ""); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create proxy")
}
if added == nil {
- BenchmarkProxy_Logger.Errorf("Failed to add device: %+v", ltDevice)
+ BenchmarkProxyLogger.Errorf("Failed to add device: %+v", ltDevice)
continue
} else {
- BenchmarkProxy_Logger.Infof("Device was added 1: %+v", added)
+ BenchmarkProxyLogger.Infof("Device was added 1: %+v", added)
}
- BenchmarkProxy_PLT.addMutex.Lock()
- BenchmarkProxy_PLT.addedDevices = append(BenchmarkProxy_PLT.addedDevices, added.(*voltha.Device).Id)
- BenchmarkProxy_PLT.addMutex.Unlock()
+ BenchmarkProxyPLT.addMutex.Lock()
+ BenchmarkProxyPLT.addedDevices = append(BenchmarkProxyPLT.addedDevices, added.(*voltha.Device).Id)
+ BenchmarkProxyPLT.addMutex.Unlock()
}
})
- BenchmarkProxy_Logger.Infof("Number of added devices : %d", len(BenchmarkProxy_PLT.addedDevices))
+ BenchmarkProxyLogger.Infof("Number of added devices : %d", len(BenchmarkProxyPLT.addedDevices))
}
func BenchmarkProxy_UpdateFirmware(b *testing.B) {
@@ -179,28 +180,28 @@
for pb.Next() {
//for i:=0; i < b.N; i++ {
- if len(BenchmarkProxy_PLT.addedDevices) > 0 {
+ if len(BenchmarkProxyPLT.addedDevices) > 0 {
var target interface{}
- randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
- firmProxy, err := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
+ randomID := BenchmarkProxyPLT.addedDevices[rand.Intn(len(BenchmarkProxyPLT.addedDevices))]
+ firmProxy, err := BenchmarkProxyRoot.CreateProxy(context.Background(), "/", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create firmware proxy")
}
target, err = firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
"")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to create target due to error %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to create target due to error %v", err)
assert.NotNil(b, err)
}
if !reflect.ValueOf(target).IsValid() {
- BenchmarkProxy_Logger.Errorf("Failed to find device: %s %+v", randomID, target)
+ BenchmarkProxyLogger.Errorf("Failed to find device: %s %+v", randomID, target)
continue
}
- BenchmarkProxy_PLT.SetPreUpdateExecuted(false)
- BenchmarkProxy_PLT.SetPostUpdateExecuted(false)
- firmProxy.RegisterCallback(PRE_UPDATE, commonCallbackFunc, "PRE_UPDATE", BenchmarkProxy_PLT.SetPreUpdateExecuted)
- firmProxy.RegisterCallback(POST_UPDATE, commonCallbackFunc, "POST_UPDATE", BenchmarkProxy_PLT.SetPostUpdateExecuted)
+ BenchmarkProxyPLT.SetPreUpdateExecuted(false)
+ BenchmarkProxyPLT.SetPostUpdateExecuted(false)
+ firmProxy.RegisterCallback(PreUpdate, commonCallbackFunc, "PreUpdate", BenchmarkProxyPLT.SetPreUpdateExecuted)
+ firmProxy.RegisterCallback(PostUpdate, commonCallbackFunc, "PostUpdate", BenchmarkProxyPLT.SetPostUpdateExecuted)
var fwVersion int
@@ -217,83 +218,58 @@
var updated interface{}
if updated, err = firmProxy.Update(context.Background(), "/devices/"+randomID, target.(*voltha.Device), false, ""); err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to update firmware proxy due to error %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to update firmware proxy due to error %v", err)
assert.NotNil(b, err)
}
if updated == nil {
- BenchmarkProxy_Logger.Errorf("Failed to update device: %+v", target)
+ BenchmarkProxyLogger.Errorf("Failed to update device: %+v", target)
continue
} else {
- BenchmarkProxy_Logger.Infof("Device was updated : %+v", updated)
+ BenchmarkProxyLogger.Infof("Device was updated : %+v", updated)
}
d, err := firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from firmware proxy due to error %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from firmware proxy due to error %v", err)
assert.NotNil(b, err)
}
if !reflect.ValueOf(d).IsValid() {
- BenchmarkProxy_Logger.Errorf("Failed to get device: %s", randomID)
+ BenchmarkProxyLogger.Errorf("Failed to get device: %s", randomID)
continue
} else if d.(*voltha.Device).FirmwareVersion == after {
- BenchmarkProxy_Logger.Infof("Imm Device was updated with new value: %s %+v", randomID, d)
+ BenchmarkProxyLogger.Infof("Imm Device was updated with new value: %s %+v", randomID, d)
} else if d.(*voltha.Device).FirmwareVersion == before {
- BenchmarkProxy_Logger.Errorf("Imm Device kept old value: %s %+v %+v", randomID, d, target)
+ BenchmarkProxyLogger.Errorf("Imm Device kept old value: %s %+v %+v", randomID, d, target)
} else {
- BenchmarkProxy_Logger.Errorf("Imm Device has unknown value: %s %+v %+v", randomID, d, target)
+ BenchmarkProxyLogger.Errorf("Imm Device has unknown value: %s %+v %+v", randomID, d, target)
}
- BenchmarkProxy_PLT.firmwareMutex.Lock()
+ BenchmarkProxyPLT.firmwareMutex.Lock()
- BenchmarkProxy_PLT.updatedFirmwares = append(
- BenchmarkProxy_PLT.updatedFirmwares,
+ BenchmarkProxyPLT.updatedFirmwares = append(
+ BenchmarkProxyPLT.updatedFirmwares,
proxyLoadChanges{ID: randomID, Before: before, After: after},
)
- BenchmarkProxy_PLT.firmwareMutex.Unlock()
+ BenchmarkProxyPLT.firmwareMutex.Unlock()
}
}
})
}
-func traverseBranches(revision Revision, depth int) {
- if revision == nil {
- return
- }
- prefix := strconv.Itoa(depth) + " ~~~~ "
- for i := 0; i < depth; i++ {
- prefix += " "
- }
-
- BenchmarkProxy_Logger.Debugf("%sRevision: %s %+v", prefix, revision.GetHash(), revision.GetData())
-
- //for brIdx, brRev := range revision.GetBranch().Revisions {
- // BenchmarkProxy_Logger.Debugf("%sbranchIndex: %s", prefix, brIdx)
- // traverseBranches(brRev, depth+1)
- //}
- for childrenI, children := range revision.GetAllChildren() {
- BenchmarkProxy_Logger.Debugf("%schildrenIndex: %s, length: %d", prefix, childrenI, len(children))
-
- for _, subrev := range children {
- //subrev.GetBranch().Latest
- traverseBranches(subrev, depth+1)
- }
- }
-
-}
func BenchmarkProxy_UpdateFlows(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if len(BenchmarkProxy_PLT.addedDevices) > 0 {
- randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
+ if len(BenchmarkProxyPLT.addedDevices) > 0 {
+ randomID := BenchmarkProxyPLT.addedDevices[rand.Intn(len(BenchmarkProxyPLT.addedDevices))]
- flowsProxy, err := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/devices/"+randomID+"/flows", false)
+ flowsProxy, err := BenchmarkProxyRoot.CreateProxy(context.Background(), "/devices/"+randomID+"/flows", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create flows proxy")
}
flows, err := flowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from flows proxy due to error: %v", err)
assert.NotNil(b, err)
}
@@ -302,30 +278,30 @@
after := flows.(*openflow_13.Flows).Items[0].TableId
flowsProxy.RegisterCallback(
- PRE_UPDATE,
+ PreUpdate,
commonCallback2,
)
flowsProxy.RegisterCallback(
- POST_UPDATE,
+ PostUpdate,
commonCallback2,
)
var updated interface{}
if updated, err = flowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); err != nil {
- BenchmarkProxy_Logger.Errorf("Cannot update flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Cannot update flows proxy due to error: %v", err)
assert.NotNil(b, err)
}
if updated == nil {
b.Errorf("Failed to update flows for device: %+v", flows)
} else {
- BenchmarkProxy_Logger.Infof("Flows were updated : %+v", updated)
+ BenchmarkProxyLogger.Infof("Flows were updated : %+v", updated)
}
- BenchmarkProxy_PLT.flowMutex.Lock()
- BenchmarkProxy_PLT.updatedFlows = append(
- BenchmarkProxy_PLT.updatedFlows,
+ BenchmarkProxyPLT.flowMutex.Lock()
+ BenchmarkProxyPLT.updatedFlows = append(
+ BenchmarkProxyPLT.updatedFlows,
proxyLoadChanges{ID: randomID, Before: before, After: after},
)
- BenchmarkProxy_PLT.flowMutex.Unlock()
+ BenchmarkProxyPLT.flowMutex.Unlock()
}
}
})
@@ -334,41 +310,41 @@
func BenchmarkProxy_GetDevices(b *testing.B) {
//traverseBranches(BenchmarkProxy_DeviceProxy.Root.node.Branches[NONE].GetLatest(), 0)
- for i := 0; i < len(BenchmarkProxy_PLT.addedDevices); i++ {
- devToGet := BenchmarkProxy_PLT.addedDevices[i]
+ for i := 0; i < len(BenchmarkProxyPLT.addedDevices); i++ {
+ devToGet := BenchmarkProxyPLT.addedDevices[i]
// Verify that the added device can now be retrieved
- d, err := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false, "")
+ d, err := BenchmarkProxyDeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from device proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from device proxy due to error: %v", err)
assert.NotNil(b, err)
}
if !reflect.ValueOf(d).IsValid() {
- BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
+ BenchmarkProxyLogger.Errorf("Failed to get device: %s", devToGet)
continue
} else {
- BenchmarkProxy_Logger.Infof("Got device: %s %+v", devToGet, d)
+ BenchmarkProxyLogger.Infof("Got device: %s %+v", devToGet, d)
}
}
}
func BenchmarkProxy_GetUpdatedFirmware(b *testing.B) {
- for i := 0; i < len(BenchmarkProxy_PLT.updatedFirmwares); i++ {
- devToGet := BenchmarkProxy_PLT.updatedFirmwares[i].ID
+ for i := 0; i < len(BenchmarkProxyPLT.updatedFirmwares); i++ {
+ devToGet := BenchmarkProxyPLT.updatedFirmwares[i].ID
// Verify that the updated device can be retrieved and that the updates were actually applied
- d, err := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false, "")
+ d, err := BenchmarkProxyDeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from device proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from device proxy due to error: %v", err)
assert.NotNil(b, err)
}
if !reflect.ValueOf(d).IsValid() {
- BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
+ BenchmarkProxyLogger.Errorf("Failed to get device: %s", devToGet)
continue
- } else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxy_PLT.updatedFirmwares[i].After.(string) {
- BenchmarkProxy_Logger.Infof("Device was updated with new value: %s %+v", devToGet, d)
- } else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxy_PLT.updatedFirmwares[i].Before.(string) {
- BenchmarkProxy_Logger.Errorf("Device kept old value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFirmwares[i])
+ } else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxyPLT.updatedFirmwares[i].After.(string) {
+ BenchmarkProxyLogger.Infof("Device was updated with new value: %s %+v", devToGet, d)
+ } else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxyPLT.updatedFirmwares[i].Before.(string) {
+ BenchmarkProxyLogger.Errorf("Device kept old value: %s %+v %+v", devToGet, d, BenchmarkProxyPLT.updatedFirmwares[i])
} else {
- BenchmarkProxy_Logger.Errorf("Device has unknown value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFirmwares[i])
+ BenchmarkProxyLogger.Errorf("Device has unknown value: %s %+v %+v", devToGet, d, BenchmarkProxyPLT.updatedFirmwares[i])
}
}
}
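
Most of the churn in proxy_load_test.go is golint's var-naming rule: identifiers lose their underscores and initialisms are written fully capitalised, which is why BenchmarkProxy_PLT becomes BenchmarkProxyPLT and the Id suffixes become ID elsewhere in this change. A tiny runnable before/after, with a variable name invented for the example:

package main

import "fmt"

// Before (flagged by golint):
//   var loadTest_DeviceId string   // underscore in the name, "Id" instead of "ID"
//
// After (the form this change converges on):
var loadTestDeviceID string

func main() {
	loadTestDeviceID = "0001abcdef"
	fmt.Println("device under test:", loadTestDeviceID)
}
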
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 0ed8af9..77a0dea 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -19,6 +19,12 @@
"context"
"encoding/hex"
"encoding/json"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
@@ -26,48 +32,43 @@
"github.com/opencord/voltha-protos/v2/go/openflow_13"
"github.com/opencord/voltha-protos/v2/go/voltha"
"github.com/stretchr/testify/assert"
- "math/rand"
- "reflect"
- "strconv"
- "testing"
- "time"
)
var (
- TestProxy_Root *root
- TestProxy_Root_LogicalDevice *Proxy
- TestProxy_Root_Device *Proxy
- TestProxy_Root_Adapter *Proxy
- TestProxy_DeviceId string
- TestProxy_AdapterId string
- TestProxy_LogicalDeviceId string
- TestProxy_TargetDeviceId string
- TestProxy_TargetLogicalDeviceId string
- TestProxy_LogicalPorts []*voltha.LogicalPort
- TestProxy_Ports []*voltha.Port
- TestProxy_Stats *openflow_13.OfpFlowStats
- TestProxy_Flows *openflow_13.Flows
- TestProxy_Device *voltha.Device
- TestProxy_LogicalDevice *voltha.LogicalDevice
- TestProxy_Adapter *voltha.Adapter
+ TestProxyRoot Root
+ TestProxyRootLogicalDevice *Proxy
+ TestProxyRootDevice *Proxy
+ TestProxyRootAdapter *Proxy
+ TestProxyDeviceID string
+ TestProxyAdapterID string
+ TestProxyLogicalDeviceID string
+ TestProxyTargetDeviceID string
+ TestProxyTargetLogicalDeviceID string
+ TestProxyLogicalPorts []*voltha.LogicalPort
+ TestProxyPorts []*voltha.Port
+ TestProxyStats *openflow_13.OfpFlowStats
+ TestProxyFlows *openflow_13.Flows
+ TestProxyDevice *voltha.Device
+ TestProxyLogicalDevice *voltha.LogicalDevice
+ TestProxyAdapter *voltha.Adapter
)
func init() {
//log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
var err error
- TestProxy_Root = NewRoot(&voltha.Voltha{}, nil)
- if TestProxy_Root_LogicalDevice, err = TestProxy_Root.CreateProxy(context.Background(), "/", false); err != nil {
+ TestProxyRoot = NewRoot(&voltha.Voltha{}, nil)
+ if TestProxyRootLogicalDevice, err = TestProxyRoot.CreateProxy(context.Background(), "/", false); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create logical device proxy")
}
- if TestProxy_Root_Device, err = TestProxy_Root.CreateProxy(context.Background(), "/", false); err != nil {
+ if TestProxyRootDevice, err = TestProxyRoot.CreateProxy(context.Background(), "/", false); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create device proxy")
}
- if TestProxy_Root_Adapter, err = TestProxy_Root.CreateProxy(context.Background(), "/", false); err != nil {
+ if TestProxyRootAdapter, err = TestProxyRoot.CreateProxy(context.Background(), "/", false); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create adapter proxy")
}
- TestProxy_LogicalPorts = []*voltha.LogicalPort{
+ TestProxyLogicalPorts = []*voltha.LogicalPort{
{
Id: "123",
DeviceId: "logicalport-0-device-id",
@@ -75,7 +76,7 @@
RootPort: false,
},
}
- TestProxy_Ports = []*voltha.Port{
+ TestProxyPorts = []*voltha.Port{
{
PortNo: 123,
Label: "test-port-0",
@@ -87,30 +88,30 @@
},
}
- TestProxy_Stats = &openflow_13.OfpFlowStats{
+ TestProxyStats = &openflow_13.OfpFlowStats{
Id: 1111,
}
- TestProxy_Flows = &openflow_13.Flows{
- Items: []*openflow_13.OfpFlowStats{TestProxy_Stats},
+ TestProxyFlows = &openflow_13.Flows{
+ Items: []*openflow_13.OfpFlowStats{TestProxyStats},
}
- TestProxy_Device = &voltha.Device{
- Id: TestProxy_DeviceId,
+ TestProxyDevice = &voltha.Device{
+ Id: TestProxyDeviceID,
Type: "simulated_olt",
Address: &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
AdminState: voltha.AdminState_PREPROVISIONED,
- Flows: TestProxy_Flows,
- Ports: TestProxy_Ports,
+ Flows: TestProxyFlows,
+ Ports: TestProxyPorts,
}
- TestProxy_LogicalDevice = &voltha.LogicalDevice{
- Id: TestProxy_DeviceId,
+ TestProxyLogicalDevice = &voltha.LogicalDevice{
+ Id: TestProxyDeviceID,
DatapathId: 0,
- Ports: TestProxy_LogicalPorts,
- Flows: TestProxy_Flows,
+ Ports: TestProxyLogicalPorts,
+ Flows: TestProxyFlows,
}
- TestProxy_Adapter = &voltha.Adapter{
- Id: TestProxy_AdapterId,
+ TestProxyAdapter = &voltha.Adapter{
+ Id: TestProxyAdapterID,
Vendor: "test-adapter-vendor",
Version: "test-adapter-version",
}
@@ -118,27 +119,27 @@
func TestProxy_1_1_1_Add_NewDevice(t *testing.T) {
devIDBin, _ := uuid.New().MarshalBinary()
- TestProxy_DeviceId = "0001" + hex.EncodeToString(devIDBin)[:12]
- TestProxy_Device.Id = TestProxy_DeviceId
+ TestProxyDeviceID = "0001" + hex.EncodeToString(devIDBin)[:12]
+ TestProxyDevice.Id = TestProxyDeviceID
preAddExecuted := make(chan struct{})
postAddExecuted := make(chan struct{})
preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
- devicesProxy, err := TestProxy_Root.node.CreateProxy(context.Background(), "/devices", false)
+ devicesProxy, err := TestProxyRoot.CreateProxy(context.Background(), "/devices", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create devices proxy")
}
- devicesProxy.RegisterCallback(PRE_ADD, commonCallback2, "PRE_ADD Device container changes")
- devicesProxy.RegisterCallback(POST_ADD, commonCallback2, "POST_ADD Device container changes")
+ devicesProxy.RegisterCallback(PreAdd, commonCallback2, "PRE_ADD Device container changes")
+ devicesProxy.RegisterCallback(PostAdd, commonCallback2, "POST_ADD Device container changes")
// Register ADD instructions callbacks
- TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
- TestProxy_Root_Device.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
+ TestProxyRootDevice.RegisterCallback(PreAdd, commonChanCallback, "PreAdd instructions", &preAddExecutedPtr)
+ TestProxyRootDevice.RegisterCallback(PostAdd, commonChanCallback, "PostAdd instructions", &postAddExecutedPtr)
- added, err := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, "")
+ added, err := TestProxyRootDevice.Add(context.Background(), "/devices", TestProxyDevice, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to add test proxy device due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to add test proxy device due to error: %v", err)
assert.NotNil(t, err)
}
if added == nil {
@@ -148,16 +149,16 @@
}
if !verifyGotResponse(preAddExecuted) {
- t.Error("PRE_ADD callback was not executed")
+ t.Error("PreAdd callback was not executed")
}
if !verifyGotResponse(postAddExecuted) {
- t.Error("POST_ADD callback was not executed")
+ t.Error("PostAdd callback was not executed")
}
// Verify that the added device can now be retrieved
- d, err := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, "")
+ d, err := TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -169,15 +170,15 @@
}
func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
- TestProxy_Device.Id = TestProxy_DeviceId
+ TestProxyDevice.Id = TestProxyDeviceID
- added, err := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, "")
+ added, err := TestProxyRootDevice.Add(context.Background(), "/devices", TestProxyDevice, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to add device to test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to add device to test proxy due to error: %v", err)
assert.NotNil(t, err)
}
- if added.(proto.Message).String() != reflect.ValueOf(TestProxy_Device).Interface().(proto.Message).String() {
- t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
+ if added.(proto.Message).String() != reflect.ValueOf(TestProxyDevice).Interface().(proto.Message).String() {
+ t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxyLogicalDevice, added)
}
}
@@ -196,20 +197,20 @@
}
func TestProxy_1_1_3_Add_NewAdapter(t *testing.T) {
- TestProxy_AdapterId = "test-adapter"
- TestProxy_Adapter.Id = TestProxy_AdapterId
+ TestProxyAdapterID = "test-adapter"
+ TestProxyAdapter.Id = TestProxyAdapterID
preAddExecuted := make(chan struct{})
postAddExecuted := make(chan struct{})
preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
// Register ADD instructions callbacks
- TestProxy_Root_Adapter.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions for adapters", &preAddExecutedPtr)
- TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions for adapters", &postAddExecutedPtr)
+ TestProxyRootAdapter.RegisterCallback(PreAdd, commonChanCallback, "PreAdd instructions for adapters", &preAddExecutedPtr)
+ TestProxyRootAdapter.RegisterCallback(PostAdd, commonChanCallback, "PostAdd instructions for adapters", &postAddExecutedPtr)
// Add the adapter
- added, err := TestProxy_Root_Adapter.Add(context.Background(), "/adapters", TestProxy_Adapter, "")
+ added, err := TestProxyRootAdapter.Add(context.Background(), "/adapters", TestProxyAdapter, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to add adapter to test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to add adapter to test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if added == nil {
@@ -221,9 +222,9 @@
verifyGotResponse(postAddExecuted)
// Verify that the added device can now be retrieved
- d, err := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 0, false, "")
+ d, err := TestProxyRootAdapter.Get(context.Background(), "/adapters/"+TestProxyAdapterID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to retrieve device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to retrieve device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -234,36 +235,36 @@
}
if !verifyGotResponse(preAddExecuted) {
- t.Error("PRE_ADD callback was not executed")
+ t.Error("PreAdd callback was not executed")
}
if !verifyGotResponse(postAddExecuted) {
- t.Error("POST_ADD callback was not executed")
+ t.Error("PostAdd callback was not executed")
}
}
func TestProxy_1_2_1_Get_AllDevices(t *testing.T) {
- devices, err := TestProxy_Root_Device.Get(context.Background(), "/devices", 1, false, "")
+ devices, err := TestProxyRootDevice.Get(context.Background(), "/devices", 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get all devices info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get all devices info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if len(devices.([]interface{})) == 0 {
t.Error("there are no available devices to retrieve")
} else {
// Save the target device id for later tests
- TestProxy_TargetDeviceId = devices.([]interface{})[0].(*voltha.Device).Id
+ TestProxyTargetDeviceID = devices.([]interface{})[0].(*voltha.Device).Id
t.Logf("retrieved all devices: %+v", devices)
}
}
func TestProxy_1_2_2_Get_SingleDevice(t *testing.T) {
- d, err := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 0, false, "")
+ d, err := TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyTargetDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get single device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get single device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
- t.Errorf("Failed to find device : %s", TestProxy_TargetDeviceId)
+ t.Errorf("Failed to find device : %s", TestProxyTargetDeviceID)
} else {
djson, _ := json.Marshal(d)
t.Logf("Found device: %s", string(djson))
@@ -277,9 +278,9 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- retrieved, err := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, "")
+ retrieved, err := TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyTargetDeviceID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if retrieved == nil {
@@ -296,20 +297,20 @@
retrieved.(*voltha.Device).FirmwareVersion = strconv.Itoa(fwVersion)
- TestProxy_Root_Device.RegisterCallback(
- PRE_UPDATE,
+ TestProxyRootDevice.RegisterCallback(
+ PreUpdate,
commonChanCallback,
- "PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
+ "PreUpdate instructions (root proxy)", &preUpdateExecutedPtr,
)
- TestProxy_Root_Device.RegisterCallback(
- POST_UPDATE,
+ TestProxyRootDevice.RegisterCallback(
+ PostUpdate,
commonChanCallback,
- "POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
+ "PostUpdate instructions (root proxy)", &postUpdateExecutedPtr,
)
- afterUpdate, err := TestProxy_Root_Device.Update(context.Background(), "/devices/"+TestProxy_TargetDeviceId, retrieved, false, "")
+ afterUpdate, err := TestProxyRootDevice.Update(context.Background(), "/devices/"+TestProxyTargetDeviceID, retrieved, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to update device info test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to update device info test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if afterUpdate == nil {
@@ -319,15 +320,15 @@
}
if !verifyGotResponse(preUpdateExecuted) {
- t.Error("PRE_UPDATE callback was not executed")
+ t.Error("PreUpdate callback was not executed")
}
if !verifyGotResponse(postUpdateExecuted) {
- t.Error("POST_UPDATE callback was not executed")
+ t.Error("PostUpdate callback was not executed")
}
- d, err := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, "")
+ d, err := TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyTargetDeviceID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -341,13 +342,13 @@
func TestProxy_1_3_2_Update_DeviceFlows(t *testing.T) {
// Get a device proxy and update a specific port
- devFlowsProxy, err := TestProxy_Root.node.CreateProxy(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", false)
+ devFlowsProxy, err := TestProxyRoot.CreateProxy(context.Background(), "/devices/"+TestProxyDeviceID+"/flows", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create device flows proxy")
}
flows, err := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
flows.(*openflow_13.Flows).Items[0].TableId = 2244
@@ -357,19 +358,19 @@
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
devFlowsProxy.RegisterCallback(
- PRE_UPDATE,
+ PreUpdate,
commonChanCallback,
- "PRE_UPDATE instructions (flows proxy)", &preUpdateExecutedPtr,
+ "PreUpdate instructions (flows proxy)", &preUpdateExecutedPtr,
)
devFlowsProxy.RegisterCallback(
- POST_UPDATE,
+ PostUpdate,
commonChanCallback,
- "POST_UPDATE instructions (flows proxy)", &postUpdateExecutedPtr,
+ "PostUpdate instructions (flows proxy)", &postUpdateExecutedPtr,
)
kvFlows, err := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
@@ -379,7 +380,7 @@
updated, err := devFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to update flows in device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to update flows in device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if updated == nil {
@@ -389,15 +390,15 @@
}
if !verifyGotResponse(preUpdateExecuted) {
- t.Error("PRE_UPDATE callback was not executed")
+ t.Error("PreUpdate callback was not executed")
}
if !verifyGotResponse(postUpdateExecuted) {
- t.Error("POST_UPDATE callback was not executed")
+ t.Error("PostUpdate callback was not executed")
}
d, err := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows in device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows in device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if d == nil {
@@ -407,9 +408,9 @@
t.Logf("Found flows (flows proxy): %s", string(djson))
}
- d, err = TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", 1, false, "")
+ d, err = TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyDeviceID+"/flows", 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -425,13 +426,13 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- adaptersProxy, err := TestProxy_Root.node.CreateProxy(context.Background(), "/adapters", false)
+ adaptersProxy, err := TestProxyRoot.CreateProxy(context.Background(), "/adapters", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create adapters proxy")
}
- retrieved, err := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, "")
+ retrieved, err := TestProxyRootAdapter.Get(context.Background(), "/adapters/"+TestProxyAdapterID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to retrieve adapter info from adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to retrieve adapter info from adapters proxy due to error: %v", err)
assert.NotNil(t, err)
}
if retrieved == nil {
@@ -442,19 +443,19 @@
retrieved.(*voltha.Adapter).Version = "test-adapter-version-2"
adaptersProxy.RegisterCallback(
- PRE_UPDATE,
+ PreUpdate,
commonChanCallback,
- "PRE_UPDATE instructions for adapters", &preUpdateExecutedPtr,
+ "PreUpdate instructions for adapters", &preUpdateExecutedPtr,
)
adaptersProxy.RegisterCallback(
- POST_UPDATE,
+ PostUpdate,
commonChanCallback,
- "POST_UPDATE instructions for adapters", &postUpdateExecutedPtr,
+ "PostUpdate instructions for adapters", &postUpdateExecutedPtr,
)
- afterUpdate, err := adaptersProxy.Update(context.Background(), "/"+TestProxy_AdapterId, retrieved, false, "")
+ afterUpdate, err := adaptersProxy.Update(context.Background(), "/"+TestProxyAdapterID, retrieved, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to update adapter info in adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to update adapter info in adapters proxy due to error: %v", err)
assert.NotNil(t, err)
}
if afterUpdate == nil {
@@ -464,15 +465,15 @@
}
if !verifyGotResponse(preUpdateExecuted) {
- t.Error("PRE_UPDATE callback for adapter was not executed")
+ t.Error("PreUpdate callback for adapter was not executed")
}
if !verifyGotResponse(postUpdateExecuted) {
- t.Error("POST_UPDATE callback for adapter was not executed")
+ t.Error("PostUpdate callback for adapter was not executed")
}
- d, err := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, "")
+ d, err := TestProxyRootAdapter.Get(context.Background(), "/adapters/"+TestProxyAdapterID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get updated adapter info from adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get updated adapter info from adapters proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -489,20 +490,20 @@
postRemoveExecuted := make(chan struct{})
preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
- TestProxy_Root_Device.RegisterCallback(
- PRE_REMOVE,
+ TestProxyRootDevice.RegisterCallback(
+ PreRemove,
commonChanCallback,
- "PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
+ "PreRemove instructions (root proxy)", &preRemoveExecutedPtr,
)
- TestProxy_Root_Device.RegisterCallback(
- POST_REMOVE,
+ TestProxyRootDevice.RegisterCallback(
+ PostRemove,
commonChanCallback,
- "POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
+ "PostRemove instructions (root proxy)", &postRemoveExecutedPtr,
)
- removed, err := TestProxy_Root_Device.Remove(context.Background(), "/devices/"+TestProxy_DeviceId, "")
+ removed, err := TestProxyRootDevice.Remove(context.Background(), "/devices/"+TestProxyDeviceID, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to remove device from devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to remove device from devices proxy due to error: %v", err)
assert.NotNil(t, err)
}
if removed == nil {
@@ -512,42 +513,42 @@
}
if !verifyGotResponse(preRemoveExecuted) {
- t.Error("PRE_REMOVE callback was not executed")
+ t.Error("PreRemove callback was not executed")
}
if !verifyGotResponse(postRemoveExecuted) {
- t.Error("POST_REMOVE callback was not executed")
+ t.Error("PostRemove callback was not executed")
}
- d, err := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, "")
+ d, err := TestProxyRootDevice.Get(context.Background(), "/devices/"+TestProxyDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get device info from devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get device info from devices proxy due to error: %v", err)
assert.NotNil(t, err)
}
if reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
- t.Logf("Device was removed: %s", TestProxy_DeviceId)
+ t.Logf("Device was removed: %s", TestProxyDeviceID)
}
}
func TestProxy_2_1_1_Add_NewLogicalDevice(t *testing.T) {
ldIDBin, _ := uuid.New().MarshalBinary()
- TestProxy_LogicalDeviceId = "0001" + hex.EncodeToString(ldIDBin)[:12]
- TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
+ TestProxyLogicalDeviceID = "0001" + hex.EncodeToString(ldIDBin)[:12]
+ TestProxyLogicalDevice.Id = TestProxyLogicalDeviceID
preAddExecuted := make(chan struct{})
postAddExecuted := make(chan struct{})
preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
// Register
- TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
- TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
+ TestProxyRootLogicalDevice.RegisterCallback(PreAdd, commonChanCallback, "PreAdd instructions", &preAddExecutedPtr)
+ TestProxyRootLogicalDevice.RegisterCallback(PostAdd, commonChanCallback, "PostAdd instructions", &postAddExecutedPtr)
- added, err := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, "")
+ added, err := TestProxyRootLogicalDevice.Add(context.Background(), "/logical_devices", TestProxyLogicalDevice, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to add new logical device into proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to add new logical device into proxy due to error: %v", err)
assert.NotNil(t, err)
}
if added == nil {
@@ -558,9 +559,9 @@
verifyGotResponse(postAddExecuted)
- ld, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, "")
+ ld, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyLogicalDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get logical device info from logical device proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get logical device info from logical device proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(ld).IsValid() {
@@ -571,49 +572,49 @@
}
if !verifyGotResponse(preAddExecuted) {
- t.Error("PRE_ADD callback was not executed")
+ t.Error("PreAdd callback was not executed")
}
if !verifyGotResponse(postAddExecuted) {
- t.Error("POST_ADD callback was not executed")
+ t.Error("PostAdd callback was not executed")
}
}
func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
- TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
+ TestProxyLogicalDevice.Id = TestProxyLogicalDeviceID
- added, err := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, "")
+ added, err := TestProxyRootLogicalDevice.Add(context.Background(), "/logical_devices", TestProxyLogicalDevice, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to add logical device due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to add logical device due to error: %v", err)
assert.NotNil(t, err)
}
- if added.(proto.Message).String() != reflect.ValueOf(TestProxy_LogicalDevice).Interface().(proto.Message).String() {
- t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
+ if added.(proto.Message).String() != reflect.ValueOf(TestProxyLogicalDevice).Interface().(proto.Message).String() {
+ t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxyLogicalDevice, added)
}
}
func TestProxy_2_2_1_Get_AllLogicalDevices(t *testing.T) {
- logicalDevices, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices", 1, false, "")
+ logicalDevices, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices", 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get all logical devices from proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get all logical devices from proxy due to error: %v", err)
assert.NotNil(t, err)
}
if len(logicalDevices.([]interface{})) == 0 {
t.Error("there are no available logical devices to retrieve")
} else {
// Save the target device id for later tests
- TestProxy_TargetLogicalDeviceId = logicalDevices.([]interface{})[0].(*voltha.LogicalDevice).Id
+ TestProxyTargetLogicalDeviceID = logicalDevices.([]interface{})[0].(*voltha.LogicalDevice).Id
t.Logf("retrieved all logical devices: %+v", logicalDevices)
}
}
func TestProxy_2_2_2_Get_SingleLogicalDevice(t *testing.T) {
- ld, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, "")
+ ld, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyTargetLogicalDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get single logical device from proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get single logical device from proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(ld).IsValid() {
- t.Errorf("Failed to find logical device : %s", TestProxy_TargetLogicalDeviceId)
+ t.Errorf("Failed to find logical device : %s", TestProxyTargetLogicalDeviceID)
} else {
ldJSON, _ := json.Marshal(ld)
t.Logf("Found logical device: %s", string(ldJSON))
@@ -627,9 +628,9 @@
postUpdateExecuted := make(chan struct{})
preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
- retrieved, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, "")
+ retrieved, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyTargetLogicalDeviceID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get logical devices due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get logical devices due to error: %v", err)
assert.NotNil(t, err)
}
if retrieved == nil {
@@ -644,23 +645,23 @@
fwVersion++
}
- TestProxy_Root_LogicalDevice.RegisterCallback(
- PRE_UPDATE,
+ TestProxyRootLogicalDevice.RegisterCallback(
+ PreUpdate,
commonChanCallback,
- "PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
+ "PreUpdate instructions (root proxy)", &preUpdateExecutedPtr,
)
- TestProxy_Root_LogicalDevice.RegisterCallback(
- POST_UPDATE,
+ TestProxyRootLogicalDevice.RegisterCallback(
+ PostUpdate,
commonChanCallback,
- "POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
+ "PostUpdate instructions (root proxy)", &postUpdateExecutedPtr,
)
retrieved.(*voltha.LogicalDevice).RootDeviceId = strconv.Itoa(fwVersion)
- afterUpdate, err := TestProxy_Root_LogicalDevice.Update(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
+ afterUpdate, err := TestProxyRootLogicalDevice.Update(context.Background(), "/logical_devices/"+TestProxyTargetLogicalDeviceID, retrieved, false,
"")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Faield to update logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Faield to update logical device info due to error: %v", err)
assert.NotNil(t, err)
}
if afterUpdate == nil {
@@ -670,15 +671,15 @@
}
if !verifyGotResponse(preUpdateExecuted) {
- t.Error("PRE_UPDATE callback was not executed")
+ t.Error("PreUpdate callback was not executed")
}
if !verifyGotResponse(postUpdateExecuted) {
- t.Error("POST_UPDATE callback was not executed")
+ t.Error("PostUpdate callback was not executed")
}
- d, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, "")
+ d, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyTargetLogicalDeviceID, 1, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get logical device info due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -693,30 +694,30 @@
func TestProxy_2_3_2_Update_LogicalDeviceFlows(t *testing.T) {
// Get a device proxy and update a specific port
- ldFlowsProxy, err := TestProxy_Root.node.CreateProxy(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
+ ldFlowsProxy, err := TestProxyRoot.CreateProxy(context.Background(), "/logical_devices/"+TestProxyLogicalDeviceID+"/flows", false)
if err != nil {
log.With(log.Fields{"error": err}).Fatal("Failed to create logical device flows proxy")
}
flows, err := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from logical device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from logical device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
flows.(*openflow_13.Flows).Items[0].TableId = rand.Uint32()
t.Logf("before updated flows: %+v", flows)
ldFlowsProxy.RegisterCallback(
- PRE_UPDATE,
+ PreUpdate,
commonCallback2,
)
ldFlowsProxy.RegisterCallback(
- POST_UPDATE,
+ PostUpdate,
commonCallback2,
)
kvFlows, err := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Faield to get flows from logical device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Faield to get flows from logical device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if reflect.DeepEqual(flows, kvFlows) {
@@ -725,7 +726,7 @@
updated, err := ldFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to update flows in logical device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to update flows in logical device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if updated == nil {
@@ -741,10 +742,10 @@
t.Logf("Found flows (flows proxy): %s", string(djson))
}
- d, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
+ d, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyLogicalDeviceID+"/flows", 0, false,
"")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get flows from logical device flows proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get flows from logical device flows proxy due to error: %v", err)
assert.NotNil(t, err)
}
if !reflect.ValueOf(d).IsValid() {
@@ -760,20 +761,20 @@
postRemoveExecuted := make(chan struct{})
preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
- TestProxy_Root_LogicalDevice.RegisterCallback(
- PRE_REMOVE,
+ TestProxyRootLogicalDevice.RegisterCallback(
+ PreRemove,
commonChanCallback,
- "PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
+ "PreRemove instructions (root proxy)", &preRemoveExecutedPtr,
)
- TestProxy_Root_LogicalDevice.RegisterCallback(
- POST_REMOVE,
+ TestProxyRootLogicalDevice.RegisterCallback(
+ PostRemove,
commonChanCallback,
- "POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
+ "PostRemove instructions (root proxy)", &postRemoveExecutedPtr,
)
- removed, err := TestProxy_Root_LogicalDevice.Remove(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, "")
+ removed, err := TestProxyRootLogicalDevice.Remove(context.Background(), "/logical_devices/"+TestProxyLogicalDeviceID, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to remove device from logical devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to remove device from logical devices proxy due to error: %v", err)
assert.NotNil(t, err)
}
if removed == nil {
@@ -783,22 +784,22 @@
}
if !verifyGotResponse(preRemoveExecuted) {
- t.Error("PRE_REMOVE callback was not executed")
+ t.Error("PreRemove callback was not executed")
}
if !verifyGotResponse(postRemoveExecuted) {
- t.Error("POST_REMOVE callback was not executed")
+ t.Error("PostRemove callback was not executed")
}
- d, err := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, "")
+ d, err := TestProxyRootLogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxyLogicalDeviceID, 0, false, "")
if err != nil {
- BenchmarkProxy_Logger.Errorf("Failed to get logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf("Failed to get logical device info due to error: %v", err)
assert.NotNil(t, err)
}
if reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
- t.Logf("Device was removed: %s", TestProxy_LogicalDeviceId)
+ t.Logf("Device was removed: %s", TestProxyLogicalDeviceID)
}
}
@@ -807,34 +808,34 @@
// -----------------------------
func TestProxy_Callbacks_1_Register(t *testing.T) {
- TestProxy_Root_Device.RegisterCallback(PRE_ADD, firstCallback, "abcde", "12345")
+ TestProxyRootDevice.RegisterCallback(PreAdd, firstCallback, "abcde", "12345")
m := make(map[string]string)
m["name"] = "fghij"
- TestProxy_Root_Device.RegisterCallback(PRE_ADD, secondCallback, m, 1.2345)
+ TestProxyRootDevice.RegisterCallback(PreAdd, secondCallback, m, 1.2345)
d := &voltha.Device{Id: "12345"}
- TestProxy_Root_Device.RegisterCallback(PRE_ADD, thirdCallback, "klmno", d)
+ TestProxyRootDevice.RegisterCallback(PreAdd, thirdCallback, "klmno", d)
}
func TestProxy_Callbacks_2_Invoke_WithNoInterruption(t *testing.T) {
- TestProxy_Root_Device.InvokeCallbacks(PRE_ADD, false, nil)
+ TestProxyRootDevice.InvokeCallbacks(PreAdd, false, nil)
}
func TestProxy_Callbacks_3_Invoke_WithInterruption(t *testing.T) {
- TestProxy_Root_Device.InvokeCallbacks(PRE_ADD, true, nil)
+ TestProxyRootDevice.InvokeCallbacks(PreAdd, true, nil)
}
func TestProxy_Callbacks_4_Unregister(t *testing.T) {
- TestProxy_Root_Device.UnregisterCallback(PRE_ADD, firstCallback)
- TestProxy_Root_Device.UnregisterCallback(PRE_ADD, secondCallback)
- TestProxy_Root_Device.UnregisterCallback(PRE_ADD, thirdCallback)
+ TestProxyRootDevice.UnregisterCallback(PreAdd, firstCallback)
+ TestProxyRootDevice.UnregisterCallback(PreAdd, secondCallback)
+ TestProxyRootDevice.UnregisterCallback(PreAdd, thirdCallback)
}
//func TestProxy_Callbacks_5_Add(t *testing.T) {
-// TestProxy_Root_Device.Root.AddCallback(TestProxy_Root_Device.InvokeCallbacks, POST_UPDATE, false, "some data", "some new data")
+// TestProxyRootDevice.Root.AddCallback(TestProxyRootDevice.InvokeCallbacks, PostUpdate, false, "some data", "some new data")
//}
//
//func TestProxy_Callbacks_6_Execute(t *testing.T) {
-// TestProxy_Root_Device.Root.ExecuteCallbacks()
+// TestProxyRootDevice.Root.ExecuteCallbacks()
//}
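
The switch from the unexported *root type to the exported Root interface in the test variables above (TestProxyRoot Root, created via NewRoot) reflects another golint rule: an exported constructor should not return an unexported type. Callers now hold Root and go through its CreateProxy method instead of reaching into the node field directly. A stripped-down sketch of that pattern; the store/Store names are invented:

package store

// Store is the exported interface callers program against.
type Store interface {
	Get(key string) (string, bool)
}

// store is the unexported implementation behind the interface.
type store struct {
	data map[string]string
}

// NewStore returns the exported interface rather than *store, so the result is
// usable and documentable outside the package, which is the same reason NewRoot
// now returns Root.
func NewStore() Store {
	return &store{data: map[string]string{}}
}

// Get looks a key up in the backing map.
func (s *store) Get(key string) (string, bool) {
	v, ok := s.data[key]
	return v, ok
}
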
diff --git a/db/model/revision.go b/db/model/revision.go
index 9addad4..cba06cb 100644
--- a/db/model/revision.go
+++ b/db/model/revision.go
@@ -13,14 +13,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package model
import (
"context"
- "github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
"time"
+
+ "github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
)
+// Revision -
type Revision interface {
Finalize(bool)
SetConfig(revision *DataRevision)
@@ -44,7 +47,7 @@
GetBranch() *Branch
Get(int) interface{}
GetData() interface{}
- GetNode() *node
+ getNode() *node
SetLastUpdate(ts ...time.Time)
GetLastUpdate() time.Time
LoadFromPersistence(ctx context.Context, path string, txid string, blobs map[string]*kvstore.KVPair) ([]Revision, error)
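
Beyond the renames, two conventions repeat across these files: goimports groups the standard library first, then a blank line, then third-party packages, and golint wants every exported identifier to carry a comment starting with its name (hence the terse "// Revision -" added above). A small illustrative file following both conventions; the widget package and its contents are made up, only the uuid dependency appears in the code above:

package widget

import (
	"context"
	"time"

	"github.com/google/uuid"
)

// Widget - exported types need a leading comment that begins with the type
// name, which is what the minimal "// Revision -" style satisfies.
type Widget struct {
	ID      string
	Created time.Time
}

// New - returns a Widget with a fresh identifier.
func New(ctx context.Context) *Widget {
	_ = ctx // kept only to show a grouped stdlib import in use
	return &Widget{ID: uuid.New().String(), Created: time.Now()}
}
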
diff --git a/db/model/root.go b/db/model/root.go
index bca2d4f..24cfe46 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -20,12 +20,13 @@
"context"
"encoding/hex"
"encoding/json"
+ "reflect"
+ "sync"
+
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
"github.com/opencord/voltha-lib-go/v2/pkg/db"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
- "reflect"
- "sync"
)
// Root is used to provide an abstraction to the base root structure
@@ -53,7 +54,7 @@
}
// NewRoot creates an new instance of a root object
-func NewRoot(initialData interface{}, kvStore *db.Backend) *root {
+func NewRoot(initialData interface{}, kvStore *db.Backend) Root {
root := &root{}
root.KvStore = kvStore
@@ -71,7 +72,7 @@
root.Callbacks = []CallbackTuple{}
root.NotificationCallbacks = []CallbackTuple{}
- root.node = NewNode(root, initialData, false, "")
+ root.node = newNode(root, initialData, false, "")
return root
}
@@ -104,7 +105,9 @@
// Merge operation fails
r.DeleteTxBranch(txid)
} else {
- r.node.MergeBranch(txid, false)
+ if _, err = r.node.MergeBranch(txid, false); err != nil {
+ log.Errorw("Unable to integrate the contents of a transaction branch within the latest branch of a given node", log.Fields{"error": err})
+ }
r.node.GetRoot().ExecuteCallbacks()
r.DeleteTxBranch(txid)
}
@@ -127,10 +130,6 @@
//}
}
-func (r *root) hasCallbacks() bool {
- return len(r.Callbacks) > 0
-}
-
// getCallbacks returns the available callbacks
func (r *root) GetCallbacks() []CallbackTuple {
r.mutex.Lock()
@@ -166,7 +165,7 @@
func (r *root) syncParent(childRev Revision, txid string) {
data := proto.Clone(r.GetProxy().ParentNode.Latest().GetData().(proto.Message))
- for fieldName, _ := range ChildrenFields(data) {
+ for fieldName := range ChildrenFields(data) {
childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
if reflect.TypeOf(childRev.GetData()) == reflect.TypeOf(childDataHolder.Interface()) {
childDataHolder = reflect.ValueOf(childRev.GetData())
@@ -182,14 +181,6 @@
func (r *root) Update(ctx context.Context, path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
- if makeBranch != nil {
- // TODO: raise error
- }
-
- if r.hasCallbacks() {
- // TODO: raise error
- }
-
if txid != "" {
trackDirty := func(node *node) *Branch {
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
@@ -217,14 +208,6 @@
func (r *root) Add(ctx context.Context, path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
- if makeBranch != nil {
- // TODO: raise error
- }
-
- if r.hasCallbacks() {
- // TODO: raise error
- }
-
if txid != "" {
trackDirty := func(node *node) *Branch {
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
@@ -246,14 +229,6 @@
func (r *root) Remove(ctx context.Context, path string, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
- if makeBranch != nil {
- // TODO: raise error
- }
-
- if r.hasCallbacks() {
- // TODO: raise error
- }
-
if txid != "" {
trackDirty := func(node *node) *Branch {
r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
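Two lint-driven patterns show up in root.go: NewRoot now returns the exported Root interface rather than the unexported *root (golint's "exported func returns unexported type"), and the result of MergeBranch is checked and logged instead of being discarded. A runnable sketch of both, using hypothetical Store/store/Merge names rather than the actual model types:

package main

import (
	"errors"
	"log"
)

// Store is the exported interface callers program against, mirroring Root above.
type Store interface {
	Merge(txid string) error
}

// store is the unexported implementation, mirroring the unexported root struct.
type store struct{}

// Merge returns an error for an empty transaction id, just to have something to check.
func (s *store) Merge(txid string) error {
	if txid == "" {
		return errors.New("empty transaction id")
	}
	return nil
}

// NewStore returns the interface type, which avoids golint's
// "exported func returns unexported type" warning.
func NewStore() Store {
	return &store{}
}

func main() {
	s := NewStore()
	// Check and log the returned error instead of discarding it,
	// as done for MergeBranch in root.go above.
	if err := s.Merge(""); err != nil {
		log.Printf("merge failed: %v", err)
	}
}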
diff --git a/db/model/transaction.go b/db/model/transaction.go
index e10236c..f54b0d2 100644
--- a/db/model/transaction.go
+++ b/db/model/transaction.go
@@ -13,19 +13,23 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package model
import (
"context"
"fmt"
+
"github.com/opencord/voltha-lib-go/v2/pkg/log"
)
+// Transaction -
type Transaction struct {
proxy *Proxy
txid string
}
+// NewTransaction -
func NewTransaction(proxy *Proxy, txid string) *Transaction {
tx := &Transaction{
proxy: proxy,
@@ -62,10 +66,14 @@
}
return t.proxy.Remove(ctx, path, t.txid)
}
+
+// Cancel -
func (t *Transaction) Cancel() {
t.proxy.cancelTransaction(t.txid)
t.txid = ""
}
+
+// Commit -
func (t *Transaction) Commit() {
t.proxy.commitTransaction(t.txid)
t.txid = ""
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index a52688a..ff2385a 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -19,33 +19,34 @@
import (
"context"
"encoding/hex"
+ "strconv"
+ "testing"
+
"github.com/google/uuid"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
"github.com/opencord/voltha-protos/v2/go/common"
"github.com/opencord/voltha-protos/v2/go/voltha"
"github.com/stretchr/testify/assert"
- "strconv"
- "testing"
)
var (
- TestTransaction_Root *root
- TestTransaction_RootProxy *Proxy
- TestTransaction_TargetDeviceId string
- TestTransaction_DeviceId string
+ TestTransactionRoot Root
+ TestTransactionRootProxy *Proxy
+ TestTransactionTargetDeviceID string
+ TestTransactionDeviceID string
)
func init() {
var err error
- TestTransaction_Root = NewRoot(&voltha.Voltha{}, nil)
- if TestTransaction_RootProxy, err = TestTransaction_Root.node.CreateProxy(context.Background(), "/", false); err != nil {
+ TestTransactionRoot = NewRoot(&voltha.Voltha{}, nil)
+ if TestTransactionRootProxy, err = TestTransactionRoot.CreateProxy(context.Background(), "/", false); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot create proxy")
}
}
func TestTransaction_2_AddDevice(t *testing.T) {
devIDBin, _ := uuid.New().MarshalBinary()
- TestTransaction_DeviceId = "0001" + hex.EncodeToString(devIDBin)[:12]
+ TestTransactionDeviceID = "0001" + hex.EncodeToString(devIDBin)[:12]
ports := []*voltha.Port{
{
@@ -60,14 +61,14 @@
}
device := &voltha.Device{
- Id: TestTransaction_DeviceId,
+ Id: TestTransactionDeviceID,
Type: "simulated_olt",
Address: &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
AdminState: voltha.AdminState_PREPROVISIONED,
Ports: ports,
}
- addTx := TestTransaction_RootProxy.OpenTransaction()
+ addTx := TestTransactionRootProxy.OpenTransaction()
added, err := addTx.Add(context.Background(), "/devices", device)
if err != nil {
@@ -77,7 +78,7 @@
if added == nil {
t.Error("Failed to add device")
} else {
- TestTransaction_TargetDeviceId = added.(*voltha.Device).Id
+ TestTransactionTargetDeviceID = added.(*voltha.Device).Id
t.Logf("Added device : %+v", added)
}
addTx.Commit()
@@ -85,9 +86,9 @@
func TestTransaction_3_GetDevice_PostAdd(t *testing.T) {
- basePath := "/devices/" + TestTransaction_DeviceId
+ basePath := "/devices/" + TestTransactionDeviceID
- getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
+ getDevWithPortsTx := TestTransactionRootProxy.OpenTransaction()
device1, err := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
if err != nil {
log.Errorf("Failed to get device with ports due to error %v", err)
@@ -96,7 +97,7 @@
t.Logf("retrieved device with ports: %+v", device1)
getDevWithPortsTx.Commit()
- getDevTx := TestTransaction_RootProxy.OpenTransaction()
+ getDevTx := TestTransactionRootProxy.OpenTransaction()
device2, err := getDevTx.Get(context.Background(), basePath, 0, false)
if err != nil {
log.Errorf("Failed to open transaction due to error %v", err)
@@ -108,8 +109,8 @@
}
func TestTransaction_4_UpdateDevice(t *testing.T) {
- updateTx := TestTransaction_RootProxy.OpenTransaction()
- if retrieved, err := updateTx.Get(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, 1, false); err != nil {
+ updateTx := TestTransactionRootProxy.OpenTransaction()
+ if retrieved, err := updateTx.Get(context.Background(), "/devices/"+TestTransactionTargetDeviceID, 1, false); err != nil {
log.Errorf("Failed to retrieve device info due to error %v", err)
assert.NotNil(t, err)
} else if retrieved == nil {
@@ -128,7 +129,7 @@
t.Logf("Before update : %+v", retrieved)
// FIXME: The makeBranch function passed in is nil or not being executed properly!!!!!
- afterUpdate, err := updateTx.Update(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, retrieved, false)
+ afterUpdate, err := updateTx.Update(context.Background(), "/devices/"+TestTransactionTargetDeviceID, retrieved, false)
if err != nil {
log.Errorf("Failed to update device info due to error %v", err)
assert.NotNil(t, err)
@@ -144,9 +145,9 @@
func TestTransaction_5_GetDevice_PostUpdate(t *testing.T) {
- basePath := "/devices/" + TestTransaction_DeviceId
+ basePath := "/devices/" + TestTransactionDeviceID
- getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
+ getDevWithPortsTx := TestTransactionRootProxy.OpenTransaction()
device1, err := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
if err != nil {
log.Errorf("Failed to device with ports info due to error %v", err)
@@ -155,7 +156,7 @@
t.Logf("retrieved device with ports: %+v", device1)
getDevWithPortsTx.Commit()
- getDevTx := TestTransaction_RootProxy.OpenTransaction()
+ getDevTx := TestTransactionRootProxy.OpenTransaction()
device2, err := getDevTx.Get(context.Background(), basePath, 0, false)
if err != nil {
log.Errorf("Failed to get device info due to error %v", err)
@@ -167,8 +168,8 @@
}
func TestTransaction_6_RemoveDevice(t *testing.T) {
- removeTx := TestTransaction_RootProxy.OpenTransaction()
- removed, err := removeTx.Remove(context.Background(), "/devices/"+TestTransaction_DeviceId)
+ removeTx := TestTransactionRootProxy.OpenTransaction()
+ removed, err := removeTx.Remove(context.Background(), "/devices/"+TestTransactionDeviceID)
if err != nil {
log.Errorf("Failed to remove device due to error %v", err)
assert.NotNil(t, err)
@@ -183,10 +184,10 @@
func TestTransaction_7_GetDevice_PostRemove(t *testing.T) {
- basePath := "/devices/" + TestTransaction_DeviceId
+ basePath := "/devices/" + TestTransactionDeviceID
- getDevTx := TestTransaction_RootProxy.OpenTransaction()
- device, err := TestTransaction_RootProxy.Get(context.Background(), basePath, 0, false, "")
+ getDevTx := TestTransactionRootProxy.OpenTransaction()
+ device, err := TestTransactionRootProxy.Get(context.Background(), basePath, 0, false, "")
if err != nil {
log.Errorf("Failed to get device info post remove due to error %v", err)
assert.NotNil(t, err)
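The test-variable renames follow golint's var-naming conventions: Go identifiers use MixedCaps rather than underscores, and common initialisms stay fully upper-case, so DeviceId becomes DeviceID. A small illustrative sketch with hypothetical names:

package model

var (
	// testDeviceID uses MixedCaps and the upper-case ID initialism,
	// in place of a name such as TestTransaction_DeviceId.
	testDeviceID string

	// testTargetDeviceID follows the same convention for the target device.
	testTargetDeviceID string
)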
diff --git a/db/model/utils.go b/db/model/utils.go
index b28e92f..769aa78 100644
--- a/db/model/utils.go
+++ b/db/model/utils.go
@@ -17,8 +17,6 @@
package model
import (
- "bytes"
- "encoding/gob"
"reflect"
"strings"
)
@@ -253,23 +251,3 @@
return result
}
-
-func clone2(a interface{}) interface{} {
- b := reflect.ValueOf(a)
- buff := new(bytes.Buffer)
- enc := gob.NewEncoder(buff)
- dec := gob.NewDecoder(buff)
- enc.Encode(a)
- dec.Decode(b.Elem().Interface())
-
- return b.Interface()
-}
-
-func clone(a, b interface{}) interface{} {
- buff := new(bytes.Buffer)
- enc := gob.NewEncoder(buff)
- dec := gob.NewDecoder(buff)
- enc.Encode(a)
- dec.Decode(b)
- return b
-}
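The gob-based clone and clone2 helpers were unreferenced, which golangci-lint reports as dead code. Deep copies of protobuf messages elsewhere in this change (for example in syncParent) go through proto.Clone instead; a minimal sketch of that approach, assuming the voltha-protos v2 module is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/opencord/voltha-protos/v2/go/voltha"
)

func main() {
	original := &voltha.Device{Id: "0001", Type: "simulated_olt"}
	// proto.Clone returns a deep copy of the message, so the gob round-trip
	// helpers are not needed for protobuf data.
	clone := proto.Clone(original).(*voltha.Device)
	fmt.Println(clone.Id == original.Id, clone != original)
}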
diff --git a/rw_core/core/adapter_manager.go b/rw_core/core/adapter_manager.go
index 3155ab8..ef02ff8 100644
--- a/rw_core/core/adapter_manager.go
+++ b/rw_core/core/adapter_manager.go
@@ -134,8 +134,8 @@
}
// Register the callbacks
- aMgr.adapterProxy.RegisterCallback(model.POST_UPDATE, aMgr.adapterUpdated)
- aMgr.deviceTypeProxy.RegisterCallback(model.POST_UPDATE, aMgr.deviceTypesUpdated)
+ aMgr.adapterProxy.RegisterCallback(model.PostUpdate, aMgr.adapterUpdated)
+ aMgr.deviceTypeProxy.RegisterCallback(model.PostUpdate, aMgr.deviceTypesUpdated)
probe.UpdateStatusFromContext(ctx, "adapter-manager", probe.ServiceStatusRunning)
log.Info("adapter-manager-started")
return nil
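The POST_UPDATE → PostUpdate rename, repeated in the device and logical-device agents below, follows Go's preference for MixedCaps constant names over ALL_CAPS with underscores. An illustrative sketch only; CallbackType and these values stand in for the actual model package declarations:

package model

// CallbackType enumerates when a registered callback fires; this declaration
// is illustrative and not the real model package definition.
type CallbackType int

const (
	PreUpdate CallbackType = iota
	PostUpdate
	PostAdd
	PostRemove
)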
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index d71da27..0cbd448 100755
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -140,7 +140,7 @@
log.Errorw("failed-to-add-devices-to-cluster-proxy", log.Fields{"error": err})
return nil, err
}
- agent.deviceProxy.RegisterCallback(model.POST_UPDATE, agent.processUpdate)
+ agent.deviceProxy.RegisterCallback(model.PostUpdate, agent.processUpdate)
log.Debugw("device-agent-started", log.Fields{"deviceId": agent.deviceID})
return device, nil
@@ -154,7 +154,7 @@
log.Debugw("stopping-device-agent", log.Fields{"deviceId": agent.deviceID, "parentId": agent.parentID})
// First unregister any callbacks
- agent.deviceProxy.UnregisterCallback(model.POST_UPDATE, agent.processUpdate)
+ agent.deviceProxy.UnregisterCallback(model.PostUpdate, agent.processUpdate)
// Remove the device from the KV store
removed, err := agent.clusterDataProxy.Remove(ctx, "/devices/"+agent.deviceID, "")
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index 097f9ab..776a0fa 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -199,7 +199,7 @@
}
// TODO: Use a port proxy once the POST_ADD is fixed
if agent.ldProxy != nil {
- agent.ldProxy.RegisterCallback(model.POST_UPDATE, agent.portUpdated)
+ agent.ldProxy.RegisterCallback(model.PostUpdate, agent.portUpdated)
} else {
log.Errorw("logical-device-proxy-null", log.Fields{"logicalDeviceId": agent.logicalDeviceID})
return status.Error(codes.Internal, "logical-device-proxy-null")