VOL-1558 Update vendored voltha-go and other items

Result of running dep ensure. The golang openolt component now
builds.

Also update Dockerfile to use a specific Alpine version

Change-Id: I1e5407e25bb0636a241a0650d1e44e5df567f44b
diff --git a/vendor/github.com/opencord/voltha-go/db/model/backend.go b/vendor/github.com/opencord/voltha-go/db/model/backend.go
new file mode 100644
index 0000000..981a1d5
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/backend.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
+	"strconv"
+	"sync"
+	"time"
+)
+
+//TODO: missing cache stuff
+//TODO: missing retry stuff
+//TODO: missing proper logging
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	sync.RWMutex
+	Client     kvstore.Client
+	StoreType  string
+	Host       string
+	Port       int
+	Timeout    int
+	PathPrefix string
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(storeType string, host string, port int, timeout int, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:  storeType,
+		Host:       host,
+		Port:       port,
+		Timeout:    timeout,
+		PathPrefix: pathPrefix,
+	}
+
+	address := host + ":" + strconv.Itoa(port)
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		log.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "host": host, "port": port,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
+
+func (b *Backend) newClient(address string, timeout int) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(key string, lock ...bool) (map[string]*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	return b.Client.List(formattedPath, b.Timeout, lock...)
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(key string, lock ...bool) (*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	start := time.Now()
+	err, pair := b.Client.Get(formattedPath, b.Timeout, lock...)
+	stop := time.Now()
+
+	GetProfiling().AddToDatabaseRetrieveTime(stop.Sub(start).Seconds())
+
+	return err, pair
+}
+
+// Put stores an item value under the specifed key
+func (b *Backend) Put(key string, value interface{}, lock ...bool) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath, "lock": lock})
+
+	return b.Client.Put(formattedPath, value, b.Timeout, lock...)
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(key string, lock ...bool) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	return b.Client.Delete(formattedPath, b.Timeout, lock...)
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(key string) chan *kvstore.Event {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(formattedPath)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(formattedPath, ch)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/branch.go b/vendor/github.com/opencord/voltha-go/db/model/branch.go
new file mode 100644
index 0000000..40c66ad
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/branch.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+)
+
+// TODO: implement weak references or something equivalent
+// TODO: missing proper logging
+
+// Branch structure is used to classify a collection of transaction based revisions
+type Branch struct {
+	sync.RWMutex
+	Node      *node
+	Txid      string
+	Origin    Revision
+	Revisions map[string]Revision
+	Latest    Revision
+}
+
+// NewBranch creates a new instance of the Branch structure
+func NewBranch(node *node, txid string, origin Revision, autoPrune bool) *Branch {
+	b := &Branch{}
+	b.Node = node
+	b.Txid = txid
+	b.Origin = origin
+	b.Revisions = make(map[string]Revision)
+	b.Latest = origin
+
+	return b
+}
+
+// SetLatest assigns the latest revision for this branch
+func (b *Branch) SetLatest(latest Revision) {
+	b.Lock()
+	defer b.Unlock()
+
+	if b.Latest != nil {
+		log.Debugf("Switching latest from <%s> to <%s>", b.Latest.GetHash(), latest.GetHash())
+	} else {
+		log.Debugf("Switching latest from <NIL> to <%s>", latest.GetHash())
+	}
+
+
+	b.Latest = latest
+}
+
+// GetLatest retrieves the latest revision of the branch
+func (b *Branch) GetLatest() Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.Latest
+}
+
+// GetOrigin retrieves the original revision of the branch
+func (b *Branch) GetOrigin() Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.Origin
+}
+
+// AddRevision inserts a new revision to the branch
+func (b *Branch) AddRevision(revision Revision) {
+	if revision != nil && b.GetRevision(revision.GetHash()) == nil {
+		b.SetRevision(revision.GetHash(), revision)
+	}
+}
+
+// GetRevision pulls a revision entry at the specified hash
+func (b *Branch) GetRevision(hash string) Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	if revision, ok := b.Revisions[hash]; ok {
+		return revision
+	}
+
+	return nil
+}
+
+// SetRevision updates a revision entry at the specified hash
+func (b *Branch) SetRevision(hash string, revision Revision) {
+	b.Lock()
+	defer b.Unlock()
+
+	b.Revisions[hash] = revision
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/callback_type.go b/vendor/github.com/opencord/voltha-go/db/model/callback_type.go
new file mode 100644
index 0000000..b530dee
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/callback_type.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+// CallbackType is an enumerated value to express when a callback should be executed
+type CallbackType uint8
+
+// Enumerated list of callback types
+const (
+	GET CallbackType = iota
+	PRE_UPDATE
+	POST_UPDATE
+	PRE_ADD
+	POST_ADD
+	PRE_REMOVE
+	POST_REMOVE
+	POST_LISTCHANGE
+)
+
+var enumCallbackTypes = []string{
+	"GET",
+	"PRE_UPDATE",
+	"POST_UPDATE",
+	"PRE_ADD",
+	"POST_ADD",
+	"PRE_REMOVE",
+	"POST_REMOVE",
+	"POST_LISTCHANGE",
+}
+
+func (t CallbackType) String() string {
+	return enumCallbackTypes[t]
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/child_type.go b/vendor/github.com/opencord/voltha-go/db/model/child_type.go
new file mode 100644
index 0000000..da6f688
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/child_type.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	desc "github.com/golang/protobuf/descriptor"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/common"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+type singletonChildTypeCache struct {
+	Cache map[interface{}]map[string]*ChildType
+}
+
+var instanceChildTypeCache *singletonChildTypeCache
+var onceChildTypeCache sync.Once
+
+func getChildTypeCache() *singletonChildTypeCache {
+	onceChildTypeCache.Do(func() {
+		instanceChildTypeCache = &singletonChildTypeCache{}
+	})
+	return instanceChildTypeCache
+}
+
+// ChildType structure contains construct details of an object
+type ChildType struct {
+	ClassModule string
+	ClassType   reflect.Type
+	IsContainer bool
+	Key         string
+	KeyFromStr  func(s string) interface{}
+}
+
+// ChildrenFields retrieves list of child objects associated to a given interface
+func ChildrenFields(cls interface{}) map[string]*ChildType {
+	if cls == nil {
+		return nil
+	}
+	var names map[string]*ChildType
+	var namesExist bool
+
+	if getChildTypeCache().Cache == nil {
+		getChildTypeCache().Cache = make(map[interface{}]map[string]*ChildType)
+	}
+
+	msgType := reflect.TypeOf(cls)
+	inst := getChildTypeCache()
+
+	if names, namesExist = inst.Cache[msgType.String()]; !namesExist {
+		names = make(map[string]*ChildType)
+
+		_, md := desc.ForMessage(cls.(desc.Message))
+
+		// TODO: Do we need to validate MD for nil, panic or exception?
+		for _, field := range md.Field {
+			if options := field.GetOptions(); options != nil {
+				if proto.HasExtension(options, common.E_ChildNode) {
+					isContainer := *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+					meta, _ := proto.GetExtension(options, common.E_ChildNode)
+
+					var keyFromStr func(string) interface{}
+					var ct ChildType
+
+					parentType := FindOwnerType(reflect.ValueOf(cls), field.GetName(), 0, false)
+					if meta.(*common.ChildNode).GetKey() != "" {
+						keyType := FindKeyOwner(reflect.New(parentType).Elem().Interface(), meta.(*common.ChildNode).GetKey(), 0)
+
+						switch keyType.(reflect.Type).Name() {
+						case "string":
+							keyFromStr = func(s string) interface{} {
+								return s
+							}
+						case "int32":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return int32(i)
+							}
+						case "int64":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return int64(i)
+							}
+						case "uint32":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return uint32(i)
+							}
+						case "uint64":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return uint64(i)
+							}
+						default:
+							log.Errorf("Key type not implemented - type: %s\n", keyType.(reflect.Type))
+						}
+					}
+
+					ct = ChildType{
+						ClassModule: parentType.String(),
+						ClassType:   parentType,
+						IsContainer: isContainer,
+						Key:         meta.(*common.ChildNode).GetKey(),
+						KeyFromStr:  keyFromStr,
+					}
+
+					names[field.GetName()] = &ct
+				}
+			}
+		}
+
+		getChildTypeCache().Cache[msgType.String()] = names
+	} else {
+		log.Debugf("Cache entry for %s: %+v", msgType.String(), inst.Cache[msgType.String()])
+	}
+
+	return names
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/data_revision.go b/vendor/github.com/opencord/voltha-go/db/model/data_revision.go
new file mode 100644
index 0000000..0763d09
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/data_revision.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+)
+
+// DataRevision stores the data associated to a revision along with its calculated checksum hash value
+type DataRevision struct {
+	Data interface{}
+	Hash string
+}
+
+// NewDataRevision creates a new instance of a DataRevision structure
+func NewDataRevision(root *root, data interface{}) *DataRevision {
+	dr := DataRevision{}
+	dr.Data = data
+	dr.Hash = dr.hashData(root, data)
+
+	return &dr
+}
+
+func (dr *DataRevision) hashData(root *root, data interface{}) string {
+	var buffer bytes.Buffer
+
+	if IsProtoMessage(data) {
+		if pbdata, err := proto.Marshal(data.(proto.Message)); err != nil {
+			log.Debugf("problem to marshal protobuf data --> err: %s", err.Error())
+		} else {
+			buffer.Write(pbdata)
+			// To ensure uniqueness in case data is nil, also include data type
+			buffer.Write([]byte(reflect.TypeOf(data).String()))
+		}
+
+	} else if reflect.ValueOf(data).IsValid() {
+		dataObj := reflect.New(reflect.TypeOf(data).Elem())
+		if json, err := json.Marshal(dataObj.Interface()); err != nil {
+			log.Debugf("problem to marshal data --> err: %s", err.Error())
+		} else {
+			buffer.Write(json)
+		}
+	} else {
+		dataObj := reflect.New(reflect.TypeOf(data).Elem())
+		buffer.Write(dataObj.Bytes())
+	}
+
+	// Add the root pointer that owns the current data for extra uniqueness
+	rootPtr := fmt.Sprintf("%p", root)
+	buffer.Write([]byte(rootPtr))
+
+	return fmt.Sprintf("%x", md5.Sum(buffer.Bytes()))[:12]
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/event_bus.go b/vendor/github.com/opencord/voltha-go/db/model/event_bus.go
new file mode 100644
index 0000000..335d43f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/event_bus.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"encoding/json"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+// EventBus contains the details required to communicate with the event bus mechanism
+type EventBus struct {
+	client *EventBusClient
+	topic  string
+}
+
+// ignoredCallbacks keeps a list of callbacks that should not be advertised on the event bus
+var (
+	ignoredCallbacks = map[CallbackType]struct{}{
+		PRE_ADD:         {},
+		GET:             {},
+		POST_LISTCHANGE: {},
+		PRE_REMOVE:      {},
+		PRE_UPDATE:      {},
+	}
+)
+
+// NewEventBus creates a new instance of the EventBus structure
+func NewEventBus() *EventBus {
+	bus := &EventBus{
+		client: NewEventBusClient(),
+		topic:  "model-change-events",
+	}
+	return bus
+}
+
+// Advertise will publish the provided information to the event bus
+func (bus *EventBus) Advertise(args ...interface{}) interface{} {
+	eventType := args[0].(CallbackType)
+	hash := args[1].(string)
+	data := args[2:]
+
+	if _, ok := ignoredCallbacks[eventType]; ok {
+		log.Debugf("ignoring event - type:%s, data:%+v", eventType, data)
+	}
+	var kind voltha.ConfigEventType_ConfigEventType
+	switch eventType {
+	case POST_ADD:
+		kind = voltha.ConfigEventType_add
+	case POST_REMOVE:
+		kind = voltha.ConfigEventType_remove
+	default:
+		kind = voltha.ConfigEventType_update
+	}
+
+	var msg []byte
+	var err error
+	if IsProtoMessage(data) {
+		if msg, err = proto.Marshal(data[0].(proto.Message)); err != nil {
+			log.Debugf("problem marshalling proto data: %+v, err:%s", data[0], err.Error())
+		}
+	} else if data[0] != nil {
+		if msg, err = json.Marshal(data[0]); err != nil {
+			log.Debugf("problem marshalling json data: %+v, err:%s", data[0], err.Error())
+		}
+	} else {
+		log.Debugf("no data to advertise : %+v", data[0])
+	}
+
+	event := voltha.ConfigEvent{
+		Type: kind,
+		Hash: hash,
+		Data: string(msg),
+	}
+
+	bus.client.Publish(bus.topic, event)
+
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go b/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go
new file mode 100644
index 0000000..c9c1314
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+// EventBusClient is an abstraction layer structure to communicate with an event bus mechanism
+type EventBusClient struct {
+}
+
+// NewEventBusClient creates a new EventBusClient instance
+func NewEventBusClient() *EventBusClient {
+	return &EventBusClient{}
+}
+
+// Publish sends a event to the bus
+func (ebc *EventBusClient) Publish(topic string, event voltha.ConfigEvent) {
+	log.Debugf("publishing event:%+v, topic:%s\n", event, topic)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/merge.go b/vendor/github.com/opencord/voltha-go/db/model/merge.go
new file mode 100644
index 0000000..c59dda4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/merge.go
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+func revisionsAreEqual(a, b []Revision) bool {
+	// If one is nil, the other must also be nil.
+	if (a == nil) != (b == nil) {
+		return false
+	}
+
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+type changeAnalysis struct {
+	KeyMap1     map[string]int
+	KeyMap2     map[string]int
+	AddedKeys   map[string]struct{}
+	RemovedKeys map[string]struct{}
+	ChangedKeys map[string]struct{}
+}
+
+func newChangeAnalysis(lst1, lst2 []Revision, keyName string) *changeAnalysis {
+	changes := &changeAnalysis{}
+
+	changes.KeyMap1 = make(map[string]int)
+	changes.KeyMap2 = make(map[string]int)
+
+	changes.AddedKeys = make(map[string]struct{})
+	changes.RemovedKeys = make(map[string]struct{})
+	changes.ChangedKeys = make(map[string]struct{})
+
+	for i, rev := range lst1 {
+		_, v := GetAttributeValue(rev.GetData(), keyName, 0)
+		changes.KeyMap1[v.String()] = i
+	}
+	for i, rev := range lst2 {
+		_, v := GetAttributeValue(rev.GetData(), keyName, 0)
+		changes.KeyMap2[v.String()] = i
+	}
+	for v := range changes.KeyMap2 {
+		if _, ok := changes.KeyMap1[v]; !ok {
+			changes.AddedKeys[v] = struct{}{}
+		}
+	}
+	for v := range changes.KeyMap1 {
+		if _, ok := changes.KeyMap2[v]; !ok {
+			changes.RemovedKeys[v] = struct{}{}
+		}
+	}
+	for v := range changes.KeyMap1 {
+		if _, ok := changes.KeyMap2[v]; ok && lst1[changes.KeyMap1[v]].GetHash() != lst2[changes.KeyMap2[v]].GetHash() {
+			changes.ChangedKeys[v] = struct{}{}
+		}
+	}
+
+	return changes
+}
+
+// Merge3Way takes care of combining the revision contents of the same data set
+func Merge3Way(
+	forkRev, srcRev, dstRev Revision,
+	mergeChildFunc func(Revision) Revision,
+	dryRun bool) (rev Revision, changes []ChangeTuple) {
+
+	log.Debugw("3-way-merge-request", log.Fields{"dryRun": dryRun})
+
+	var configChanged bool
+	var revsToDiscard []Revision
+
+	if dstRev.GetConfig() == forkRev.GetConfig() {
+		configChanged = dstRev.GetConfig() != srcRev.GetConfig()
+	} else {
+		if dstRev.GetConfig().Hash != srcRev.GetConfig().Hash {
+			log.Error("config-collision")
+		}
+		configChanged = true
+	}
+
+	//newChildren := reflect.ValueOf(dstRev.GetAllChildren()).Interface().(map[string][]Revision)
+	newChildren := make(map[string][]Revision)
+	for entryName, childrenEntry := range dstRev.GetAllChildren() {
+		//newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
+		newChildren[entryName] = make([]Revision, len(childrenEntry))
+		copy(newChildren[entryName], childrenEntry)
+	}
+
+	childrenFields := ChildrenFields(forkRev.GetData())
+
+	for fieldName, field := range childrenFields {
+		forkList := forkRev.GetChildren(fieldName)
+		srcList := srcRev.GetChildren(fieldName)
+		dstList := dstRev.GetChildren(fieldName)
+
+		if revisionsAreEqual(dstList, srcList) {
+			for _, rev := range srcList {
+				mergeChildFunc(rev)
+			}
+			continue
+		}
+
+		if field.Key == "" {
+			if revisionsAreEqual(dstList, forkList) {
+				if !revisionsAreEqual(srcList, forkList) {
+					log.Error("we should not be here")
+				} else {
+					for _, rev := range srcList {
+						newChildren[fieldName] = append(newChildren[fieldName], mergeChildFunc(rev))
+					}
+					if field.IsContainer {
+						changes = append(
+							changes, ChangeTuple{POST_LISTCHANGE,
+								NewOperationContext("", nil, fieldName, ""), nil},
+						)
+					}
+				}
+			} else {
+				if !revisionsAreEqual(srcList, forkList) {
+					log.Error("cannot merge - single child node or un-keyed children list has changed")
+				}
+			}
+		} else {
+			if revisionsAreEqual(dstList, forkList) {
+				src := newChangeAnalysis(forkList, srcList, field.Key)
+
+				newList := make([]Revision, len(srcList))
+				copy(newList, srcList)
+
+				for key := range src.AddedKeys {
+					idx := src.KeyMap2[key]
+					newRev := mergeChildFunc(newList[idx])
+
+					// FIXME: newRev may come back as nil... exclude those entries for now
+					if newRev != nil {
+						newList[idx] = newRev
+						changes = append(changes, ChangeTuple{POST_ADD, newList[idx].GetData(), newRev.GetData()})
+					}
+				}
+				for key := range src.RemovedKeys {
+					oldRev := forkList[src.KeyMap1[key]]
+					revsToDiscard = append(revsToDiscard, oldRev)
+					changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+				}
+				for key := range src.ChangedKeys {
+					idx := src.KeyMap2[key]
+					newRev := mergeChildFunc(newList[idx])
+
+					// FIXME: newRev may come back as nil... exclude those entries for now
+					if newRev != nil {
+						newList[idx] = newRev
+					}
+				}
+
+				if !dryRun {
+					newChildren[fieldName] = newList
+				}
+			} else {
+				src := newChangeAnalysis(forkList, srcList, field.Key)
+				dst := newChangeAnalysis(forkList, dstList, field.Key)
+
+				newList := make([]Revision, len(dstList))
+				copy(newList, dstList)
+
+				for key := range src.AddedKeys {
+					if _, exists := dst.AddedKeys[key]; exists {
+						childDstRev := dstList[dst.KeyMap2[key]]
+						childSrcRev := srcList[src.KeyMap2[key]]
+						if childDstRev.GetHash() == childSrcRev.GetHash() {
+							mergeChildFunc(childDstRev)
+						} else {
+							log.Error("conflict error - revision has been added is different")
+						}
+					} else {
+						newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+						newList = append(newList, newRev)
+						changes = append(changes, ChangeTuple{POST_ADD, srcList[src.KeyMap2[key]], newRev.GetData()})
+					}
+				}
+				for key := range src.ChangedKeys {
+					if _, removed := dst.RemovedKeys[key]; removed {
+						log.Error("conflict error - revision has been removed")
+					} else if _, changed := dst.ChangedKeys[key]; changed {
+						childDstRev := dstList[dst.KeyMap2[key]]
+						childSrcRev := srcList[src.KeyMap2[key]]
+						if childDstRev.GetHash() == childSrcRev.GetHash() {
+							mergeChildFunc(childSrcRev)
+						} else if childDstRev.GetConfig().Hash != childSrcRev.GetConfig().Hash {
+							log.Error("conflict error - revision has been changed and is different")
+						} else {
+							newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+							newList[dst.KeyMap2[key]] = newRev
+						}
+					} else {
+						newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+						newList[dst.KeyMap2[key]] = newRev
+					}
+				}
+
+				// TODO: how do i sort this map in reverse order?
+				for key := range src.RemovedKeys {
+					if _, changed := dst.ChangedKeys[key]; changed {
+						log.Error("conflict error - revision has changed")
+					}
+					if _, removed := dst.RemovedKeys[key]; !removed {
+						dstIdx := dst.KeyMap2[key]
+						oldRev := newList[dstIdx]
+						revsToDiscard = append(revsToDiscard, oldRev)
+
+						copy(newList[dstIdx:], newList[dstIdx+1:])
+						newList[len(newList)-1] = nil
+						newList = newList[:len(newList)-1]
+
+						changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+					}
+				}
+
+				if !dryRun {
+					newChildren[fieldName] = newList
+				}
+			}
+		}
+	}
+
+	if !dryRun && len(newChildren) > 0 {
+		if configChanged {
+			rev = srcRev
+		} else {
+			rev = dstRev
+		}
+
+		for _, discarded := range revsToDiscard {
+			discarded.Drop("", true)
+		}
+
+		// FIXME: Do not discard the latest value for now
+		//dstRev.GetBranch().GetLatest().Drop("", configChanged)
+		rev = rev.UpdateAllChildren(newChildren, dstRev.GetBranch())
+
+		if configChanged {
+			changes = append(changes, ChangeTuple{POST_UPDATE, dstRev.GetBranch().GetLatest().GetData(), rev.GetData()})
+		}
+		return rev, changes
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/model.go b/vendor/github.com/opencord/voltha-go/db/model/model.go
new file mode 100644
index 0000000..18ff905
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/model.go
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+func init() {
+	log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
+	defer log.CleanUp()
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/node.go b/vendor/github.com/opencord/voltha-go/db/model/node.go
new file mode 100644
index 0000000..2a9309c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/node.go
@@ -0,0 +1,992 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+// TODO: proper error handling
+// TODO: proper logging
+
+import (
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// When a branch has no transaction id, everything gets stored in NONE
+const (
+	// NONE is the branch key used for committed (non-transactional) state.
+	NONE string = "none"
+)
+
+// Node interface is an abstraction of the node data structure
type Node interface {
+	// MakeLatest promotes the given revision to latest on the branch and
+	// announces the supplied changes through registered callbacks.
+	MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple)
+
+	// CRUD functions
+	Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
+	Get(path string, hash string, depth int, deep bool, txid string) interface{}
+	Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
+	Remove(path string, txid string, makeBranch MakeBranchFunction) Revision
+
+	// Branch management used for transaction support.
+	MakeBranch(txid string) *Branch
+	DeleteBranch(txid string)
+	MergeBranch(txid string, dryRun bool) (Revision, error)
+
+	MakeTxBranch() string
+	DeleteTxBranch(txid string)
+	FoldTxBranch(txid string)
+
+	// Proxy management for exposing a sub-tree of the model.
+	CreateProxy(path string, exclusive bool) *Proxy
+	GetProxy() *Proxy
+}
+
+// node is the concrete implementation backing the Node interface; the
+// embedded RWMutex guards all mutable fields below.
+type node struct {
+	sync.RWMutex
+	Root      *root               // tree root providing shared services (kv store, callbacks)
+	Type      interface{}         // prototype value that defines this node's data type
+	Branches  map[string]*Branch  // branches keyed by transaction id (NONE = committed)
+	Tags      map[string]Revision // named revisions (tag management)
+	Proxy     *Proxy              // optional proxy attached to this node
+	EventBus  *EventBus           // lazily-created event bus (see makeEventBus)
+	AutoPrune bool                // whether branches created for this node auto-prune
+}
+
+// ChangeTuple holds details of modifications made to a revision
+type ChangeTuple struct {
+	Type         CallbackType // kind of change (e.g. POST_ADD, POST_UPDATE, POST_REMOVE)
+	PreviousData interface{}  // data before the change; nil for additions
+	LatestData   interface{}  // data after the change; nil for removals
+}
+
+// NewNode creates a new instance of the node data structure.
+// root is the owning tree root, initialData seeds the node's type and (for
+// proto messages) its first revision, autoPrune controls branch pruning and
+// txid associates the initial branch with a transaction (empty = NONE).
+func NewNode(root *root, initialData interface{}, autoPrune bool, txid string) *node {
+	n := &node{}
+
+	n.Root = root
+	n.Branches = make(map[string]*Branch)
+	n.Tags = make(map[string]Revision)
+	n.Proxy = nil
+	n.EventBus = nil
+	n.AutoPrune = autoPrune
+
+	if IsProtoMessage(initialData) {
+		n.Type = reflect.ValueOf(initialData).Interface()
+		// clone so later mutations of the caller's message do not leak in
+		dataCopy := proto.Clone(initialData.(proto.Message))
+		n.initialize(dataCopy, txid)
+	} else if reflect.ValueOf(initialData).IsValid() {
+		// FIXME: this block does not reflect the original implementation
+		// it should be checking if the provided initial_data is already a type!??!
+		// it should be checked before IsProtoMessage
+		n.Type = reflect.ValueOf(initialData).Interface()
+	} else {
+		// not implemented error
+		log.Errorf("cannot process initial data - %+v", initialData)
+	}
+
+	return n
+}
+
+// MakeNode creates a new node in the tree
+func (n *node) MakeNode(data interface{}, txid string) *node {
+	return NewNode(n.Root, data, true, txid)
+}
+
+// MakeRevision creates a new revision of the node in the tree, delegating to
+// the root so the configured revision class (persisted or not) is used.
+func (n *node) MakeRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	return n.GetRoot().MakeRevision(branch, data, children)
+}
+
+// makeLatest will mark the revision of a node as being the latest on the
+// given branch and, for non-transactional branches, schedule the supplied
+// change announcements on the root's callback queue.
+func (n *node) makeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
+	n.Lock()
+	defer n.Unlock()
+
+	branch.AddRevision(revision)
+
+	// only advance the latest pointer when the content actually changed
+	if branch.GetLatest() == nil || revision.GetHash() != branch.GetLatest().GetHash() {
+		branch.SetLatest(revision)
+	}
+
+	// announcements are only fired for the committed branch (no txid)
+	if changeAnnouncement != nil && branch.Txid == "" {
+		if n.Proxy != nil {
+			for _, change := range changeAnnouncement {
+				//log.Debugw("invoking callback",
+				//	log.Fields{
+				//		"callbacks":    n.Proxy.getCallbacks(change.Type),
+				//		"type":         change.Type,
+				//		"previousData": change.PreviousData,
+				//		"latestData":   change.LatestData,
+				//	})
+				// queue the callback on the root rather than invoking inline
+				n.Root.AddCallback(
+					n.Proxy.InvokeCallbacks,
+					change.Type,
+					true,
+					change.PreviousData,
+					change.LatestData)
+			}
+		}
+
+		//for _, change := range changeAnnouncement {
+		//log.Debugf("sending notification - changeType: %+v, previous:%+v, latest: %+v",
+		//	change.Type,
+		//	change.PreviousData,
+		//	change.LatestData)
+		//n.Root.AddNotificationCallback(
+		//	n.makeEventBus().Advertise,
+		//	change.Type,
+		//	revision.GetHash(),
+		//	change.PreviousData,
+		//	change.LatestData)
+		//}
+	}
+}
+
+// Latest returns the latest revision of node with or without the transaction id
+func (n *node) Latest(txid ...string) Revision {
+	var branch *Branch
+
+	if len(txid) > 0 && txid[0] != "" {
+		if branch = n.GetBranch(txid[0]); branch != nil {
+			return branch.GetLatest()
+		}
+	} else if branch = n.GetBranch(NONE); branch != nil {
+		return branch.GetLatest()
+	}
+	return nil
+}
+
+// initialize prepares the content of a node along with its possible
+// ramifications: every child field of the data is turned into a child node,
+// an initial revision is built from data + children, and the result is
+// installed as the latest revision of a fresh branch keyed by txid (or NONE).
+func (n *node) initialize(data interface{}, txid string) {
+	n.Lock()
+	children := make(map[string][]Revision)
+	for fieldName, field := range ChildrenFields(n.Type) {
+		_, fieldValue := GetAttributeValue(data, fieldName, 0)
+
+		if fieldValue.IsValid() {
+			if field.IsContainer {
+				if field.Key != "" {
+					// keyed container: one child node per element
+					for i := 0; i < fieldValue.Len(); i++ {
+						v := fieldValue.Index(i)
+
+						if rev := n.MakeNode(v.Interface(), txid).Latest(txid); rev != nil {
+							children[fieldName] = append(children[fieldName], rev)
+						}
+
+						// TODO: The following logic was ported from v1.0.  Need to verify if it is required
+						//var keysSeen []string
+						//_, key := GetAttributeValue(v.Interface(), field.Key, 0)
+						//for _, k := range keysSeen {
+						//	if k == key.String() {
+						//		//log.Errorf("duplicate key - %s", k)
+						//	}
+						//}
+						//keysSeen = append(keysSeen, key.String())
+					}
+
+				} else {
+					// unkeyed container: children tracked positionally
+					for i := 0; i < fieldValue.Len(); i++ {
+						v := fieldValue.Index(i)
+						if newNodeRev := n.MakeNode(v.Interface(), txid).Latest(); newNodeRev != nil {
+							children[fieldName] = append(children[fieldName], newNodeRev)
+						}
+					}
+				}
+			} else {
+				// single (non-container) child field
+				if newNodeRev := n.MakeNode(fieldValue.Interface(), txid).Latest(); newNodeRev != nil {
+					children[fieldName] = append(children[fieldName], newNodeRev)
+				}
+			}
+		} else {
+			log.Errorf("field is invalid - %+v", fieldValue)
+		}
+	}
+	n.Unlock()
+
+	branch := NewBranch(n, "", nil, n.AutoPrune)
+	rev := n.MakeRevision(branch, data, children)
+	n.makeLatest(branch, rev, nil)
+
+	// register the branch under its transaction id, or NONE when committed
+	if txid == "" {
+		n.SetBranch(NONE, branch)
+	} else {
+		n.SetBranch(txid, branch)
+	}
+}
+
+// findRevByKey retrieves a specific revision from a list of sibling
+// revisions by comparing the named key field of each revision's data against
+// value. Returns the index and the revision, or (-1, nil) when not found.
+// NOTE(review): both sides are compared via fmt.Sprintf("%s", ...); for
+// non-string key types this yields %!s(...) formatting on both sides, which
+// still compares consistently — confirm a %v-style comparison isn't intended.
+func (n *node) findRevByKey(revs []Revision, keyName string, value interface{}) (int, Revision) {
+	n.Lock()
+	defer n.Unlock()
+
+	for i, rev := range revs {
+		dataValue := reflect.ValueOf(rev.GetData())
+		dataStruct := GetAttributeStructure(rev.GetData(), keyName, 0)
+
+		// reach into the revision's data struct to read the key field
+		fieldValue := dataValue.Elem().FieldByName(dataStruct.Name)
+
+		a := fmt.Sprintf("%s", fieldValue.Interface())
+		b := fmt.Sprintf("%s", value)
+		if a == b {
+			return i, revs[i]
+		}
+	}
+
+	return -1, nil
+}
+
+// List retrieves, from persistence, the data of all entries residing at the
+// specified path. A specific revision may be selected via hash; otherwise
+// the branch's latest revision is used. deep forces unlimited depth.
+// Returns a slice of the entries' data, or nil when nothing is found.
+func (n *node) List(path string, hash string, depth int, deep bool, txid string) interface{} {
+	log.Debugw("node-list-request", log.Fields{"path": path, "hash": hash, "depth": depth, "deep": deep, "txid": txid})
+	if deep {
+		depth = -1
+	}
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	var branch *Branch
+	var rev Revision
+
+	// fall back to the committed (NONE) branch when no transaction branch exists
+	if branch = n.GetBranch(txid); txid == "" || branch == nil {
+		branch = n.GetBranch(NONE)
+	}
+
+	if hash != "" {
+		rev = branch.GetRevision(hash)
+	} else {
+		rev = branch.GetLatest()
+	}
+
+	// guard against a missing revision (unknown hash or empty branch);
+	// dereferencing it below would otherwise panic
+	if rev == nil {
+		return nil
+	}
+
+	var result interface{}
+	var prList []interface{}
+	if pr := rev.LoadFromPersistence(path, txid); pr != nil {
+		for _, revEntry := range pr {
+			prList = append(prList, revEntry.GetData())
+		}
+		result = prList
+	}
+
+	return result
+}
+
+// Get retrieves the data from a node tree that resides at the specified
+// path. When reconcile is false the in-memory revision is consulted first;
+// otherwise (or on a memory miss) the data is reloaded from the kv store.
+// A specific revision may be selected via hash; otherwise the branch's
+// latest is used. Returns nil when nothing can be found.
+func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
+	log.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile,
+		"txid": txid})
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	var branch *Branch
+	var rev Revision
+
+	// fall back to the committed (NONE) branch when no transaction branch exists
+	if branch = n.GetBranch(txid); txid == "" || branch == nil {
+		branch = n.GetBranch(NONE)
+	}
+
+	if hash != "" {
+		rev = branch.GetRevision(hash)
+	} else {
+		rev = branch.GetLatest()
+	}
+
+	// guard against a missing revision (unknown hash or empty branch);
+	// dereferencing it below would otherwise panic
+	if rev == nil {
+		return nil
+	}
+
+	var result interface{}
+
+	// If there is no request to reconcile, try to get it from memory
+	if !reconcile {
+		if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil {
+			// IsNil is only legal on nilable kinds; calling it blindly on a
+			// struct or other value kind panics, so check the kind first
+			rv := reflect.ValueOf(result)
+			nilable := rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface ||
+				rv.Kind() == reflect.Slice || rv.Kind() == reflect.Map
+			if !nilable || !rv.IsNil() {
+				return result
+			}
+		}
+	}
+
+	// If we got to this point, we are either trying to reconcile with the db
+	// or we simply failed at getting information from memory
+	if n.Root.KvStore != nil {
+		var prList []interface{}
+		if pr := rev.LoadFromPersistence(path, txid); len(pr) > 0 {
+			// Did we receive a single or multiple revisions?
+			if len(pr) > 1 {
+				for _, revEntry := range pr {
+					prList = append(prList, revEntry.GetData())
+				}
+				result = prList
+			} else {
+				result = pr[0].GetData()
+			}
+		}
+	}
+
+	return result
+}
+
+// getPath traverses the specified path starting at rev and retrieves the
+// data associated with it, descending into keyed and unkeyed containers.
+// An empty path returns this node's own data (to the requested depth).
+// Returns nil when the path cannot be resolved.
+func (n *node) getPath(rev Revision, path string, depth int) interface{} {
+	if path == "" {
+		return n.getData(rev, depth)
+	}
+
+	// split off the first path component
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	names := ChildrenFields(n.Type)
+	field := names[name]
+
+	if field != nil && field.IsContainer {
+		children := make([]Revision, len(rev.GetChildren(name)))
+		copy(children, rev.GetChildren(name))
+
+		if field.Key != "" {
+			if path != "" {
+				// keyed lookup: next component selects the element
+				partition = strings.SplitN(path, "/", 2)
+				key := partition[0]
+				path = ""
+				keyValue := field.KeyFromStr(key)
+				_, childRev := n.findRevByKey(children, field.Key, keyValue)
+				if childRev == nil {
+					return nil
+				}
+				childNode := childRev.GetNode()
+				return childNode.getPath(childRev, path, depth)
+			}
+			// no key given: return the data of every element
+			var response []interface{}
+			for _, childRev := range children {
+				childNode := childRev.GetNode()
+				value := childNode.getData(childRev, depth)
+				response = append(response, value)
+			}
+			return response
+		}
+
+		var response []interface{}
+		if path != "" {
+			// TODO: raise error - cannot index into an unkeyed container
+			return response
+		}
+		for _, childRev := range children {
+			childNode := childRev.GetNode()
+			value := childNode.getData(childRev, depth)
+			response = append(response, value)
+		}
+		return response
+	}
+
+	// single (non-container) child: guard against a missing child list,
+	// which would otherwise panic on the [0] index below
+	childRevs := rev.GetChildren(name)
+	if len(childRevs) == 0 {
+		return nil
+	}
+	childRev := childRevs[0]
+	childNode := childRev.GetNode()
+	return childNode.getPath(childRev, path, depth)
+}
+
+// getData retrieves the data from a node revision, giving an attached proxy
+// the chance to transform it through GET callbacks.
+// NOTE(review): the data is read from rev's branch *latest* revision
+// (rev.GetBranch().GetLatest()), not from rev itself — confirm this is the
+// intended behavior when a historical revision is passed in.
+func (n *node) getData(rev Revision, depth int) interface{} {
+	msg := rev.GetBranch().GetLatest().Get(depth)
+	var modifiedMsg interface{}
+
+	if n.GetProxy() != nil {
+		log.Debugw("invoking-get-callbacks", log.Fields{"data": msg})
+		// a non-nil callback result replaces the raw data
+		if modifiedMsg = n.GetProxy().InvokeCallbacks(GET, false, msg); modifiedMsg != nil {
+			msg = modifiedMsg
+		}
+
+	}
+
+	return msg
+}
+
+// Update changes the content of a node at the specified path with the
+// provided data. An empty path updates this node directly; otherwise the
+// first path component selects a child field (keyed containers require a key
+// component) and the update recurses into the child node. Returns the new
+// revision, or nil when the update could not be applied.
+func (n *node) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-update-request", log.Fields{"path": path, "strict": strict, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	// select (or create) the branch this update applies to
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	if branch.GetLatest() != nil {
+		log.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
+	}
+	if path == "" {
+		return n.doUpdate(branch, data, strict)
+	}
+
+	rev := branch.GetLatest()
+
+	// split off the first path component
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	var children []Revision
+
+	// unknown child field: treat the whole path as data for this node
+	if field == nil {
+		return n.doUpdate(branch, data, strict)
+	}
+
+	if field.IsContainer {
+		if path == "" {
+			log.Errorf("cannot update a list")
+		} else if field.Key != "" {
+			// keyed container: the next component identifies the element
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+
+			// copy the child list so the existing revision is not mutated
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			// NOTE(review): childRev is dereferenced without a nil check; an
+			// unknown key would panic here — confirm callers guarantee existence
+			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+			childNode := childRev.GetNode()
+
+			// Save proxy in child node to ensure callbacks are called later on
+			// only assign in cases of non sub-folder proxies, i.e. "/"
+			if childNode.Proxy == nil && n.Proxy != nil && n.Proxy.getFullPath() == "" {
+				childNode.Proxy = n.Proxy
+			}
+
+			newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+
+			// identical hash means no effective change; keep the current latest
+			if newChildRev.GetHash() == childRev.GetHash() {
+				if newChildRev != childRev {
+					log.Debug("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
+					newChildRev.ClearHash()
+				}
+				return branch.GetLatest()
+			}
+
+			_, newKey := GetAttributeValue(newChildRev.GetData(), field.Key, 0)
+
+			_newKeyType := fmt.Sprintf("%s", newKey)
+			_keyValueType := fmt.Sprintf("%s", keyValue)
+
+			// the key field of an element must remain stable across updates
+			if _newKeyType != _keyValueType {
+				log.Errorf("cannot change key field")
+			}
+
+			// Prefix the hash value with the data type (e.g. devices, logical_devices, adapters)
+			newChildRev.SetName(name + "/" + _keyValueType)
+			children[idx] = newChildRev
+
+			updatedRev := rev.UpdateChildren(name, children, branch)
+
+			branch.GetLatest().Drop(txid, false)
+			n.makeLatest(branch, updatedRev, nil)
+
+			return newChildRev
+
+		} else {
+			log.Errorf("cannot index into container with no keys")
+		}
+	} else {
+		// single (non-container) child: recurse and splice in the new revision
+		childRev := rev.GetChildren(name)[0]
+		childNode := childRev.GetNode()
+		newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+		updatedRev := rev.UpdateChildren(name, []Revision{newChildRev}, branch)
+		rev.Drop(txid, false)
+		n.makeLatest(branch, updatedRev, nil)
+
+		return newChildRev
+	}
+
+	return nil
+}
+
+// doUpdate applies data as the new content of this node on the given branch.
+// The data must match the node's declared type. PRE_UPDATE callbacks are
+// fired before the change; a POST_UPDATE change tuple is announced after.
+// Returns the new revision, the unchanged latest when the data is identical,
+// or nil on a type mismatch.
+func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
+	log.Debugf("Comparing types - expected: %+v, actual: %+v &&&&&& %s", reflect.ValueOf(n.Type).Type(),
+		reflect.TypeOf(data),
+		string(debug.Stack()))
+
+	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
+		// TODO raise error
+		log.Errorf("data does not match type: %+v", n.Type)
+		return nil
+	}
+
+	// TODO: validate that this actually works
+	//if n.hasChildren(data) {
+	//	return nil
+	//}
+
+	if n.GetProxy() != nil {
+		log.Debug("invoking proxy PRE_UPDATE Callbacks")
+		n.GetProxy().InvokeCallbacks(PRE_UPDATE, false, branch.GetLatest(), data)
+	}
+
+	// compare serialized proto content to detect an effective change
+	if branch.GetLatest().GetData().(proto.Message).String() != data.(proto.Message).String() {
+		if strict {
+			// TODO: checkAccessViolations(data, Branch.GetLatest.data)
+			log.Debugf("checking access violations")
+		}
+
+		rev := branch.GetLatest().UpdateData(data, branch)
+		changes := []ChangeTuple{{POST_UPDATE, branch.GetLatest().GetData(), rev.GetData()}}
+
+		// FIXME VOL-1293: the following statement corrupts the kv when using a subproxy (commenting for now)
+		// FIXME VOL-1293 cont'd: need to figure out the required conditions otherwise we are not cleaning up entries
+		//branch.GetLatest().Drop(branch.Txid, false)
+
+		n.makeLatest(branch, rev, changes)
+
+		return rev
+	}
+
+	// no effective change: keep the current latest revision
+	return branch.GetLatest()
+}
+
+// Add inserts a new element at the specified path with the provided data.
+// The path must end in a keyed container; intermediate components recurse
+// into child nodes. Returns the new child revision, the existing revision
+// when the key already exists, or nil on error.
+func (n *node) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-add-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		// TODO raise error
+		log.Errorf("cannot add for non-container mode")
+		return nil
+	}
+
+	// select (or create) the branch this addition applies to
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	rev := branch.GetLatest()
+
+	// split off the first path component
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+
+	var children []Revision
+
+	if field.IsContainer {
+		if path == "" {
+			// terminal component: insert directly into this keyed container
+			if field.Key != "" {
+				if n.GetProxy() != nil {
+					log.Debug("invoking proxy PRE_ADD Callbacks")
+					n.GetProxy().InvokeCallbacks(PRE_ADD, false, data)
+				}
+
+				// copy the child list so the existing revision is not mutated
+				children = make([]Revision, len(rev.GetChildren(name)))
+				copy(children, rev.GetChildren(name))
+
+				_, key := GetAttributeValue(data, field.Key, 0)
+
+				if _, exists := n.findRevByKey(children, field.Key, key.String()); exists != nil {
+					// TODO raise error
+					log.Warnw("duplicate-key-found", log.Fields{"key": key.String()})
+					return exists
+				}
+				childRev := n.MakeNode(data, "").Latest()
+
+				// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
+				childRev.SetName(name + "/" + key.String())
+
+				// Create watch for <component>/<key>
+				childRev.SetupWatch(childRev.GetName())
+
+				children = append(children, childRev)
+				rev = rev.UpdateChildren(name, children, branch)
+				changes := []ChangeTuple{{POST_ADD, nil, childRev.GetData()}}
+
+				rev.Drop(txid, false)
+				n.makeLatest(branch, rev, changes)
+
+				return childRev
+			}
+			log.Errorf("cannot add to non-keyed container")
+
+		} else if field.Key != "" {
+			// intermediate component: descend into the keyed element
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			// NOTE(review): childRev is dereferenced without a nil check; an
+			// unknown key would panic — confirm callers guarantee existence
+			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+
+			childNode := childRev.GetNode()
+			newChildRev := childNode.Add(path, data, txid, makeBranch)
+
+			// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
+			// NOTE(review): keyValue.(string) panics for non-string key types,
+			// and the name is set on childRev rather than newChildRev — verify
+			childRev.SetName(name + "/" + keyValue.(string))
+
+			children[idx] = newChildRev
+
+			rev = rev.UpdateChildren(name, children, branch)
+			rev.Drop(txid, false)
+			n.makeLatest(branch, rev.GetBranch().GetLatest(), nil)
+
+			return newChildRev
+		} else {
+			log.Errorf("cannot add to non-keyed container")
+		}
+	} else {
+		log.Errorf("cannot add to non-container field")
+	}
+
+	return nil
+}
+
+// Remove eliminates the element at the specified path. The path must end in
+// an element of a keyed container; intermediate components recurse into
+// child nodes. PRE_REMOVE callbacks are fired before removal and a
+// POST_REMOVE change is announced after. Returns the updated revision of
+// this node on a terminal removal, nil otherwise.
+func (n *node) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-remove-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		// TODO raise error
+		log.Errorf("cannot remove for non-container mode")
+	}
+	// select (or create) the branch this removal applies to
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	rev := branch.GetLatest()
+
+	// split off the first path component
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	var children []Revision
+	postAnnouncement := []ChangeTuple{}
+
+	if field.IsContainer {
+		if path == "" {
+			log.Errorw("cannot-remove-without-key", log.Fields{"name": name, "key": path})
+		} else if field.Key != "" {
+			// the next component identifies the keyed element
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+
+			keyValue := field.KeyFromStr(key)
+			// copy the child list so the existing revision is not mutated
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			if path != "" {
+				// more path remains: recurse into the element's node
+				// NOTE(review): childRev is dereferenced without a nil check;
+				// an unknown key would panic here — verify
+				idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+				childNode := childRev.GetNode()
+				if childNode.Proxy == nil {
+					childNode.Proxy = n.Proxy
+				}
+				newChildRev := childNode.Remove(path, txid, makeBranch)
+				children[idx] = newChildRev
+				rev.SetChildren(name, children)
+				branch.GetLatest().Drop(txid, false)
+				n.makeLatest(branch, rev, nil)
+				return nil
+			}
+
+			// terminal component: remove the element itself
+			if idx, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil {
+				if n.GetProxy() != nil {
+					data := childRev.GetData()
+					n.GetProxy().InvokeCallbacks(PRE_REMOVE, false, data)
+					postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, data, nil})
+				} else {
+					postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, childRev.GetData(), nil})
+				}
+
+				// drop the element from persistence and from the child list
+				childRev.StorageDrop(txid, true)
+				children = append(children[:idx], children[idx+1:]...)
+				rev.SetChildren(name, children)
+
+				branch.GetLatest().Drop(txid, false)
+				n.makeLatest(branch, rev, postAnnouncement)
+
+				return rev
+			} else {
+				log.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
+			}
+		}
+		log.Errorw("cannot-add-to-non-keyed-container", log.Fields{"name": name, "path": path, "fieldKey": field.Key})
+
+	} else {
+		log.Errorw("cannot-add-to-non-container-field", log.Fields{"name": name, "path": path})
+	}
+
+	return nil
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Branching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// MakeBranchFunction is a type for function references intended to create a
+// branch on a node (typically bound to a transaction id by the caller).
+type MakeBranchFunction func(*node) *Branch
+
+// MakeBranch creates a new branch for the provided transaction id, forked
+// from the latest revision of the committed (NONE) branch, and registers it
+// on this node.
+func (n *node) MakeBranch(txid string) *Branch {
+	origin := n.GetBranch(NONE).GetLatest()
+	newBranch := NewBranch(n, txid, origin, true)
+	n.SetBranch(txid, newBranch)
+	return newBranch
+}
+
+// DeleteBranch removes the branch registered under the specified
+// transaction id; a no-op when no such branch exists.
+func (n *node) DeleteBranch(txid string) {
+	n.Lock()
+	defer n.Unlock()
+	delete(n.Branches, txid)
+}
+
+// mergeChild returns a closure used during a 3-way merge: given a child
+// revision, it merges the child's transaction branch (matching txid) back
+// into its node and returns the resulting revision; revisions belonging to
+// other branches are returned unchanged.
+func (n *node) mergeChild(txid string, dryRun bool) func(Revision) Revision {
+	f := func(rev Revision) Revision {
+		childBranch := rev.GetBranch()
+
+		if childBranch.Txid == txid {
+			// the merge error is intentionally discarded here (see MergeBranch TODO)
+			rev, _ = childBranch.Node.MergeBranch(txid, dryRun)
+		}
+
+		return rev
+	}
+	return f
+}
+
+// MergeBranch will integrate the contents of a transaction branch within the
+// latest branch of a given node using a 3-way merge (fork point, transaction
+// tip, committed tip). Unless dryRun is set, the merged revision becomes the
+// committed latest and the transaction branch is deleted.
+func (n *node) MergeBranch(txid string, dryRun bool) (Revision, error) {
+	srcBranch := n.GetBranch(txid)
+	dstBranch := n.GetBranch(NONE)
+
+	forkRev := srcBranch.Origin
+	srcRev := srcBranch.GetLatest()
+	dstRev := dstBranch.GetLatest()
+
+	rev, changes := Merge3Way(forkRev, srcRev, dstRev, n.mergeChild(txid, dryRun), dryRun)
+
+	if !dryRun {
+		// rev is nil when the merge produced no change; keep the current latest
+		if rev != nil {
+			rev.SetName(dstRev.GetName())
+			n.makeLatest(dstBranch, rev, changes)
+		}
+		n.DeleteBranch(txid)
+	}
+
+	// TODO: return proper error when one occurs
+	return rev, nil
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diff utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+//func (n *node) diff(hash1, hash2, txid string) {
+//	branch := n.Branches[txid]
+//	rev1 := branch.GetHash(hash1)
+//	rev2 := branch.GetHash(hash2)
+//
+//	if rev1.GetHash() == rev2.GetHash() {
+//		// empty patch
+//	} else {
+//		// translate data to json and generate patch
+//		patch, err := jsonpatch.MakePatch(rev1.GetData(), rev2.GetData())
+//		patch.
+//	}
+//}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tag utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// TODO: is tag mgmt used in the python implementation? Need to validate
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Internals ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// hasChildren reports whether data carries any populated child fields, in
+// which case an update must not be applied directly (children are managed
+// through their own nodes). Currently only referenced from commented-out
+// code in doUpdate.
+// NOTE(review): fieldValue.IsNil() panics for non-nilable kinds, and a
+// non-nil but empty container also counts as "has children" — verify both
+// behaviors against the original python implementation.
+func (n *node) hasChildren(data interface{}) bool {
+	for fieldName, field := range ChildrenFields(n.Type) {
+		_, fieldValue := GetAttributeValue(data, fieldName, 0)
+
+		if (field.IsContainer && fieldValue.Len() > 0) || !fieldValue.IsNil() {
+			log.Error("cannot update external children")
+			return true
+		}
+	}
+
+	return false
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ node Proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// CreateProxy returns a reference to a sub-tree of the data model rooted at
+// path; exclusive prevents other owners from attaching to the same node.
+func (n *node) CreateProxy(path string, exclusive bool) *Proxy {
+	return n.createProxy(path, path, n, exclusive)
+}
+
+// createProxy walks path component by component from this node and builds a
+// proxy on the destination node. fullPath preserves the original requested
+// path for the proxy; parentNode tracks the node one level up. Returns nil
+// when the path cannot be resolved.
+func (n *node) createProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		// destination reached: attach the proxy here
+		return n.makeProxy(path, fullPath, parentNode, exclusive)
+	}
+
+	rev := n.GetBranch(NONE).GetLatest()
+	// split off the first path component
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	if field.IsContainer {
+		if path == "" {
+			//log.Error("cannot proxy a container field")
+			// proxy a whole container by attaching to a fresh node of the
+			// container's element type
+			newNode := n.MakeNode(reflect.New(field.ClassType.Elem()).Interface(), "")
+			return newNode.makeProxy(path, fullPath, parentNode, exclusive)
+		} else if field.Key != "" {
+			// keyed container: the next component selects the element
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+			var children []Revision
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+			if _, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil {
+				childNode := childRev.GetNode()
+				return childNode.createProxy(path, fullPath, n, exclusive)
+			}
+		} else {
+			log.Error("cannot index into container with no keys")
+		}
+	} else {
+		// single (non-container) child: continue walking
+		childRev := rev.GetChildren(name)[0]
+		childNode := childRev.GetNode()
+		return childNode.createProxy(path, fullPath, n, exclusive)
+	}
+
+	log.Warnf("Cannot create proxy - latest rev:%s, all revs:%+v", rev.GetHash(), n.GetBranch(NONE).Revisions)
+	return nil
+}
+
+// makeProxy creates (or reuses) the proxy attached to this node. A shallow
+// copy of the root is made so the proxy carries this node as its root while
+// sharing the original root's callbacks, kv store and state. Attaching to a
+// node already owned exclusively is logged as an error but still returns the
+// existing proxy.
+func (n *node) makeProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+	n.Lock()
+	defer n.Unlock()
+	r := &root{
+		node:                  n,
+		Callbacks:             n.Root.GetCallbacks(),
+		NotificationCallbacks: n.Root.GetNotificationCallbacks(),
+		DirtyNodes:            n.Root.DirtyNodes,
+		KvStore:               n.Root.KvStore,
+		Loading:               n.Root.Loading,
+		RevisionClass:         n.Root.RevisionClass,
+	}
+
+	if n.Proxy == nil {
+		n.Proxy = NewProxy(r, n, parentNode, path, fullPath, exclusive)
+	} else {
+		if n.Proxy.Exclusive {
+			log.Error("node is already owned exclusively")
+		}
+	}
+
+	return n.Proxy
+}
+
+// makeEventBus lazily creates the node's event bus, returning the existing
+// instance on subsequent calls.
+func (n *node) makeEventBus() *EventBus {
+	n.Lock()
+	defer n.Unlock()
+
+	if bus := n.EventBus; bus != nil {
+		return bus
+	}
+	n.EventBus = NewEventBus()
+	return n.EventBus
+}
+
+// SetProxy attaches the given proxy to this node, replacing any existing one.
+func (n *node) SetProxy(proxy *Proxy) {
+	n.Lock()
+	defer n.Unlock()
+	n.Proxy = proxy
+}
+
+// GetProxy returns the proxy attached to this node, or nil when none is set.
+// A read lock is sufficient here: taking the exclusive write lock for a pure
+// read would needlessly serialize concurrent readers of the embedded RWMutex.
+func (n *node) GetProxy() *Proxy {
+	n.RLock()
+	defer n.RUnlock()
+	return n.Proxy
+}
+
+// GetBranch returns the branch stored under key (a transaction id, or NONE
+// for committed state), or nil when no such branch exists.
+// Uses a read lock rather than the exclusive write lock for this pure read;
+// reading from a nil map is safe in Go, so no explicit nil check is needed.
+func (n *node) GetBranch(key string) *Branch {
+	n.RLock()
+	defer n.RUnlock()
+
+	if branch, exists := n.Branches[key]; exists {
+		return branch
+	}
+	return nil
+}
+
+// SetBranch registers branch under key (a transaction id, or NONE for
+// committed state), replacing any existing entry.
+func (n *node) SetBranch(key string, branch *Branch) {
+	n.Lock()
+	defer n.Unlock()
+	n.Branches[key] = branch
+}
+
+// GetRoot returns the root of the tree this node belongs to.
+// A read lock is sufficient here: taking the exclusive write lock for a pure
+// read would needlessly serialize concurrent readers of the embedded RWMutex.
+func (n *node) GetRoot() *root {
+	n.RLock()
+	defer n.RUnlock()
+	return n.Root
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
new file mode 100644
index 0000000..3c39e01
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"bytes"
+	"crypto/md5"
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"sort"
+	"sync"
+)
+
// revCacheSingleton is a process-wide cache of revisions and their
// config payloads, keyed by content hash and shared by all roots.
type revCacheSingleton struct {
	sync.RWMutex
	Cache map[string]interface{}
}

var revCacheInstance *revCacheSingleton
var revCacheOnce sync.Once

// GetRevCache returns the singleton revision cache, creating it
// exactly once in a goroutine-safe manner.
func GetRevCache() *revCacheSingleton {
	revCacheOnce.Do(func() {
		revCacheInstance = &revCacheSingleton{Cache: make(map[string]interface{})}
	})
	return revCacheInstance
}
+
// NonPersistedRevision is an in-memory-only implementation of the
// Revision interface: it tracks a config payload, its children and a
// content hash, but is never written to the KV store.
type NonPersistedRevision struct {
	mutex    sync.RWMutex          // guards all fields below
	Root     *root                 // tree root this revision belongs to
	Config   *DataRevision         // config payload + its hash
	Children map[string][]Revision // child revisions keyed by field name
	Hash     string                // content hash (config + children)
	Branch   *Branch               // branch owning this revision
	WeakRef  string
	Name     string // cache/KV key, e.g. "<component>/<key>"
}
+
+func NewNonPersistedRevision(root *root, branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	r := &NonPersistedRevision{}
+	r.Root = root
+	r.Branch = branch
+	r.Config = NewDataRevision(root, data)
+	r.Children = children
+	r.Hash = r.hashContent()
+	return r
+}
+
// SetConfig replaces the revision's config payload.
func (npr *NonPersistedRevision) SetConfig(config *DataRevision) {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	npr.Config = config
}

// GetConfig returns the revision's config payload.
func (npr *NonPersistedRevision) GetConfig() *DataRevision {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	return npr.Config
}

// SetAllChildren replaces the entire children map of the revision.
func (npr *NonPersistedRevision) SetAllChildren(children map[string][]Revision) {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	npr.Children = children
}
+
// SetChildren replaces the child list stored under name.
// NOTE(review): this is silently a no-op when no entry for name exists
// yet — a new field name can never be introduced here; confirm intended.
func (npr *NonPersistedRevision) SetChildren(name string, children []Revision) {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	if _, exists := npr.Children[name]; exists {
		npr.Children[name] = children
	}
}
+
+func (npr *NonPersistedRevision) GetAllChildren() map[string][]Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	return npr.Children
+}
+
+func (npr *NonPersistedRevision) GetChildren(name string) []Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	if _, exists := npr.Children[name]; exists {
+		return npr.Children[name]
+	}
+	return nil
+}
+
// SetHash overrides the revision's content hash.
func (npr *NonPersistedRevision) SetHash(hash string) {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	npr.Hash = hash
}

// GetHash returns the revision's content hash.
// NOTE(review): locking is deliberately commented out here —
// presumably to avoid deadlock with callers that already hold the
// mutex; confirm before re-enabling.
func (npr *NonPersistedRevision) GetHash() string {
	//npr.mutex.Lock()
	//defer npr.mutex.Unlock()
	return npr.Hash
}

// ClearHash resets the revision's content hash to the empty string.
func (npr *NonPersistedRevision) ClearHash() {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	npr.Hash = ""
}

// GetName returns the revision's name (its cache/KV key).
// NOTE(review): unlocked for the same reason as GetHash — confirm.
func (npr *NonPersistedRevision) GetName() string {
	//npr.mutex.Lock()
	//defer npr.mutex.Unlock()
	return npr.Name
}

// SetName sets the revision's name (its cache/KV key).
// NOTE(review): unlocked for the same reason as GetHash — confirm.
func (npr *NonPersistedRevision) SetName(name string) {
	//npr.mutex.Lock()
	//defer npr.mutex.Unlock()
	npr.Name = name
}
// SetBranch attaches the revision to the given branch.
func (npr *NonPersistedRevision) SetBranch(branch *Branch) {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	npr.Branch = branch
}

// GetBranch returns the branch this revision is attached to.
func (npr *NonPersistedRevision) GetBranch() *Branch {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	return npr.Branch
}
+
+func (npr *NonPersistedRevision) GetData() interface{} {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	if npr.Config == nil {
+		return nil
+	}
+	return npr.Config.Data
+}
+
// GetNode returns the tree node that owns this revision's branch.
// NOTE(review): panics if the revision has no branch — confirm callers
// never invoke this on a detached revision.
func (npr *NonPersistedRevision) GetNode() *node {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()
	return npr.Branch.Node
}
+
// Finalize recomputes the revision hash (unless skipOnExist is set)
// and registers both the revision and its config in the shared
// revision cache.  When an identical config is already cached, the
// revision is rewired to the cached instance to deduplicate memory.
func (npr *NonPersistedRevision) Finalize(skipOnExist bool) {
	GetRevCache().Lock()
	defer GetRevCache().Unlock()

	if !skipOnExist {
		npr.Hash = npr.hashContent()
	}
	if _, exists := GetRevCache().Cache[npr.Hash]; !exists {
		GetRevCache().Cache[npr.Hash] = npr
	}
	if _, exists := GetRevCache().Cache[npr.Config.Hash]; !exists {
		GetRevCache().Cache[npr.Config.Hash] = npr.Config
	} else {
		// Reuse the cached config with the same hash instead of
		// keeping a duplicate copy in memory.
		npr.Config = GetRevCache().Cache[npr.Config.Hash].(*DataRevision)
	}
}
+
+func (npr *NonPersistedRevision) hashContent() string {
+	var buffer bytes.Buffer
+	var childrenKeys []string
+
+	if npr.Config != nil {
+		buffer.WriteString(npr.Config.Hash)
+	}
+
+	for key := range npr.Children {
+		childrenKeys = append(childrenKeys, key)
+	}
+
+	sort.Strings(childrenKeys)
+
+	if len(npr.Children) > 0 {
+		// Loop through sorted Children keys
+		for _, key := range childrenKeys {
+			for _, child := range npr.Children[key] {
+				if child != nil && child.GetHash() != "" {
+					buffer.WriteString(child.GetHash())
+				}
+			}
+		}
+	}
+
+	return fmt.Sprintf("%x", md5.Sum(buffer.Bytes()))[:12]
+}
+
// Get returns a deep copy of the latest data on the revision's branch,
// recursively merging child revisions into the clone up to the given
// depth (depth == 0 returns just the top-level clone; a negative depth
// recurses without limit).
func (npr *NonPersistedRevision) Get(depth int) interface{} {
	// 1. Clone the data to avoid any concurrent access issues
	// 2. The current rev might still be pointing to an old config
	//    thus, force the revision to get its latest value
	latestRev := npr.GetBranch().GetLatest()
	originalData := proto.Clone(latestRev.GetData().(proto.Message))

	data := originalData
	// Get back to the interface type
	//data := reflect.ValueOf(originalData).Interface()

	if depth != 0 {
		// Walk every child field of the message and splice the child
		// revisions' data into the cloned parent via reflection.
		for fieldName, field := range ChildrenFields(latestRev.GetData()) {
			childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
			if field.IsContainer {
				for _, rev := range latestRev.GetChildren(fieldName) {
					childData := rev.Get(depth - 1)
					foundEntry := false
					// Compare by serialized text form to detect duplicates
					// already present in the holder slice.
					for i := 0; i < childDataHolder.Len(); i++ {
						cdh_if := childDataHolder.Index(i).Interface()
						if cdh_if.(proto.Message).String() == childData.(proto.Message).String() {
							foundEntry = true
							break
						}
					}
					if !foundEntry {
						// avoid duplicates by adding it only if the child was not found in the holder
						childDataHolder = reflect.Append(childDataHolder, reflect.ValueOf(childData))
					}
				}
			} else {
				// Scalar (non-container) child: at most one revision.
				if revs := latestRev.GetChildren(fieldName); revs != nil && len(revs) > 0 {
					rev := latestRev.GetChildren(fieldName)[0]
					if rev != nil {
						childData := rev.Get(depth - 1)
						if reflect.TypeOf(childData) == reflect.TypeOf(childDataHolder.Interface()) {
							childDataHolder = reflect.ValueOf(childData)
						}
					}
				}
			}
			// Merge child data with cloned object
			reflect.ValueOf(data).Elem().FieldByName(childDataName).Set(childDataHolder)
		}
	}

	result := data

	if result != nil {
		// We need to send back a copy of the retrieved object
		result = proto.Clone(data.(proto.Message))
	}

	return result
}
+
// UpdateData returns a revision carrying the new data on the given
// branch.  If the provided data hashes identically to the currently
// stored config, the receiver itself is returned unchanged.
func (npr *NonPersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()

	// Short-circuit: nothing to do when the data is unchanged.
	if npr.Config.Data != nil && npr.Config.hashData(npr.Root, data) == npr.Config.Hash {
		log.Debugw("stored-data-matches-latest", log.Fields{"stored": npr.Config.Data, "provided": data})
		return npr
	}

	newRev := NonPersistedRevision{}
	newRev.Config = NewDataRevision(npr.Root, data)
	// Hash is copied here but recomputed below by Finalize(false).
	newRev.Hash = npr.Hash
	newRev.Branch = branch

	// Shallow-copy the children map so the new revision does not share
	// slice headers with the old one.
	newRev.Children = make(map[string][]Revision)
	for entryName, childrenEntry := range npr.Children {
		newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
	}

	newRev.Finalize(false)

	return &newRev
}
+
// UpdateChildren merges the provided child revisions into the entry
// stored under name and re-finalizes the revision on the given branch.
// NOTE(review): despite returning a Revision, this mutates the
// receiver in place (updatedRev aliases npr) — confirm intended.
func (npr *NonPersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()

	updatedRev := npr

	// Verify if the map contains already contains an entry matching the name value
	// If so, we need to retain the contents of that entry and merge them with the provided children revision list
	if _, exists := updatedRev.Children[name]; exists {
		// Go through all child hashes and save their index within the map
		existChildMap := make(map[string]int)
		for i, child := range updatedRev.Children[name] {
			existChildMap[child.GetHash()] = i
		}

		for _, newChild := range children {
			if _, childExists := existChildMap[newChild.GetHash()]; !childExists {
				// revision is not present in the existing list... add it
				updatedRev.Children[name] = append(updatedRev.Children[name], newChild)
			} else {
				// replace
				updatedRev.Children[name][existChildMap[newChild.GetHash()]] = newChild
			}
		}
	} else {
		// Map entry does not exist, thus just create a new entry and assign the provided revisions
		updatedRev.Children[name] = make([]Revision, len(children))
		copy(updatedRev.Children[name], children)
	}

	// Rebuild the config wrapper and recompute the hash via Finalize.
	updatedRev.Config = NewDataRevision(npr.Root, npr.Config.Data)
	updatedRev.Hash = npr.Hash
	updatedRev.Branch = branch
	updatedRev.Finalize(false)

	return updatedRev
}
+
// UpdateAllChildren replaces the whole children map with copies of the
// provided entries and re-finalizes the revision on the given branch.
// NOTE(review): newRev aliases npr, so the receiver is mutated in
// place even though a Revision is returned — confirm intended.
func (npr *NonPersistedRevision) UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision {
	npr.mutex.Lock()
	defer npr.mutex.Unlock()

	newRev := npr
	newRev.Config = npr.Config
	newRev.Hash = npr.Hash
	newRev.Branch = branch
	newRev.Children = make(map[string][]Revision)
	for entryName, childrenEntry := range children {
		newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
	}
	newRev.Finalize(false)

	return newRev
}
+
// Drop removes the revision (and, when includeConfig is set, its
// config payload) from the shared revision cache.  The txid argument
// is unused by the in-memory implementation.
func (npr *NonPersistedRevision) Drop(txid string, includeConfig bool) {
	GetRevCache().Lock()
	defer GetRevCache().Unlock()

	if includeConfig {
		delete(GetRevCache().Cache, npr.Config.Hash)
	}
	delete(GetRevCache().Cache, npr.Hash)
}
+
+func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+	// stub... required by interface
+	return nil
+}
+
+func (npr *NonPersistedRevision) SetupWatch(key string) {
+	// stub ... required by interface
+}
+
+func (pr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
+	// stub ... required by interface
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
new file mode 100644
index 0000000..c2a6c64
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/hex"
+	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// PersistedRevision holds information of revision meant to be saved in a persistent storage
+type PersistedRevision struct {
+	Revision
+	Compress bool
+
+	events    chan *kvstore.Event `json:"-"`
+	kvStore   *Backend            `json:"-"`
+	mutex     sync.RWMutex        `json:"-"`
+	isStored  bool
+	isWatched bool
+}
+
+// NewPersistedRevision creates a new instance of a PersistentRevision structure
+func NewPersistedRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	pr := &PersistedRevision{}
+	pr.kvStore = branch.Node.GetRoot().KvStore
+	pr.Revision = NewNonPersistedRevision(nil, branch, data, children)
+	return pr
+}
+
+// Finalize is responsible of saving the revision in the persistent storage
+func (pr *PersistedRevision) Finalize(skipOnExist bool) {
+	pr.store(skipOnExist)
+}
+
+type revData struct {
+	Children map[string][]string
+	Config   string
+}
+
// store serializes the revision's config to protobuf (optionally
// gzip-compressed) and writes it to the KV store under the revision
// name.  Revisions belonging to an open transaction are not stored;
// when skipOnExist is set an already-present key is left untouched.
func (pr *PersistedRevision) store(skipOnExist bool) {
	// Transacted revisions are persisted at merge time, not here.
	if pr.GetBranch().Txid != "" {
		return
	}

	if pair, _ := pr.kvStore.Get(pr.GetName()); pair != nil && skipOnExist {
		log.Debugw("revision-config-already-exists", log.Fields{"hash": pr.GetHash(), "name": pr.GetName()})
		return
	}

	// NOTE(review): a Marshal failure is silently dropped here (the
	// TODO below) — the revision would simply never be stored.
	if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
		// TODO report error
	} else {
		if pr.Compress {
			// NOTE(review): gzip Write/Close errors are ignored —
			// confirm acceptable for this backend.
			var b bytes.Buffer
			w := gzip.NewWriter(&b)
			w.Write(blob)
			w.Close()
			blob = b.Bytes()
		}

		if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
			log.Warnw("problem-storing-revision-config",
				log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
		} else {
			log.Debugw("storing-revision-config", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
			pr.isStored = true
		}
	}
}
+
+func (pr *PersistedRevision) SetupWatch(key string) {
+	if pr.events == nil {
+		pr.events = make(chan *kvstore.Event)
+
+		log.Debugw("setting-watch", log.Fields{"key": key, "revision-hash": pr.GetHash()})
+
+		pr.SetName(key)
+		pr.events = pr.kvStore.CreateWatch(key)
+
+		pr.isWatched = true
+
+		// Start watching
+		go pr.startWatching()
+	}
+}
+
// updateInMemory applies data received from a KV watch event to the
// in-memory model.  When a proxy exists for this revision, the proxied
// path is reserved/locked first so the watch update cannot race with an
// in-flight proxy operation; updates that collide with an active
// ADD/REMOVE/UPDATE proxy operation are skipped entirely.
func (pr *PersistedRevision) updateInMemory(data interface{}) {
	pr.mutex.Lock()
	defer pr.mutex.Unlock()

	var pac *proxyAccessControl
	var pathLock string

	//
	// If a proxy exists for this revision, use it to lock access to the path
	// and prevent simultaneous updates to the object in memory
	//
	if pr.GetNode().GetProxy() != nil {
		pathLock, _ = pr.GetNode().GetProxy().parseForControlledPath(pr.GetNode().GetProxy().getFullPath())

		// If the proxy already has a request in progress, then there is no need to process the watch
		log.Debugw("update-in-memory--checking-pathlock", log.Fields{"key": pr.GetHash(), "pathLock": pathLock})
		if PAC().IsReserved(pathLock) {
			switch pr.GetNode().GetRoot().GetProxy().Operation {
			case PROXY_ADD:
				fallthrough
			case PROXY_REMOVE:
				fallthrough
			case PROXY_UPDATE:
				// An in-flight mutation owns the path; drop this watch update.
				log.Debugw("update-in-memory--skipping", log.Fields{"key": pr.GetHash(), "path": pr.GetNode().GetProxy().getFullPath()})
				return
			default:
				log.Debugw("update-in-memory--operation", log.Fields{"operation": pr.GetNode().GetRoot().GetProxy().Operation})
			}
		} else {
			log.Debugw("update-in-memory--path-not-locked", log.Fields{"key": pr.GetHash(), "path": pr.GetNode().GetProxy().getFullPath()})
		}

		log.Debugw("update-in-memory--reserve-and-lock", log.Fields{"key": pr.GetHash(), "path": pathLock})

		// Deferred in LIFO order: unlock, release, then the debug log.
		pac = PAC().ReservePath(pr.GetNode().GetProxy().getFullPath(), pr.GetNode().GetProxy(), pathLock)
		pac.SetProxy(pr.GetNode().GetProxy())
		pac.lock()

		defer log.Debugw("update-in-memory--release-and-unlock", log.Fields{"key": pr.GetHash(), "path": pathLock})
		defer pac.unlock()
		defer PAC().ReleasePath(pathLock)
	}

	//
	// Update the object in memory through a transaction
	// This will allow for the object to be subsequently merged with any changes
	// that might have occurred in memory
	//

	log.Debugw("update-in-memory--custom-transaction", log.Fields{"key": pr.GetHash()})

	// Prepare the transaction
	branch := pr.GetBranch()
	latest := branch.GetLatest()
	txidBin, _ := uuid.New().MarshalBinary()
	txid := hex.EncodeToString(txidBin)[:12]

	makeBranch := func(node *node) *Branch {
		return node.MakeBranch(txid)
	}

	// Apply the update in a transaction branch
	updatedRev := latest.GetNode().Update("", data, false, txid, makeBranch)
	updatedRev.SetName(latest.GetName())

	// Merge the transaction branch in memory
	if mergedRev, _ := latest.GetNode().MergeBranch(txid, false); mergedRev != nil {
		branch.SetLatest(mergedRev)
	}
}
+
// startWatching consumes KV watch events for this revision until the
// event channel closes or a DELETE event arrives.  PUT events re-read
// the blob from the store, unmarshal it into a fresh message of the
// revision's data type, and merge it into memory; DELETE events drop
// the revision and end the loop.  Runs as its own goroutine.
func (pr *PersistedRevision) startWatching() {
	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash()})

StopWatchLoop:
	for {
		select {
		case event, ok := <-pr.events:
			if !ok {
				// Channel closed (watch torn down): exit the loop.
				log.Errorw("event-channel-failure: stopping watch loop", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
				break StopWatchLoop
			}

			log.Debugw("received-event", log.Fields{"type": event.EventType, "watch": pr.GetName()})

			switch event.EventType {
			case kvstore.DELETE:
				log.Debugw("delete-from-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
				pr.Revision.Drop("", true)
				break StopWatchLoop

			case kvstore.PUT:
				log.Debugw("update-in-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})

				if dataPair, err := pr.kvStore.Get(pr.GetName()); err != nil || dataPair == nil {
					log.Errorw("update-in-memory--key-retrieval-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
				} else {
					// Allocate a fresh value of the same concrete type as
					// the current data to unmarshal into.
					data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())

					if err := proto.Unmarshal(dataPair.Value.([]byte), data.Interface().(proto.Message)); err != nil {
						log.Errorw("update-in-memory--unmarshal-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
					} else {
						pr.updateInMemory(data.Interface())
					}
				}

			default:
				log.Debugw("unhandled-event", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "type": event.EventType})
			}
		}
	}

	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
}
+
// LoadFromPersistence walks the KV store under path, rebuilding child
// revisions from the stored protobuf blobs and attaching them to the
// in-memory tree.  The path is consumed one "/"-separated component at
// a time; keyed container children are recursed into when they already
// exist in memory, otherwise fresh nodes (with KV watches) are created.
// Returns the revisions that were loaded or refreshed.
func (pr *PersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
	log.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})

	var response []Revision
	var rev Revision

	rev = pr

	if pr.kvStore != nil && path != "" {
		blobMap, _ := pr.kvStore.List(path)

		// Split off the leading path component, e.g.
		// "logical_devices/abc" -> name="logical_devices", path="abc".
		partition := strings.SplitN(path, "/", 2)
		name := partition[0]

		if len(partition) < 2 {
			path = ""
		} else {
			path = partition[1]
		}

		field := ChildrenFields(rev.GetBranch().Node.Type)[name]

		if field != nil && field.IsContainer {
			var children []Revision
			children = make([]Revision, len(rev.GetChildren(name)))
			copy(children, rev.GetChildren(name))
			// Index the hashes of children already in memory so stored
			// blobs that duplicate them can be skipped below.
			existChildMap := make(map[string]int)
			for i, child := range rev.GetChildren(name) {
				existChildMap[child.GetHash()] = i
			}

			for _, blob := range blobMap {
				output := blob.Value.([]byte)

				data := reflect.New(field.ClassType.Elem())

				if err := proto.Unmarshal(output, data.Interface().(proto.Message)); err != nil {
					log.Errorw(
						"loading-from-persistence--failed-to-unmarshal",
						log.Fields{"path": path, "txid": txid, "error": err},
					)
				} else if field.Key != "" {
					var key reflect.Value
					var keyValue interface{}
					var keyStr string

					if path == "" {
						// e.g. /logical_devices --> path="" name=logical_devices key=""
						_, key = GetAttributeValue(data.Interface(), field.Key, 0)
						keyStr = key.String()

					} else {
						// e.g.
						// /logical_devices/abcde --> path="abcde" name=logical_devices
						// /logical_devices/abcde/image_downloads --> path="abcde/image_downloads" name=logical_devices

						// NOTE(review): this inner "key :=" shadows the
						// reflect.Value declared above — confirm intended.
						partition := strings.SplitN(path, "/", 2)
						key := partition[0]
						if len(partition) < 2 {
							path = ""
						} else {
							path = partition[1]
						}
						keyValue = field.KeyFromStr(key)
						keyStr = keyValue.(string)

						if idx, childRev := rev.GetBranch().Node.findRevByKey(children, field.Key, keyValue); childRev != nil {
							// Key is memory, continue recursing path
							if newChildRev := childRev.LoadFromPersistence(path, txid); newChildRev != nil {
								children[idx] = newChildRev[0]

								// NOTE(review): "rev :=" shadows the outer rev,
								// so this updated revision is discarded after
								// makeLatest — confirm intended.
								rev := rev.UpdateChildren(name, rev.GetChildren(name), rev.GetBranch())
								rev.GetBranch().Node.makeLatest(rev.GetBranch(), rev, nil)

								response = append(response, newChildRev[0])
								continue
							}
						}
					}

					childRev := rev.GetBranch().Node.MakeNode(data.Interface(), txid).Latest(txid)
					childRev.SetName(name + "/" + keyStr)

					// Do not process a child that is already in memory
					if _, childExists := existChildMap[childRev.GetHash()]; !childExists {
						// Create watch for <component>/<key>
						childRev.SetupWatch(childRev.GetName())

						children = append(children, childRev)
						rev = rev.UpdateChildren(name, children, rev.GetBranch())

						rev.GetBranch().Node.makeLatest(rev.GetBranch(), rev, nil)
					}
					response = append(response, childRev)
					continue
				}
			}
		}
	}

	return response
}
+
+// UpdateData modifies the information in the data model and saves it in the persistent storage
+func (pr *PersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+	log.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateData(data, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
+func (pr *PersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateChildren(name, children, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// UpdateAllChildren modifies the children for all components of a revision and saves it in the peristent storage
+func (pr *PersistedRevision) UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision {
+	log.Debugw("updating-all-persisted-children", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateAllChildren(children, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// Drop takes care of eliminating a revision hash that is no longer needed
+// and its associated config when required
// Drop removes the revision hash (and, when includeConfig is set, its
// config) from the in-memory cache only; the persisted copy is left
// untouched (see StorageDrop for that).
func (pr *PersistedRevision) Drop(txid string, includeConfig bool) {
	pr.Revision.Drop(txid, includeConfig)
}
+
+// Drop takes care of eliminating a revision hash that is no longer needed
+// and its associated config when required
// StorageDrop removes the revision from the KV store (tearing down its
// watch first) and then drops it from the in-memory cache.  Revisions
// belonging to an open transaction (txid != "") are only dropped from
// memory, never from storage.
func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
	log.Debugw("dropping-revision",
		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "stack": string(debug.Stack())})

	pr.mutex.Lock()
	defer pr.mutex.Unlock()
	if pr.kvStore != nil && txid == "" {
		if pr.isStored {
			// Stop the watch before deleting the key so the watcher
			// goroutine does not react to its own delete.
			if pr.isWatched {
				pr.kvStore.DeleteWatch(pr.GetName(), pr.events)
				pr.isWatched = false
			}

			if err := pr.kvStore.Delete(pr.GetName()); err != nil {
				log.Errorw("failed-to-remove-revision", log.Fields{"hash": pr.GetHash(), "error": err.Error()})
			} else {
				pr.isStored = false
			}
		}

	} else {
		if includeConfig {
			log.Debugw("attempted-to-remove-transacted-revision-config", log.Fields{"hash": pr.GetConfig().Hash, "txid": txid})
		}
		log.Debugw("attempted-to-remove-transacted-revision", log.Fields{"hash": pr.GetHash(), "txid": txid})
	}

	pr.Revision.Drop(txid, includeConfig)
}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/profiling.go b/vendor/github.com/opencord/voltha-go/db/model/profiling.go
new file mode 100644
index 0000000..874b035
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/profiling.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+)
+
+// Profiling is used to store performance details collected at runtime
+type profiling struct {
+	sync.RWMutex
+	DatabaseRetrieveTime  float64
+	DatabaseRetrieveCount int
+	InMemoryModelTime     float64
+	InMemoryModelCount    int
+	InMemoryProcessTime   float64
+	DatabaseStoreTime     float64
+	InMemoryLockTime      float64
+	InMemoryLockCount     int
+}
+
+var profilingInstance *profiling
+var profilingOnce sync.Once
+
+// GetProfiling returns a singleton instance of the Profiling structure
+func GetProfiling() *profiling {
+	profilingOnce.Do(func() {
+		profilingInstance = &profiling{}
+	})
+	return profilingInstance
+}
+
+// AddToDatabaseRetrieveTime appends a time period to retrieve data from the database
+func (p *profiling) AddToDatabaseRetrieveTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseRetrieveTime += period
+	p.DatabaseRetrieveCount++
+}
+
+// AddToInMemoryModelTime appends a time period to construct/deconstruct data in memory
+func (p *profiling) AddToInMemoryModelTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryModelTime += period
+	p.InMemoryModelCount++
+}
+
+// AddToInMemoryProcessTime appends a time period to process data
+func (p *profiling) AddToInMemoryProcessTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryProcessTime += period
+}
+
+// AddToDatabaseStoreTime appends a time period to store data in the database
+func (p *profiling) AddToDatabaseStoreTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseStoreTime += period
+}
+
+// AddToInMemoryLockTime appends a time period when a code block was locked
+func (p *profiling) AddToInMemoryLockTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryLockTime += period
+	p.InMemoryLockCount++
+}
+
+// Reset initializes the profile counters
+func (p *profiling) Reset() {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseRetrieveTime = 0
+	p.DatabaseRetrieveCount = 0
+	p.InMemoryModelTime = 0
+	p.InMemoryModelCount = 0
+	p.InMemoryProcessTime = 0
+	p.DatabaseStoreTime = 0
+	p.InMemoryLockTime = 0
+	p.InMemoryLockCount = 0
+}
+
+// Report will provide the current profile counter status
+func (p *profiling) Report() {
+	p.Lock()
+	defer p.Unlock()
+
+	log.Infof("[ Profiling Report ]")
+	log.Infof("Database Retrieval : %f", p.DatabaseRetrieveTime)
+	log.Infof("Database Retrieval Count : %d", p.DatabaseRetrieveCount)
+	log.Infof("Avg Database Retrieval : %f", p.DatabaseRetrieveTime/float64(p.DatabaseRetrieveCount))
+	log.Infof("In-Memory Modeling : %f", p.InMemoryModelTime)
+	log.Infof("In-Memory Modeling Count: %d", p.InMemoryModelCount)
+	log.Infof("Avg In-Memory Modeling : %f", p.InMemoryModelTime/float64(p.InMemoryModelCount))
+	log.Infof("In-Memory Locking : %f", p.InMemoryLockTime)
+	log.Infof("In-Memory Locking Count: %d", p.InMemoryLockCount)
+	log.Infof("Avg In-Memory Locking : %f", p.InMemoryLockTime/float64(p.InMemoryLockCount))
+
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy.go b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
new file mode 100644
index 0000000..86d426a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"crypto/md5"
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+)
+
// OperationContext carries the details used while performing a proxy operation.
type OperationContext struct {
	Path      string      // model path the operation targets
	Data      interface{} // payload associated with the operation
	FieldName string      // name of the field being acted upon
	ChildKey  string      // key of the child entry, when applicable
}

// NewOperationContext builds an OperationContext from its individual parts.
func NewOperationContext(path string, data interface{}, fieldName string, childKey string) *OperationContext {
	return &OperationContext{
		Path:      path,
		Data:      data,
		FieldName: fieldName,
		ChildKey:  childKey,
	}
}

// Update replaces the context payload and returns the context for chaining.
func (oc *OperationContext) Update(data interface{}) *OperationContext {
	oc.Data = data
	return oc
}
+
// Proxy holds the information for a specific location with the data model
type Proxy struct {
	sync.RWMutex
	Root       *root  // root of the tree this proxy belongs to
	Node       *node  // node the proxy points at
	ParentNode *node  // parent of Node
	Path       string // path relative to the proxy's root location
	FullPath   string // absolute path within the data model ("" means tree root)
	Exclusive  bool   // whether the proxy claims exclusive access
	Callbacks  map[CallbackType]map[string]*CallbackTuple // callbacks keyed by type, then by function-name hash
	Operation  ProxyOperation // last operation issued through this proxy
}
+
+// NewProxy instantiates a new proxy to a specific location
+func NewProxy(root *root, node *node, parentNode *node, path string, fullPath string, exclusive bool) *Proxy {
+	callbacks := make(map[CallbackType]map[string]*CallbackTuple)
+	if fullPath == "/" {
+		fullPath = ""
+	}
+	p := &Proxy{
+		Root:       root,
+		Node:       node,
+		ParentNode: parentNode,
+		Exclusive:  exclusive,
+		Path:       path,
+		FullPath:   fullPath,
+		Callbacks:  callbacks,
+	}
+	return p
+}
+
// NOTE(review): these accessors read without taking the proxy's RWMutex —
// safe only if Root/Path/FullPath are never mutated after construction; verify.

// GetRoot returns the root attribute of the proxy
func (p *Proxy) GetRoot() *root {
	return p.Root
}

// getPath returns the path attribute of the proxy
func (p *Proxy) getPath() string {
	return p.Path
}

// getFullPath returns the full path attribute of the proxy
func (p *Proxy) getFullPath() string {
	return p.FullPath
}
+
+// getCallbacks returns the full list of callbacks associated to the proxy
+func (p *Proxy) getCallbacks(callbackType CallbackType) map[string]*CallbackTuple {
+	if cb, exists := p.Callbacks[callbackType]; exists {
+		return cb
+	}
+	return nil
+}
+
// getCallback returns a specific callback matching the type and function hash
// (nil when either the type or the hash is unknown; indexing a nil inner map
// is safe in Go).
func (p *Proxy) getCallback(callbackType CallbackType, funcHash string) *CallbackTuple {
	p.Lock()
	defer p.Unlock()
	if tuple, exists := p.Callbacks[callbackType][funcHash]; exists {
		return tuple
	}
	return nil
}

// setCallbacks applies a callbacks list to a type
func (p *Proxy) setCallbacks(callbackType CallbackType, callbacks map[string]*CallbackTuple) {
	p.Lock()
	defer p.Unlock()
	p.Callbacks[callbackType] = callbacks
}

// setCallback applies a callback to a type and hash value
// NOTE: panics if the inner map for callbackType was never initialized;
// RegisterCallback initializes it before calling here.
func (p *Proxy) setCallback(callbackType CallbackType, funcHash string, tuple *CallbackTuple) {
	p.Lock()
	defer p.Unlock()
	p.Callbacks[callbackType][funcHash] = tuple
}

// DeleteCallback removes a callback matching the type and hash
// (deleting from a nil or missing map is a no-op).
func (p *Proxy) DeleteCallback(callbackType CallbackType, funcHash string) {
	p.Lock()
	defer p.Unlock()
	delete(p.Callbacks[callbackType], funcHash)
}
+
// ProxyOperation is an enumerated value identifying the operation being
// performed through a proxy.
type ProxyOperation uint8

// Enumerated list of proxy operations
const (
	PROXY_GET ProxyOperation = iota
	PROXY_LIST
	PROXY_ADD
	PROXY_UPDATE
	PROXY_REMOVE
)
+
+// parseForControlledPath verifies if a proxy path matches a pattern
+// for locations that need to be access controlled.
+func (p *Proxy) parseForControlledPath(path string) (pathLock string, controlled bool) {
+	// TODO: Add other path prefixes that may need control
+	if strings.HasPrefix(path, "/devices") ||
+		strings.HasPrefix(path, "/logical_devices") ||
+		strings.HasPrefix(path, "/adapters") {
+
+		split := strings.SplitN(path, "/", -1)
+		switch len(split) {
+		case 2:
+			controlled = false
+			pathLock = ""
+			break
+		case 3:
+			fallthrough
+		default:
+			pathLock = fmt.Sprintf("%s/%s", split[1], split[2])
+			controlled = true
+		}
+	}
+	return pathLock, controlled
+}
+
// List will retrieve information from the data model at the specified path location
// A list operation will force access to persistence storage
func (p *Proxy) List(path string, depth int, deep bool, txid string) interface{} {
	// Resolve the absolute model path ("/" means the proxy's own location).
	var effectivePath string
	if path == "/" {
		effectivePath = p.getFullPath()
	} else {
		effectivePath = p.getFullPath() + path
	}

	pathLock, controlled := p.parseForControlledPath(effectivePath)

	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)

	// Serialize access for controlled paths; released when this call returns.
	pac := PAC().ReservePath(effectivePath, p, pathLock)
	defer PAC().ReleasePath(pathLock)
	pac.SetProxy(p)

	rv := pac.List(path, depth, deep, txid, controlled)

	return rv
}
+
// Get will retrieve information from the data model at the specified path location
func (p *Proxy) Get(path string, depth int, deep bool, txid string) interface{} {
	// Resolve the absolute model path ("/" means the proxy's own location).
	var effectivePath string
	if path == "/" {
		effectivePath = p.getFullPath()
	} else {
		effectivePath = p.getFullPath() + path
	}

	pathLock, controlled := p.parseForControlledPath(effectivePath)

	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)

	// Serialize access for controlled paths; released when this call returns.
	pac := PAC().ReservePath(effectivePath, p, pathLock)
	defer PAC().ReleasePath(pathLock)
	pac.SetProxy(p)

	rv := pac.Get(path, depth, deep, txid, controlled)

	return rv
}
+
+// Update will modify information in the data model at the specified location with the provided data
+func (p *Proxy) Update(path string, data interface{}, strict bool, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s, Controlled: %b", path, effectivePath, fullPath, pathLock, controlled)
+
+	pac := PAC().ReservePath(effectivePath, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_UPDATE
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--update", log.Fields{"operation":p.Operation})
+
+	return pac.Update(fullPath, data, strict, txid, controlled)
+}
+
+// AddWithID will insert new data at specified location.
+// This method also allows the user to specify the ID of the data entry to ensure
+// that access control is active while inserting the information.
+func (p *Proxy) AddWithID(path string, id string, data interface{}, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path + "/" + id
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+
+	pac := PAC().ReservePath(path, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_ADD
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--add", log.Fields{"operation":p.Operation})
+
+	return pac.Add(fullPath, data, txid, controlled)
+}
+
+// Add will insert new data at specified location.
+func (p *Proxy) Add(path string, data interface{}, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+
+	pac := PAC().ReservePath(path, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_ADD
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--add", log.Fields{"operation":p.Operation})
+
+	return pac.Add(fullPath, data, txid, controlled)
+}
+
// Remove will delete an entry at the specified location
func (p *Proxy) Remove(path string, txid string) interface{} {
	if !strings.HasPrefix(path, "/") {
		log.Errorf("invalid path: %s", path)
		return nil
	}
	// Compute both the proxy-relative and absolute forms of the target path.
	var fullPath string
	var effectivePath string
	if path == "/" {
		fullPath = p.getPath()
		effectivePath = p.getFullPath()
	} else {
		fullPath = p.getPath() + path
		effectivePath = p.getFullPath() + path
	}

	pathLock, controlled := p.parseForControlledPath(effectivePath)

	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)

	// Serialize access for controlled paths; released when this call returns.
	pac := PAC().ReservePath(effectivePath, p, pathLock)
	defer PAC().ReleasePath(pathLock)

	p.Operation = PROXY_REMOVE
	pac.SetProxy(p)

	log.Debugw("proxy-operation--remove", log.Fields{"operation":p.Operation})

	return pac.Remove(fullPath, txid, controlled)
}
+
// OpenTransaction creates a new transaction branch to isolate operations made to the data model
func (p *Proxy) OpenTransaction() *Transaction {
	txid := p.GetRoot().MakeTxBranch()
	return NewTransaction(p, txid)
}

// commitTransaction will apply and merge modifications made in the transaction branch to the data model
func (p *Proxy) commitTransaction(txid string) {
	p.GetRoot().FoldTxBranch(txid)
}

// cancelTransaction will terminate a transaction branch along will all changes within it
func (p *Proxy) cancelTransaction(txid string) {
	p.GetRoot().DeleteTxBranch(txid)
}
+
// CallbackFunction is the signature shared by all proxy callbacks.
type CallbackFunction func(args ...interface{}) interface{}

// CallbackTuple bundles a callback with the arguments it was registered with.
type CallbackTuple struct {
	callback CallbackFunction
	args     []interface{}
}

// Execute invokes the callback with the registered arguments followed by the
// caller-supplied context arguments.
func (tuple *CallbackTuple) Execute(contextArgs []interface{}) interface{} {
	// Idiom fix: pre-size the slice and use variadic append; appending a nil
	// contextArgs is a no-op, so the original explicit nil check is unneeded.
	args := make([]interface{}, 0, len(tuple.args)+len(contextArgs))
	args = append(args, tuple.args...)
	args = append(args, contextArgs...)
	return tuple.callback(args...)
}
+
// RegisterCallback associates a callback to the proxy
// Callbacks are keyed by the first 12 hex characters of the MD5 of the
// callback's fully-qualified function name, so registering the same function
// twice overwrites the previous entry.
func (p *Proxy) RegisterCallback(callbackType CallbackType, callback CallbackFunction, args ...interface{}) {
	if p.getCallbacks(callbackType) == nil {
		p.setCallbacks(callbackType, make(map[string]*CallbackTuple))
	}
	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
	log.Debugf("value of function: %s", funcName)
	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]

	p.setCallback(callbackType, funcHash, &CallbackTuple{callback, args})
}

// UnregisterCallback removes references to a callback within a proxy
// The callback is located by recomputing the same function-name hash used
// at registration time.
func (p *Proxy) UnregisterCallback(callbackType CallbackType, callback CallbackFunction, args ...interface{}) {
	if p.getCallbacks(callbackType) == nil {
		log.Errorf("no such callback type - %s", callbackType.String())
		return
	}

	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]

	log.Debugf("value of function: %s", funcName)

	if p.getCallback(callbackType, funcHash) == nil {
		log.Errorf("function with hash value: '%s' not registered with callback type: '%s'", funcHash, callbackType)
		return
	}

	p.DeleteCallback(callbackType, funcHash)
}
+
// invoke runs a single callback, converting any panic it raises into an
// error so one misbehaving callback cannot take down the caller.
func (p *Proxy) invoke(callback *CallbackTuple, context []interface{}) (result interface{}, err error) {
	defer func() {
		// recover only works in a directly deferred function; translate the
		// panic value into an error returned through the named result.
		if r := recover(); r != nil {
			errStr := fmt.Sprintf("callback error occurred: %+v", r)
			err = errors.New(errStr)
			log.Error(errStr)
		}
	}()

	result = callback.Execute(context)

	return result, err
}
+
// InvokeCallbacks executes all callbacks associated to a specific type
// args layout: args[0] = CallbackType, args[1] = proceed-on-error flag,
// args[2:] = context forwarded to each callback. The return value is the
// result of the last callback invoked.
func (p *Proxy) InvokeCallbacks(args ...interface{}) (result interface{}) {
	callbackType := args[0].(CallbackType)
	proceedOnError := args[1].(bool)
	context := args[2:]

	var err error

	if callbacks := p.getCallbacks(callbackType); callbacks != nil {
		// NOTE(review): the proxy lock is held while callbacks execute, so a
		// callback that re-enters a locked Proxy method will deadlock —
		// confirm registered callbacks are lock-free.
		p.Lock()
		for _, callback := range callbacks {
			if result, err = p.invoke(callback, context); err != nil {
				if !proceedOnError {
					log.Info("An error occurred.  Stopping callback invocation")
					break
				}
				log.Info("An error occurred.  Invoking next callback")
			}
		}
		p.Unlock()
	}

	return result
}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go b/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go
new file mode 100644
index 0000000..66d3222
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+	"time"
+)
+
// singletonProxyAccessControl is the process-wide registry of access
// controlled model paths.
type singletonProxyAccessControl struct {
	sync.RWMutex
	cache         sync.Map // pathLock -> *proxyAccessControl
	reservedCount int      // number of outstanding ReservePath calls
}

// instanceProxyAccessControl and onceProxyAccessControl implement the
// lazily-created singleton behind PAC().
var instanceProxyAccessControl *singletonProxyAccessControl
var onceProxyAccessControl sync.Once

// PAC provides access to the proxy access control singleton instance
func PAC() *singletonProxyAccessControl {
	onceProxyAccessControl.Do(func() {
		instanceProxyAccessControl = &singletonProxyAccessControl{}
	})
	return instanceProxyAccessControl
}
+
// IsReserved will verify if access control is active for a specific path within the model
// The outer mutex serializes this check with ReservePath/ReleasePath even
// though sync.Map reads are themselves safe.
func (singleton *singletonProxyAccessControl) IsReserved(pathLock string) bool {
	singleton.Lock()
	defer singleton.Unlock()

	_, exists := singleton.cache.Load(pathLock)
	log.Debugw("is-reserved", log.Fields{"pathLock": pathLock, "exists": exists})

	return exists
}
+
+// ReservePath will apply access control for a specific path within the model
+func (singleton *singletonProxyAccessControl) ReservePath(path string, proxy *Proxy, pathLock string) *proxyAccessControl {
+	singleton.Lock()
+	defer singleton.Unlock()
+	singleton.reservedCount++
+	if pac, exists := singleton.cache.Load(pathLock); !exists {
+		log.Debugf("Creating new PAC entry for path:%s pathLock:%s", path, pathLock)
+		newPac := NewProxyAccessControl(proxy, pathLock)
+		singleton.cache.Store(pathLock, newPac)
+		return newPac
+	} else {
+		log.Debugf("Re-using existing PAC entry for path:%s pathLock:%s", path, pathLock)
+		return pac.(*proxyAccessControl)
+	}
+}
+
// ReleasePath will remove access control for a specific path within the model
func (singleton *singletonProxyAccessControl) ReleasePath(pathLock string) {
	singleton.Lock()
	defer singleton.Unlock()

	singleton.reservedCount--

	// NOTE(review): the cache entry is deleted only when the GLOBAL
	// reservation count reaches zero, not when this particular pathLock's
	// reservations do; entries for other busy paths survive, and unbalanced
	// calls drive reservedCount negative — confirm this is intended.
	if singleton.reservedCount == 0 {
		singleton.cache.Delete(pathLock)
	}
}
+
// ProxyAccessControl is the abstraction interface to the base proxyAccessControl structure
// NOTE(review): the concrete type also implements List, which is missing from
// this interface — confirm whether it should be added.
type ProxyAccessControl interface {
	Get(path string, depth int, deep bool, txid string, control bool) interface{}
	Update(path string, data interface{}, strict bool, txid string, control bool) interface{}
	Add(path string, data interface{}, txid string, control bool) interface{}
	Remove(path string, txid string, control bool) interface{}
	SetProxy(proxy *Proxy)
}

// proxyAccessControl holds details of the path and proxy that requires access control
type proxyAccessControl struct {
	sync.RWMutex
	Proxy    *Proxy        // proxy currently bound to this controlled path
	PathLock chan struct{} // capacity-1 channel used as a binary semaphore
	Path     string        // the access controlled path (the lock key)

	start time.Time // profiling: when the path lock was acquired
	stop  time.Time // profiling: when the path lock was released
}

// NewProxyAccessControl creates a new instance of an access control structure
func NewProxyAccessControl(proxy *Proxy, path string) *proxyAccessControl {
	return &proxyAccessControl{
		Proxy:    proxy,
		Path:     path,
		PathLock: make(chan struct{}, 1),
	}
}
+
// lock will prevent access to a model path
// Sending on the capacity-1 channel blocks while another holder is active.
func (pac *proxyAccessControl) lock() {
	pac.PathLock <- struct{}{}
	pac.setStart(time.Now())
}

// unlock will release control of a model path
// It also records, for profiling, how long the lock was held.
func (pac *proxyAccessControl) unlock() {
	<-pac.PathLock
	pac.setStop(time.Now())
	GetProfiling().AddToInMemoryLockTime(pac.getStop().Sub(pac.getStart()).Seconds())
}
+
// NOTE(review): these read accessors take the full write lock; the embedded
// RWMutex's RLock would suffice and allow concurrent readers.

// getStart is used for profiling purposes and returns the time at which access control was applied
func (pac *proxyAccessControl) getStart() time.Time {
	pac.Lock()
	defer pac.Unlock()
	return pac.start
}

// getStop is used for profiling purposes and returns the time at which access control was removed
func (pac *proxyAccessControl) getStop() time.Time {
	pac.Lock()
	defer pac.Unlock()
	return pac.stop
}

// getPath returns the access controlled path
func (pac *proxyAccessControl) getPath() string {
	pac.Lock()
	defer pac.Unlock()
	return pac.Path
}

// getProxy returns the proxy used to reach a specific location in the data model
func (pac *proxyAccessControl) getProxy() *Proxy {
	pac.Lock()
	defer pac.Unlock()
	return pac.Proxy
}
+
+// setStart is for profiling purposes and applies a start time value at which access control was started
+func (pac *proxyAccessControl) setStart(time time.Time) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.start = time
+}
+
+// setStop is for profiling purposes and applies a stop time value at which access control was stopped
+func (pac *proxyAccessControl) setStop(time time.Time) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.stop = time
+}
+
+// SetProxy is used to changed the proxy object of an access controlled path
+func (pac *proxyAccessControl) SetProxy(proxy *Proxy) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.Proxy = proxy
+}
+
// List retrieves data linked to a data model path
func (pac *proxyAccessControl) List(path string, depth int, deep bool, txid string, control bool) interface{} {
	if control {
		pac.lock()
		log.Debugw("locked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
		defer pac.unlock()
		defer log.Debugw("unlocked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
	}

	// FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
	// The data traversal through reflection currently corrupts the content
	// NOTE(review): unlike Get below, this call still forwards the caller's
	// depth — confirm whether it too should force 0.

	return pac.getProxy().GetRoot().List(path, "", depth, deep, txid)
}

// Get retrieves data linked to a data model path
func (pac *proxyAccessControl) Get(path string, depth int, deep bool, txid string, control bool) interface{} {
	if control {
		pac.lock()
		log.Debugw("locked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
		defer pac.unlock()
		defer log.Debugw("unlocked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
	}

	// FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
	// The data traversal through reflection currently corrupts the content
	// (the caller-supplied depth parameter is intentionally ignored here)
	return pac.getProxy().GetRoot().Get(path, "", 0, deep, txid)
}
+
// Update changes the content of the data model at the specified location with the provided data
// Returns the updated data, or nil when the underlying update produced no revision.
func (pac *proxyAccessControl) Update(path string, data interface{}, strict bool, txid string, control bool) interface{} {
	if control {
		pac.lock()
		log.Debugw("locked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
		defer pac.unlock()
		defer log.Debugw("unlocked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
	}

	result := pac.getProxy().GetRoot().Update(path, data, strict, txid, nil)

	if result != nil {
		return result.GetData()
	}
	return nil
}
+
+// Add creates a new data model entry at the specified location with the provided data
+func (pac *proxyAccessControl) Add(path string, data interface{}, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--add", log.Fields{"path": path, "fullPath": pac.Path})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--add", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	result := pac.getProxy().GetRoot().Add(path, data, txid, nil)
+
+	if result != nil {
+		return result.GetData()
+	}
+	return nil
+}
+
// Remove discards information linked to the data model path
// The removed revision (if any) is returned as-is by the underlying root.
func (pac *proxyAccessControl) Remove(path string, txid string, control bool) interface{} {
	if control {
		pac.lock()
		log.Debugw("locked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
		defer pac.unlock()
		defer log.Debugw("unlocked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
	}

	return pac.getProxy().GetRoot().Remove(path, txid, nil)
}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/revision.go b/vendor/github.com/opencord/voltha-go/db/model/revision.go
new file mode 100644
index 0000000..2c10137
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/revision.go
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+type Revision interface {
+	Finalize(bool)
+	SetConfig(revision *DataRevision)
+	GetConfig() *DataRevision
+	Drop(txid string, includeConfig bool)
+	StorageDrop(txid string, includeConfig bool)
+	SetChildren(name string, children []Revision)
+	GetChildren(name string) []Revision
+	SetAllChildren(children map[string][]Revision)
+	GetAllChildren() map[string][]Revision
+	SetHash(hash string)
+	GetHash() string
+	ClearHash()
+	SetupWatch(key string)
+	SetName(name string)
+	GetName() string
+	SetBranch(branch *Branch)
+	GetBranch() *Branch
+	Get(int) interface{}
+	GetData() interface{}
+	GetNode() *node
+	LoadFromPersistence(path string, txid string) []Revision
+	UpdateData(data interface{}, branch *Branch) Revision
+	UpdateChildren(name string, children []Revision, branch *Branch) Revision
+	UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/root.go b/vendor/github.com/opencord/voltha-go/db/model/root.go
new file mode 100644
index 0000000..8f9e001
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/root.go
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"sync"
+)
+
// Root is used to provide an abstraction to the base root structure
type Root interface {
	Node

	ExecuteCallbacks()
	AddCallback(callback CallbackFunction, args ...interface{})
	AddNotificationCallback(callback CallbackFunction, args ...interface{})
}

// root points to the top of the data model tree or sub-tree identified by a proxy
type root struct {
	*node

	Callbacks             []CallbackTuple // queued post-operation callbacks
	NotificationCallbacks []CallbackTuple // queued notification callbacks

	DirtyNodes    map[string][]*node // txid -> nodes touched by that transaction
	KvStore       *Backend           // optional persistence backend (nil = in-memory only)
	Loading       bool
	RevisionClass interface{} // reflect.Type selecting persisted vs non-persisted revisions

	mutex sync.RWMutex // guards the two callback queues
}
+
+// NewRoot creates an new instance of a root object
+func NewRoot(initialData interface{}, kvStore *Backend) *root {
+	root := &root{}
+
+	root.KvStore = kvStore
+	root.DirtyNodes = make(map[string][]*node)
+	root.Loading = false
+
+	// If there is no storage in place just revert to
+	// a non persistent mechanism
+	if kvStore != nil {
+		root.RevisionClass = reflect.TypeOf(PersistedRevision{})
+	} else {
+		root.RevisionClass = reflect.TypeOf(NonPersistedRevision{})
+	}
+
+	root.Callbacks = []CallbackTuple{}
+	root.NotificationCallbacks = []CallbackTuple{}
+
+	root.node = NewNode(root, initialData, false, "")
+
+	return root
+}
+
// MakeTxBranch creates a new transaction branch
// The returned txid is the first 12 hex characters of a fresh UUID.
func (r *root) MakeTxBranch() string {
	txidBin, _ := uuid.New().MarshalBinary()
	txid := hex.EncodeToString(txidBin)[:12]

	// The root node itself is always part of the transaction's dirty set.
	r.DirtyNodes[txid] = []*node{r.node}
	r.node.MakeBranch(txid)

	return txid
}

// DeleteTxBranch removes a transaction branch
// All dirty-node bookkeeping for the transaction is discarded with it.
func (r *root) DeleteTxBranch(txid string) {
	for _, dirtyNode := range r.DirtyNodes[txid] {
		dirtyNode.DeleteBranch(txid)
	}
	delete(r.DirtyNodes, txid)
	delete(r.node.Branches, txid)
}

// FoldTxBranch will merge the contents of a transaction branch with the root object
func (r *root) FoldTxBranch(txid string) {
	// Start by doing a dry run of the merge
	// If that fails, it bails out and the branch is deleted
	if _, err := r.node.MergeBranch(txid, true); err != nil {
		// Merge operation fails
		r.DeleteTxBranch(txid)
	} else {
		// NOTE(review): the real merge's error is discarded — confirm it
		// cannot fail after a successful dry run.
		r.node.MergeBranch(txid, false)
		r.ExecuteCallbacks()
		r.DeleteTxBranch(txid)
	}
}
+
// ExecuteCallbacks will invoke all the callbacks linked to root object
// Both queues are drained under the root mutex; each callback runs in its
// own goroutine, so completion is not awaited here.
func (r *root) ExecuteCallbacks() {
	r.mutex.Lock()
	log.Debugf("ExecuteCallbacks has the ROOT lock : %+v", r)
	defer r.mutex.Unlock()
	defer log.Debugf("ExecuteCallbacks released the ROOT lock : %+v", r)
	for len(r.Callbacks) > 0 {
		callback := r.Callbacks[0]
		r.Callbacks = r.Callbacks[1:]
		go callback.Execute(nil)
	}
	for len(r.NotificationCallbacks) > 0 {
		callback := r.NotificationCallbacks[0]
		r.NotificationCallbacks = r.NotificationCallbacks[1:]
		go callback.Execute(nil)
	}
}
+
+func (r *root) hasCallbacks() bool {
+	return len(r.Callbacks) == 0
+}
+
// GetCallbacks returns the available callbacks
// (comment previously said "getCallbacks", which does not match the name)
func (r *root) GetCallbacks() []CallbackTuple {
	r.mutex.Lock()
	log.Debugf("getCallbacks has the ROOT lock : %+v", r)
	defer r.mutex.Unlock()
	defer log.Debugf("getCallbacks released the ROOT lock : %+v", r)
	return r.Callbacks
}

// GetNotificationCallbacks returns the available notification callbacks
func (r *root) GetNotificationCallbacks() []CallbackTuple {
	r.mutex.Lock()
	log.Debugf("GetNotificationCallbacks has the ROOT lock : %+v", r)
	defer r.mutex.Unlock()
	defer log.Debugf("GetNotificationCallbacks released the ROOT lock : %+v", r)
	return r.NotificationCallbacks
}

// AddCallback inserts a new callback with its arguments
func (r *root) AddCallback(callback CallbackFunction, args ...interface{}) {
	r.mutex.Lock()
	log.Debugf("AddCallback has the ROOT lock : %+v", r)
	defer r.mutex.Unlock()
	defer log.Debugf("AddCallback released the ROOT lock : %+v", r)
	r.Callbacks = append(r.Callbacks, CallbackTuple{callback, args})
}

// AddNotificationCallback inserts a new notification callback with its arguments
func (r *root) AddNotificationCallback(callback CallbackFunction, args ...interface{}) {
	r.mutex.Lock()
	log.Debugf("AddNotificationCallback has the ROOT lock : %+v", r)
	defer r.mutex.Unlock()
	defer log.Debugf("AddNotificationCallback released the ROOT lock : %+v", r)
	r.NotificationCallbacks = append(r.NotificationCallbacks, CallbackTuple{callback, args})
}
+
+func (r *root) syncParent(childRev Revision, txid string) {
+	data := proto.Clone(r.Proxy.ParentNode.Latest().GetData().(proto.Message))
+
+	for fieldName, _ := range ChildrenFields(data) {
+		childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
+		if reflect.TypeOf(childRev.GetData()) == reflect.TypeOf(childDataHolder.Interface()) {
+			childDataHolder = reflect.ValueOf(childRev.GetData())
+			reflect.ValueOf(data).Elem().FieldByName(childDataName).Set(childDataHolder)
+		}
+	}
+
+	r.Proxy.ParentNode.Latest().SetConfig(NewDataRevision(r.Proxy.ParentNode.Root, data))
+	r.Proxy.ParentNode.Latest(txid).Finalize(false)
+}
+
+
// Update modifies the content of an object at a given path with the provided data
// makeBranch must be nil at the root level (branches are created internally).
func (r *root) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
	var result Revision

	if makeBranch != nil {
		// TODO: raise error
	}

	if r.hasCallbacks() {
		// TODO: raise error
	}

	if txid != "" {
		// trackDirty records every node the transaction touches so the
		// branch can later be folded or deleted cleanly.
		trackDirty := func(node *node) *Branch {
			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
			return node.MakeBranch(txid)
		}
		result = r.node.Update(path, data, strict, txid, trackDirty)
	} else {
		result = r.node.Update(path, data, strict, "", nil)
	}

	if result != nil {
		// A proxy rooted below the tree top must push the change upward.
		if r.Proxy.FullPath != r.Proxy.Path {
			r.syncParent(result, txid)
		} else {
			result.Finalize(false)
		}
	}

	r.node.GetRoot().ExecuteCallbacks()

	return result
}
+
// Add creates a new object at the given path with the provided data
// makeBranch must be nil at the root level (branches are created internally).
func (r *root) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
	var result Revision

	if makeBranch != nil {
		// TODO: raise error
	}

	if r.hasCallbacks() {
		// TODO: raise error
	}

	if txid != "" {
		// trackDirty records every node the transaction touches so the
		// branch can later be folded or deleted cleanly.
		trackDirty := func(node *node) *Branch {
			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
			return node.MakeBranch(txid)
		}
		result = r.node.Add(path, data, txid, trackDirty)
	} else {
		result = r.node.Add(path, data, "", nil)
	}

	if result != nil {
		result.Finalize(true)
		r.node.GetRoot().ExecuteCallbacks()
	}
	return result
}
+
// Remove discards an object at a given path
// makeBranch must be nil at the root level (branches are created internally).
func (r *root) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
	var result Revision

	if makeBranch != nil {
		// TODO: raise error
	}

	if r.hasCallbacks() {
		// TODO: raise error
	}

	if txid != "" {
		// trackDirty records every node the transaction touches so the
		// branch can later be folded or deleted cleanly.
		trackDirty := func(node *node) *Branch {
			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
			return node.MakeBranch(txid)
		}
		result = r.node.Remove(path, txid, trackDirty)
	} else {
		result = r.node.Remove(path, "", nil)
	}

	r.node.GetRoot().ExecuteCallbacks()

	return result
}
+
// MakeLatest updates a branch with the latest node revision
func (r *root) MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
	r.makeLatest(branch, revision, changeAnnouncement)
}

// MakeRevision creates a revision of the class selected at construction time
// (persisted when a kv store is configured, non-persisted otherwise).
func (r *root) MakeRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
	if r.RevisionClass.(reflect.Type) == reflect.TypeOf(PersistedRevision{}) {
		return NewPersistedRevision(branch, data, children)
	}

	return NewNonPersistedRevision(r, branch, data, children)
}
+
+func (r *root) makeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
+	r.node.makeLatest(branch, revision, changeAnnouncement)
+
+	if r.KvStore != nil && branch.Txid == "" {
+		tags := make(map[string]string)
+		for k, v := range r.node.Tags {
+			tags[k] = v.GetHash()
+		}
+		data := &rootData{
+			Latest: branch.GetLatest().GetHash(),
+			Tags:   tags,
+		}
+		if blob, err := json.Marshal(data); err != nil {
+			// TODO report error
+		} else {
+			log.Debugf("Changing root to : %s", string(blob))
+			if err := r.KvStore.Put("root", blob); err != nil {
+				log.Errorf("failed to properly put value in kvstore - err: %s", err.Error())
+			}
+		}
+	}
+}
+
+type rootData struct {
+	Latest string            `json:latest`
+	Tags   map[string]string `json:tags`
+}
\ No newline at end of file
diff --git a/vendor/github.com/opencord/voltha-go/db/model/transaction.go b/vendor/github.com/opencord/voltha-go/db/model/transaction.go
new file mode 100644
index 0000000..fa8de1d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/transaction.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+// Transaction binds a proxy to a transaction id so that a sequence of proxy
+// operations (Get/Update/Add/Remove) can be applied under one txid and then
+// committed or cancelled as a unit.
+type Transaction struct {
+	proxy *Proxy // proxy the operations are forwarded to
+	txid  string // transaction id; emptied once the transaction is closed
+}
+
+// NewTransaction wraps the given proxy and transaction id in a Transaction.
+func NewTransaction(proxy *Proxy, txid string) *Transaction {
+	tx := &Transaction{
+		proxy: proxy,
+		txid:  txid,
+	}
+	return tx
+}
+// Get retrieves the data at path within this transaction.
+// Returns nil (after logging) if the transaction is already closed.
+func (t *Transaction) Get(path string, depth int, deep bool) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	// TODO: need to review the return values at the different layers!!!!!
+	return t.proxy.Get(path, depth, deep, t.txid)
+}
+// Update modifies the data at path within this transaction.
+// Returns nil (after logging) if the transaction is already closed.
+func (t *Transaction) Update(path string, data interface{}, strict bool) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Update(path, data, strict, t.txid)
+}
+// Add creates data at path within this transaction.
+// Returns nil (after logging) if the transaction is already closed.
+func (t *Transaction) Add(path string, data interface{}) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Add(path, data, t.txid)
+}
+// Remove deletes the data at path within this transaction.
+// Returns nil (after logging) if the transaction is already closed.
+func (t *Transaction) Remove(path string) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Remove(path, t.txid)
+}
+// Cancel aborts the transaction and marks it as closed.
+// Guard added for consistency with Get/Update/Add/Remove: without it, a
+// second Cancel would forward an empty txid to the proxy.
+func (t *Transaction) Cancel() {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return
+	}
+	t.proxy.cancelTransaction(t.txid)
+	t.txid = ""
+}
+// Commit applies the transaction's changes and marks it as closed.
+// Guard added for consistency with Get/Update/Add/Remove: without it, a
+// Commit after Cancel (or a double Commit) would forward an empty txid.
+func (t *Transaction) Commit() {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return
+	}
+	t.proxy.commitTransaction(t.txid)
+	t.txid = ""
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/utils.go b/vendor/github.com/opencord/voltha-go/db/model/utils.go
new file mode 100644
index 0000000..f0fd618
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/utils.go
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"encoding/gob"
+	"reflect"
+	"strings"
+)
+
+// IsProtoMessage determines if the specified implements proto.Message type
+// (detected structurally: the dynamic type exposes a ProtoMessage method).
+// A nil argument returns false.
+func IsProtoMessage(object interface{}) bool {
+	var ok = false
+
+	if object != nil {
+		st := reflect.TypeOf(object)
+		_, ok = st.MethodByName("ProtoMessage")
+	}
+	return ok
+}
+
+// FindOwnerType will traverse a data structure and find the parent type of the specified object
+// It walks obj recursively (pointers, structs, slices), matching struct fields
+// by the first component of their `json` tag; once a match is found it is
+// called again with found=true, at which point the next pointer/struct/slice
+// element type encountered is returned.  Returns nil when name never matches.
+func FindOwnerType(obj reflect.Value, name string, depth int, found bool) reflect.Type {
+	// NOTE(review): prefix is dead code — only referenced by the commented-out
+	// debug log in the default case.  (Also note the stray space before the
+	// tab on the next line, left as-is to match upstream.)
+ 	prefix := ""
+	for d:=0; d< depth; d++ {
+		prefix += ">>"
+	}
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		if found {
+			return obj.Type()
+		}
+
+		// Recurse into a fresh zero value of the pointed-to type.
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := FindOwnerType(n.Elem(), name, depth+1, found); rc != nil {
+			return rc
+		}
+
+	case reflect.Struct:
+		if found {
+			return obj.Type()
+		}
+
+		for i := 0; i < obj.NumField(); i++ {
+			v := reflect.Indirect(obj)
+
+			// Compare against the field's json tag name (text before any comma).
+			json := strings.Split(v.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return FindOwnerType(obj.Field(i), name, depth+1, true)
+			}
+
+			if rc := FindOwnerType(obj.Field(i), name, depth+1, found); rc != nil {
+				return rc
+			}
+		}
+	case reflect.Slice:
+		// Build a one-element slice of the same type so the element type can
+		// be inspected even when obj itself is empty.
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < n.Elem().Len(); i++ {
+			if found {
+				return reflect.ValueOf(n.Elem().Index(i).Interface()).Type()
+			}
+		}
+
+		for i := 0; i < obj.Len(); i++ {
+			if found {
+				return obj.Index(i).Type()
+			}
+
+			if rc := FindOwnerType(obj.Index(i), name, depth+1, found); rc != nil {
+				return rc
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return nil
+}
+
+// FindKeyOwner will traverse a structure to find the owner type of the specified name
+// It matches struct fields by the first component of their `json` tag and
+// returns that field's reflect.Type; pointers are dereferenced via a fresh
+// zero value and slices are probed through a synthetic one-element slice.
+// Returns nil when no field's json tag matches name.
+func FindKeyOwner(iface interface{}, name string, depth int) interface{} {
+	obj := reflect.ValueOf(iface)
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		// Recurse into a fresh zero value of the pointed-to type.
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := FindKeyOwner(n.Elem().Interface(), name, depth+1); rc != nil {
+			return rc
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			// json tag name is the text before any comma (e.g. "id,omitempty").
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return obj.Type().Field(i).Type
+			}
+
+			if rc := FindKeyOwner(obj.Field(i).Interface(), name, depth+1); rc != nil {
+				return rc
+			}
+		}
+
+	case reflect.Slice:
+		// Probe the element type through a synthetic one-element slice so an
+		// empty input slice can still be inspected.
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < n.Elem().Len(); i++ {
+			if rc := FindKeyOwner(n.Elem().Index(i).Interface(), name, depth+1); rc != nil {
+				return rc
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return nil
+}
+
+// GetAttributeValue traverse a structure to find the value of an attribute
+// identified by its json tag name; it returns the Go field name and the
+// field's reflect.Value (both zero values when no match is found).
+// FIXME: Need to figure out if GetAttributeValue and GetAttributeStructure can become one
+// Code is repeated in both, but outputs have a different purpose
+// Left as-is for now to get things working
+func GetAttributeValue(data interface{}, name string, depth int) (string, reflect.Value) {
+	var attribName string
+	var attribValue reflect.Value
+	obj := reflect.ValueOf(data)
+
+	// Invalid reflect.Value (e.g. data == nil): nothing to search.
+	if !obj.IsValid() {
+		return attribName, attribValue
+	}
+
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		if obj.IsNil() {
+			return attribName, attribValue
+		}
+
+		if attribName, attribValue = GetAttributeValue(obj.Elem().Interface(), name, depth+1); attribValue.IsValid() {
+			return attribName, attribValue
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			// Match on the json tag name (text before any comma).
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return obj.Type().Field(i).Name, obj.Field(i)
+			}
+
+			if obj.Field(i).IsValid() {
+				if attribName, attribValue = GetAttributeValue(obj.Field(i).Interface(), name, depth+1); attribValue.IsValid() {
+					return attribName, attribValue
+				}
+			}
+		}
+
+	case reflect.Slice:
+		// NOTE(review): s and n are never used below (the loop iterates obj
+		// directly) — dead code, kept to match upstream.
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < obj.Len(); i++ {
+			if attribName, attribValue = GetAttributeValue(obj.Index(i).Interface(), name, depth+1); attribValue.IsValid() {
+				return attribName, attribValue
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return attribName, attribValue
+
+}
+
+// GetAttributeStructure will traverse a structure to find the data structure for the named attribute
+// (matched by json tag name) and returns its reflect.StructField descriptor;
+// the zero StructField (Name == "") is returned when no match is found.
+// FIXME: See GetAttributeValue(...) comment
+func GetAttributeStructure(data interface{}, name string, depth int) reflect.StructField {
+	var result reflect.StructField
+	obj := reflect.ValueOf(data)
+
+	// Invalid reflect.Value (e.g. data == nil): nothing to search.
+	if !obj.IsValid() {
+		return result
+	}
+
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		// Recurse into a fresh zero value of the pointed-to type.
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := GetAttributeStructure(n.Elem().Interface(), name, depth+1); rc.Name != "" {
+			return rc
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			v := reflect.Indirect(obj)
+			// Match on the json tag name (text before any comma).
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return v.Type().Field(i)
+			}
+
+			if obj.Field(i).IsValid() {
+				if rc := GetAttributeStructure(obj.Field(i).Interface(), name, depth+1); rc.Name != "" {
+					return rc
+				}
+			}
+		}
+
+	case reflect.Slice:
+		// NOTE(review): s and n are never used below (the loop iterates obj
+		// directly) — dead code, kept to match upstream.
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < obj.Len(); i++ {
+			if rc := GetAttributeStructure(obj.Index(i).Interface(), name, depth+1); rc.Name != "" {
+				return rc
+			}
+
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return result
+
+}
+
+// clone2 attempts a deep copy of a via gob encode/decode.
+// NOTE(review): this looks broken — gob's Decode requires a pointer, but
+// b.Elem().Interface() passes a value, so the decode fails (its error is
+// ignored) and the returned value is not a fresh copy; b.Elem() also panics
+// when a is not a pointer or interface.  Confirm whether this is dead code
+// before fixing — callers elsewhere may rely on clone(a, b) instead.
+func clone2(a interface{}) interface{} {
+	b := reflect.ValueOf(a)
+	buff := new(bytes.Buffer)
+	enc := gob.NewEncoder(buff)
+	dec := gob.NewDecoder(buff)
+	enc.Encode(a)
+	dec.Decode(b.Elem().Interface())
+
+	return b.Interface()
+}
+
+// clone deep-copies a into b via gob round-trip and returns b; b must be a
+// pointer to a gob-decodable value of a compatible type.
+// Encode/Decode errors are ignored, so on failure b may be left partially
+// or wholly unmodified with no indication to the caller.
+func clone(a, b interface{}) interface{} {
+	buff := new(bytes.Buffer)
+	enc := gob.NewEncoder(buff)
+	dec := gob.NewDecoder(buff)
+	enc.Encode(a)
+	dec.Decode(b)
+	return b
+}