VOL-1558 Update vendored voltha-go and other items

Result of running dep ensure; golang openolt now builds.

Also update the Dockerfile to use a specific Alpine version.

Change-Id: I1e5407e25bb0636a241a0650d1e44e5df567f44b
diff --git a/vendor/github.com/boljen/go-bitmap/LICENSE b/vendor/github.com/boljen/go-bitmap/LICENSE
new file mode 100644
index 0000000..13cc28c
--- /dev/null
+++ b/vendor/github.com/boljen/go-bitmap/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Bol Christophe
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/boljen/go-bitmap/README.md b/vendor/github.com/boljen/go-bitmap/README.md
new file mode 100644
index 0000000..5ff5eba
--- /dev/null
+++ b/vendor/github.com/boljen/go-bitmap/README.md
@@ -0,0 +1,30 @@
+# Bitmap (Go)
+
+Package bitmap implements (thread-safe) bitmap functions and abstractions.
+
+## Install
+
+    go get github.com/boljen/go-bitmap
+
+## Documentation
+
+See [godoc](https://godoc.org/github.com/boljen/go-bitmap)
+
+## Example
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/boljen/go-bitmap"
+    )
+
+    func main() {
+        bm := bitmap.New(100)
+        bm.Set(0, true)
+        fmt.Println(bm.Get(0))
+    }
+
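+## Thread-safe variants
+
+A minimal sketch of the two concurrent wrappers this package also exposes
+(same Get/Set interface; names taken from bitmap.go):
+
+    ts := bitmap.NewTS(100)        // mutex-guarded bitmap
+    ts.Set(1, true)
+
+    c := bitmap.NewConcurrent(100) // bitmap using 32-bit atomic operations
+    c.Set(2, true)
+    fmt.Println(ts.Get(1), c.Get(2))
+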
+## License
+
+MIT
diff --git a/vendor/github.com/boljen/go-bitmap/atomic.go b/vendor/github.com/boljen/go-bitmap/atomic.go
new file mode 100644
index 0000000..f04d76e
--- /dev/null
+++ b/vendor/github.com/boljen/go-bitmap/atomic.go
@@ -0,0 +1,64 @@
+package bitmap
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+var oobPanic = "SetAtomic not allowed on a bitmapSlice of cap() < 4"
+
+// SetAtomic is similar to Set except that it performs the operation atomically.
+func SetAtomic(bitmap []byte, targetBit int, targetValue bool) {
+	// Index the start of the 4-byte word containing targetBit.
+	ov := (*[1]uint32)(unsafe.Pointer(&bitmap[targetBit/32*4]))[:]
+	SetAtomicUint32(ov, targetBit%32, targetValue)
+}
+
+// _SetAtomic is similar to Set except that it performs the operation atomically.
+// It needs a bitmapSlice where the capacity is at least 4 bytes.
+func _SetAtomic(bitmapSlice []byte, targetBit int, targetValue bool) {
+	targetByteIndex := targetBit / 8
+	targetBitIndex := targetBit % 8
+	targetOffset := 0
+
+	// SetAtomic needs to modify 4 bytes of data so we panic when the slice
+	// doesn't have a capacity of at least 4 bytes.
+	if cap(bitmapSlice) < 4 {
+		panic(oobPanic)
+	}
+
+	// Calculate the offset of the targetByte inside the 4-byte atomic batch.
+	// This is needed to keep the atomic operation in bounds as long as
+	// the bitmapSlice is at least 4 bytes long.
+	if cap(bitmapSlice) < targetByteIndex+3 {
+		targetOffset = cap(bitmapSlice) - targetByteIndex
+	}
+
+	// This gets a pointer to the memory of 4 bytes inside the bitmapSlice.
+	// It stores this pointer as an *uint32 so that it can be used to
+	// execute sync.atomic operations.
+	targetBytePointer := (*uint32)(unsafe.Pointer(&bitmapSlice[targetByteIndex-targetOffset]))
+
+	for {
+		// localValue is a copy of the uint32 value at *targetBytePointer.
+		// It's used to check whether the targetBit must be updated,
+		// and if so, to construct the new value for targetBytePointer.
+		localValue := atomic.LoadUint32(targetBytePointer)
+
+		// This "neutralizes" the uint32 conversion by getting a pointer to the
+		// 4-byte array stored underneath the uint32.
+		targetByteCopyPointer := (*[4]byte)(unsafe.Pointer(&localValue))
+
+		// Work is done when targetBit is already set to targetValue.
+		if GetBit(targetByteCopyPointer[targetOffset], targetBitIndex) == targetValue {
+			return
+		}
+
+		// Modify the targetBit and update memory so that the targetBit is the only bit
+		// that has been modified in the batch.
+		referenceValue := localValue
+		SetBitRef(&targetByteCopyPointer[targetOffset], targetBitIndex, targetValue)
+		if atomic.CompareAndSwapUint32(targetBytePointer, referenceValue, localValue) {
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/boljen/go-bitmap/bitmap.go b/vendor/github.com/boljen/go-bitmap/bitmap.go
new file mode 100644
index 0000000..dfe5cc2
--- /dev/null
+++ b/vendor/github.com/boljen/go-bitmap/bitmap.go
@@ -0,0 +1,200 @@
+// Package bitmap implements (thread-safe) bitmap functions and abstractions.
+//
+// Installation
+//
+// 	  go get github.com/boljen/go-bitmap
+package bitmap
+
+import "sync"
+
+var (
+	tA = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+	tB = [8]byte{254, 253, 251, 247, 239, 223, 191, 127}
+)
+
+func dataOrCopy(d []byte, c bool) []byte {
+	if !c {
+		return d
+	}
+	ndata := make([]byte, len(d))
+	copy(ndata, d)
+	return ndata
+}
+
+// NewSlice creates a new byteslice with length l (in bits).
+// The actual size in bits might be up to 7 bits larger because
+// they are stored in a byteslice.
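+//
+// For example, NewSlice(10) allocates 2 bytes (16 bits, rounded up from 10).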
+func NewSlice(l int) []byte {
+	remainder := l % 8
+	if remainder != 0 {
+		remainder = 1
+	}
+	return make([]byte, l/8+remainder)
+}
+
+// Get returns the value of bit i from map m.
+// It doesn't check the bounds of the slice.
+func Get(m []byte, i int) bool {
+	return m[i/8]&tA[i%8] != 0
+}
+
+// Set sets bit i of map m to value v.
+// It doesn't check the bounds of the slice.
+func Set(m []byte, i int, v bool) {
+	index := i / 8
+	bit := i % 8
+	if v {
+		m[index] = m[index] | tA[bit]
+	} else {
+		m[index] = m[index] & tB[bit]
+	}
+}
+
+// GetBit returns the value of bit i of byte b.
+// The bit index must be between 0 and 7.
+func GetBit(b byte, i int) bool {
+	return b&tA[i] != 0
+}
+
+// SetBit sets bit i of byte b to value v.
+// The bit index must be between 0 and 7.
+func SetBit(b byte, i int, v bool) byte {
+	if v {
+		return b | tA[i]
+	}
+	return b & tB[i]
+}
+
+// SetBitRef sets bit i of byte *b to value v.
+func SetBitRef(b *byte, i int, v bool) {
+	if v {
+		*b = *b | tA[i]
+	} else {
+		*b = *b & tB[i]
+	}
+}
+
+// Len returns the length (in bits) of the provided byteslice.
+// It will always be a multiple of 8 bits.
+func Len(m []byte) int {
+	return len(m) * 8
+}
+
+// Bitmap is a byteslice with bitmap functions.
+// Creating one from existing data is as simple as bitmap := Bitmap(data).
+type Bitmap []byte
+
+// New creates a new Bitmap instance with length l (in bits).
+func New(l int) Bitmap {
+	return NewSlice(l)
+}
+
+// Len wraps around the Len function.
+func (b Bitmap) Len() int {
+	return Len(b)
+}
+
+// Get wraps around the Get function.
+func (b Bitmap) Get(i int) bool {
+	return Get(b, i)
+}
+
+// Set wraps around the Set function.
+func (b Bitmap) Set(i int, v bool) {
+	Set(b, i, v)
+}
+
+// Data returns the data of the bitmap.
+// If copy is false the actual underlying slice will be returned.
+func (b Bitmap) Data(copy bool) []byte {
+	return dataOrCopy(b, copy)
+}
+
+// Threadsafe implements thread-safe read- and write locking for the bitmap.
+type Threadsafe struct {
+	bm Bitmap
+	mu sync.RWMutex
+}
+
+// TSFromData creates a new Threadsafe using the provided data.
+// If copy is false the actual underlying slice will be used.
+func TSFromData(data []byte, copy bool) *Threadsafe {
+	return &Threadsafe{
+		bm: Bitmap(dataOrCopy(data, copy)),
+	}
+}
+
+// NewTS creates a new Threadsafe instance.
+func NewTS(length int) *Threadsafe {
+	return &Threadsafe{
+		bm: New(length),
+	}
+}
+
+// Data returns the data of the bitmap.
+// If copy is false the actual underlying slice will be returned.
+func (b *Threadsafe) Data(copy bool) []byte {
+	b.mu.RLock()
+	data := dataOrCopy(b.bm, copy)
+	b.mu.RUnlock()
+	return data
+}
+
+// Len wraps around the Len function.
+func (b *Threadsafe) Len() int {
+	b.mu.RLock()
+	l := b.bm.Len()
+	b.mu.RUnlock()
+	return l
+}
+
+// Get wraps around the Get function.
+func (b *Threadsafe) Get(i int) bool {
+	b.mu.RLock()
+	v := b.bm.Get(i)
+	b.mu.RUnlock()
+	return v
+}
+
+// Set wraps around the Set function.
+func (b *Threadsafe) Set(i int, v bool) {
+	b.mu.Lock()
+	b.bm.Set(i, v)
+	b.mu.Unlock()
+}
+
+// Concurrent is a bitmap implementation that achieves thread-safety
+// using atomic operations along with some unsafe code.
+// It performs atomic operations on 32 bits of data.
+type Concurrent []byte
+
+// NewConcurrent returns a concurrent bitmap.
+// It creates the bitmap with up to 3 bytes of extra capacity
+// so that the 32-bit atomic operations always stay in bounds.
+func NewConcurrent(l int) Concurrent {
+	remainder := l % 8
+	if remainder != 0 {
+		remainder = 1
+	}
+	return make([]byte, l/8+remainder, l/8+remainder+3)
+}
+
+// Get wraps around the Get function.
+func (c Concurrent) Get(b int) bool {
+	return Get(c, b)
+}
+
+// Set wraps around the SetAtomic function.
+func (c Concurrent) Set(b int, v bool) {
+	SetAtomic(c, b, v)
+}
+
+// Len wraps around the Len function.
+func (c Concurrent) Len() int {
+	return Len(c)
+}
+
+// Data returns the data of the bitmap.
+// If copy is false the actual underlying slice will be returned.
+func (c Concurrent) Data(copy bool) []byte {
+	return dataOrCopy(c, copy)
+}
diff --git a/vendor/github.com/boljen/go-bitmap/uintmap.go b/vendor/github.com/boljen/go-bitmap/uintmap.go
new file mode 100644
index 0000000..72cbf4a
--- /dev/null
+++ b/vendor/github.com/boljen/go-bitmap/uintmap.go
@@ -0,0 +1,32 @@
+package bitmap
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// SetAtomicUint32 sets the target bit to the target value inside the uint32
+// encoded bitmap.
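+//
+// For example, targetBit 37 updates the word bitmap[1] at bit offset 5
+// (37/32 = 1, 37%32 = 5).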
+func SetAtomicUint32(bitmap []uint32, targetBit int, targetValue bool) {
+	targetIndex := targetBit / 32
+	bitOffset := targetBit % 32
+
+	for {
+		localValue := atomic.LoadUint32(&bitmap[targetIndex])
+		targetBytes := (*[4]byte)(unsafe.Pointer(&localValue))[:]
+		if Get(targetBytes, bitOffset) == targetValue {
+			return
+		}
+		referenceValue := localValue
+		Set(targetBytes, bitOffset, targetValue)
+		if atomic.CompareAndSwapUint32(&bitmap[targetIndex], referenceValue, localValue) {
+			break
+		}
+	}
+}
+
+// GetAtomicUint32 gets the target bit from an uint32 encoded bitmap.
+func GetAtomicUint32(bitmap []uint32, targetBit int) bool {
+	data := (*[4]byte)(unsafe.Pointer(&bitmap[targetBit/32]))[:]
+	return Get(data, targetBit%32)
+}
diff --git a/vendor/github.com/cevaris/ordered_map/.gitignore b/vendor/github.com/cevaris/ordered_map/.gitignore
new file mode 100644
index 0000000..4af4e59
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.gitignore
@@ -0,0 +1,5 @@
+*.test
+*~
+
+.idea
+*.iml
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
new file mode 100644
index 0000000..193242f
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.travis.yml
@@ -0,0 +1,19 @@
+---
+language: go
+
+go:
+  - tip
+  - 1.12
+  - 1.11
+  - 1.10
+  - 1.9
+  - 1.8
+  - 1.7
+  - 1.6
+  - 1.5
+  - 1.4
+  - 1.3
+
+install:
+  - make
+  - make test
diff --git a/vendor/github.com/cevaris/ordered_map/LICENSE.md b/vendor/github.com/cevaris/ordered_map/LICENSE.md
new file mode 100644
index 0000000..4cb9b14
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2016 Adam Cardenas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cevaris/ordered_map/Makefile b/vendor/github.com/cevaris/ordered_map/Makefile
new file mode 100644
index 0000000..099e53b
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/Makefile
@@ -0,0 +1,10 @@
+all: build install 
+
+build:
+	go build
+
+install:
+	go install
+
+test:
+	go test -v *.go
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
new file mode 100644
index 0000000..bc3e366
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -0,0 +1,113 @@
+# Ordered Map for golang
+
+[![Build Status](https://travis-ci.org/cevaris/ordered_map.svg?branch=master)](https://travis-ci.org/cevaris/ordered_map)
+
+**OrderedMap** is a Go port of Python's OrderedDict. Go's builtin `map` purposefully randomizes the iteration order of stored key/values. The **OrderedMap** struct preserves inserted key/value pairs, so that on iteration, key/value pairs are received in inserted (first in, first out) order.
+
+
+## Features
+- Full key/value support for all data types
+- Exposes an Iterator that iterates in order of insertion
+- Full Get/Set/Delete map interface
+- Supports Golang v1.3 through v1.12
+
+## Download and Install 
+  
+`go get github.com/cevaris/ordered_map`
+
+
+## Examples
+
+### Create, Get, Set, Delete
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/cevaris/ordered_map"
+)
+
+func main() {
+
+    // Init new OrderedMap
+    om := ordered_map.NewOrderedMap()
+
+    // Set key
+    om.Set("a", 1)
+    om.Set("b", 2)
+    om.Set("c", 3)
+    om.Set("d", 4)
+
+    // Same interface as builtin map
+    if val, ok := om.Get("b"); ok {
+        // Found key "b"
+        fmt.Println(val)
+    }
+
+    // Delete a key
+    om.Delete("c")
+
+    // Failed Get lookup because we deleted "c"
+    if _, ok := om.Get("c"); !ok {
+        // Did not find key "c"
+        fmt.Println("c not found")
+    }
+    
+    fmt.Println(om)
+}
+```
+
+
+### Iterator
+
+```go
+n := 100
+om := ordered_map.NewOrderedMap()
+
+for i := 0; i < n; i++ {
+    // Insert data into OrderedMap
+    om.Set(i, fmt.Sprintf("%d", i*i))
+}
+
+// Iterate through values
+// - Values are iterated in insertion order
+// - Each value is returned in a key/value pair struct
+iter := om.IterFunc()
+for kv, ok := iter(); ok; kv, ok = iter() {
+    fmt.Println(kv, kv.Key, kv.Value)
+}
+```
+
+### Custom Structs
+
+```go
+om := ordered_map.NewOrderedMap()
+om.Set("one", &MyStruct{1, 1.1})
+om.Set("two", &MyStruct{2, 2.2})
+om.Set("three", &MyStruct{3, 3.3})
+
+fmt.Println(om)
+// Output: OrderedMap[one:&{1 1.1},  two:&{2 2.2},  three:&{3 3.3}, ]
+```
+  
+## For Development
+
+Git clone project 
+
+`git clone https://github.com/cevaris/ordered_map.git`  
+  
+Build and install project
+
+`make`
+
+Run tests 
+
+`make test`
+
diff --git a/vendor/github.com/cevaris/ordered_map/key_pair.go b/vendor/github.com/cevaris/ordered_map/key_pair.go
new file mode 100644
index 0000000..88afbcf
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/key_pair.go
@@ -0,0 +1,16 @@
+package ordered_map
+
+import "fmt"
+
+type KVPair struct {
+	Key   interface{}
+	Value interface{}
+}
+
+func (k *KVPair) String() string {
+	return fmt.Sprintf("%v:%v", k.Key, k.Value)
+}
+
+func (kv1 *KVPair) Compare(kv2 *KVPair) bool {
+	return kv1.Key == kv2.Key && kv1.Value == kv2.Value
+}
\ No newline at end of file
diff --git a/vendor/github.com/cevaris/ordered_map/node.go b/vendor/github.com/cevaris/ordered_map/node.go
new file mode 100644
index 0000000..ad0d142
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/node.go
@@ -0,0 +1,62 @@
+package ordered_map
+
+import (
+	"fmt"
+	"bytes"
+)
+
+type node struct {
+	Prev  *node
+	Next  *node
+	Value interface{}
+}
+
+func newRootNode() *node {
+	root := &node{}
+	root.Prev = root
+	root.Next = root
+	return root
+}
+
+func newNode(prev *node, next *node, key interface{}) *node {
+	return &node{Prev: prev, Next: next, Value: key}
+}
+
+func (n *node) Add(value string) {
+	root := n
+	last := root.Prev
+	last.Next = newNode(last, n, value)
+	root.Prev = last.Next
+}
+
+func (n *node) String() string {
+	var buffer bytes.Buffer
+	if n.Value == "" {
+		// The empty value marks the sentinel root node: walk the list
+		var curr *node
+		root := n
+		curr = root.Next
+		for curr != root {
+			buffer.WriteString(fmt.Sprintf("%s, ", curr.Value))
+			curr = curr.Next
+		}
+	} else {
+		// Else, print pointer value
+		buffer.WriteString(fmt.Sprintf("%p, ", &n))
+	}
+	return fmt.Sprintf("LinkList[%v]", buffer.String())
+}
+
+func (n *node) IterFunc() func() (string, bool) {
+	var curr *node
+	root := n
+	curr = root.Next
+	return func() (string, bool) {
+		for curr != root {
+			tmp := curr.Value.(string)
+			curr = curr.Next
+			return tmp, true
+		}
+		return "", false
+	}
+}
diff --git a/vendor/github.com/cevaris/ordered_map/ordered_map.go b/vendor/github.com/cevaris/ordered_map/ordered_map.go
new file mode 100644
index 0000000..4116cd8
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/ordered_map.go
@@ -0,0 +1,121 @@
+package ordered_map
+
+import (
+	"fmt"
+)
+
+type OrderedMap struct {
+	store  map[interface{}]interface{}
+	mapper map[interface{}]*node
+	root   *node
+}
+
+func NewOrderedMap() *OrderedMap {
+	om := &OrderedMap{
+		store:  make(map[interface{}]interface{}),
+		mapper: make(map[interface{}]*node),
+		root:   newRootNode(),
+	}
+	return om
+}
+
+func NewOrderedMapWithArgs(args []*KVPair) *OrderedMap {
+	om := NewOrderedMap()
+	om.update(args)
+	return om
+}
+
+func (om *OrderedMap) update(args []*KVPair) {
+	for _, pair := range args {
+		om.Set(pair.Key, pair.Value)
+	}
+}
+
+func (om *OrderedMap) Set(key interface{}, value interface{}) {
+	if _, ok := om.store[key]; !ok {
+		root := om.root
+		last := root.Prev
+		last.Next = newNode(last, root, key)
+		root.Prev = last.Next
+		om.mapper[key] = last.Next
+	}
+	om.store[key] = value
+}
+
+func (om *OrderedMap) Get(key interface{}) (interface{}, bool) {
+	val, ok := om.store[key]
+	return val, ok
+}
+
+func (om *OrderedMap) Delete(key interface{}) {
+	_, ok := om.store[key]
+	if ok {
+		delete(om.store, key)
+	}
+	root, rootFound := om.mapper[key]
+	if rootFound {
+		prev := root.Prev
+		next := root.Next
+		prev.Next = next
+		next.Prev = prev
+		delete(om.mapper, key)
+	}
+}
+
+func (om *OrderedMap) String() string {
+	builder := make([]string, len(om.store))
+
+	var index int = 0
+	iter := om.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		val, _ := om.Get(kv.Key)
+		builder[index] = fmt.Sprintf("%v:%v, ", kv.Key, val)
+		index++
+	}
+	return fmt.Sprintf("OrderedMap%v", builder)
+}
+
+func (om *OrderedMap) Iter() <-chan *KVPair {
+	println("Iter() method is deprecated! Use IterFunc() instead.")
+	return om.UnsafeIter()
+}
+
+/*
+Beware: the iterator leaks a goroutine if the map is not fully traversed.
+For most cases, `IterFunc()` should work as an iterator.
+*/
+func (om *OrderedMap) UnsafeIter() <-chan *KVPair {
+	keys := make(chan *KVPair)
+	go func() {
+		defer close(keys)
+		var curr *node
+		root := om.root
+		curr = root.Next
+		for curr != root {
+			v, _ := om.store[curr.Value]
+			keys <- &KVPair{curr.Value, v}
+			curr = curr.Next
+		}
+	}()
+	return keys
+}
+
+func (om *OrderedMap) IterFunc() func() (*KVPair, bool) {
+	var curr *node
+	root := om.root
+	curr = root.Next
+	return func() (*KVPair, bool) {
+		for curr != root {
+			tmp := curr
+			curr = curr.Next
+			v, _ := om.store[tmp.Value]
+			return &KVPair{tmp.Value, v}, true
+		}
+		return nil, false
+	}
+}
+
+func (om *OrderedMap) Len() int {
+	return len(om.store)
+}
+
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
new file mode 100644
index 0000000..ac7e51b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
+	r, err := gzip.NewReader(bytes.NewReader(gz))
+	if err != nil {
+		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+	}
+	defer r.Close()
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+	}
+
+	fd := new(protobuf.FileDescriptorProto)
+	if err := proto.Unmarshal(b, fd); err != nil {
+		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+	}
+
+	return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
+// describing the given message.
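+//
+// A minimal usage sketch, where MyMessage stands for any
+// protoc-generated message type:
+//
+//	fd, md := descriptor.ForMessage(&MyMessage{})
+//	fmt.Println(fd.GetName(), md.GetName())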
+func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
+	gz, path := msg.Descriptor()
+	fd, err := extractFile(gz)
+	if err != nil {
+		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+	}
+
+	md = fd.MessageType[path[0]]
+	for _, i := range path[1:] {
+		md = md.NestedType[i]
+	}
+	return fd, md
+}
diff --git a/vendor/github.com/gyuho/goraph/.travis.yml b/vendor/github.com/gyuho/goraph/.travis.yml
new file mode 100644
index 0000000..4228e24
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+sudo: false
+
+go:
+- 1.6
+- tip
+
+script:
+- ./test
+
diff --git a/vendor/github.com/gyuho/goraph/LICENSE b/vendor/github.com/gyuho/goraph/LICENSE
new file mode 100644
index 0000000..f7303ba
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Gyu-Ho Lee
+Copyright (c) 2016 Google Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/gyuho/goraph/README.md b/vendor/github.com/gyuho/goraph/README.md
new file mode 100644
index 0000000..0b5b590
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/README.md
@@ -0,0 +1,32 @@
+## goraph [![Build Status](https://img.shields.io/travis/gyuho/goraph.svg?style=flat-square)](https://travis-ci.org/gyuho/goraph) [![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/gyuho/goraph)
+
+Package goraph implements graph data structures and algorithms.
+
+```
+go get -v gopkg.in/gyuho/goraph.v2;
+```
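+
+A minimal usage sketch, using only the API defined in this package
+(see graph.go and shortest_path.go):
+
+```
+g := goraph.NewGraph()
+g.AddNode(goraph.NewNode("A"))
+g.AddNode(goraph.NewNode("B"))
+g.AddEdge(goraph.StringID("A"), goraph.StringID("B"), 1.0)
+path, _, _ := goraph.Dijkstra(g, goraph.StringID("A"), goraph.StringID("B"))
+fmt.Println(path) // [A B]
+```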
+
+<br>
+I have tutorials and visualizations of graph and tree algorithms:
+
+- [**_Binary search tree_**](https://github.com/gyuho/learn/tree/master/doc/binary_search_tree)
+- [**_Go: heap, priority queue_**](https://github.com/gyuho/learn/tree/master/doc/go_heap_priority_queue)
+- [**_Go: red black tree_**](https://github.com/gyuho/learn/tree/master/doc/go_red_black_tree)
+- [**_Go: b-tree_**](https://github.com/gyuho/learn/tree/master/doc/go_b_tree)
+- [**_Go: graph, interface_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_interface)
+- [**_Go: graph, traversal_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_traversal)
+- [**_Go: graph, shortest path_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_shortest_path)
+- [**_Go: graph, topological sort_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_topological_sort)
+- [**_Go: graph, minimum spanning tree_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_minimum_spanning_tree)
+- [**_Go: graph, strongly connected components_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_strongly_connected_components)
+
+<br>
+For fast query and retrieval, please check out  <a href="http://google-opensource.blogspot.co.uk/2014/06/cayley-graphs-in-go.html" target="_blank">Cayley</a>.
+
+
+<br>
+<a href="http://www.youtube.com/watch?v=ImMnYq2zP4Y" target="_blank"><img src="http://img.youtube.com/vi/ImMnYq2zP4Y/0.jpg"></a>
+
+- <a href="https://www.youtube.com/channel/UCWzSgIp_DYRQnEsJuH32Fww" target="_blank">Please visit my YouTube Channel</a>
+- <a href="https://www.youtube.com/watch?v=NdfIfxTsVDo&list=PLT6aABhFfinvsSn1H195JLuHaXNS6UVhf" target="_blank">`Tree`, `Graph` Theory Algorithms (Playlist)</a>
+- <a href="https://www.youtube.com/watch?v=ImMnYq2zP4Y&list=PLT6aABhFfinvsSn1H195JLuHaXNS6UVhf&index=4" target="_blank">`Graph` : BFS, DFS</a>
diff --git a/vendor/github.com/gyuho/goraph/disjoint_set.go b/vendor/github.com/gyuho/goraph/disjoint_set.go
new file mode 100644
index 0000000..3a8085f
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/disjoint_set.go
@@ -0,0 +1,67 @@
+package goraph
+
+import "sync"
+
+// DisjointSet implements disjoint set.
+// (https://en.wikipedia.org/wiki/Disjoint-set_data_structure)
+type DisjointSet struct {
+	represent string
+	members   map[string]struct{}
+}
+
+// Forests is a set of DisjointSet.
+type Forests struct {
+	mu   sync.Mutex // guards the following
+	data map[*DisjointSet]struct{}
+}
+
+// NewForests creates a new Forests.
+func NewForests() *Forests {
+	set := &Forests{}
+	set.data = make(map[*DisjointSet]struct{})
+	return set
+}
+
+// MakeDisjointSet creates a DisjointSet.
+func MakeDisjointSet(forests *Forests, name string) {
+	newDS := &DisjointSet{}
+	newDS.represent = name
+	members := make(map[string]struct{})
+	members[name] = struct{}{}
+	newDS.members = members
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	forests.data[newDS] = struct{}{}
+}
+
+// FindSet returns the DisjointSet with the represent name.
+func FindSet(forests *Forests, name string) *DisjointSet {
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	for data := range forests.data {
+		if data.represent == name {
+			return data
+		}
+		for k := range data.members {
+			if k == name {
+				return data
+			}
+		}
+	}
+	return nil
+}
+
+// Union merges two DisjointSets, keeping ds1's represent.
+func Union(forests *Forests, ds1, ds2 *DisjointSet) {
+	newDS := &DisjointSet{}
+	newDS.represent = ds1.represent
+	newDS.members = ds1.members
+	for k := range ds2.members {
+		newDS.members[k] = struct{}{}
+	}
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	forests.data[newDS] = struct{}{}
+	delete(forests.data, ds1)
+	delete(forests.data, ds2)
+}
diff --git a/vendor/github.com/gyuho/goraph/doc.go b/vendor/github.com/gyuho/goraph/doc.go
new file mode 100644
index 0000000..191d299
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/doc.go
@@ -0,0 +1,2 @@
+// Package goraph implements graph data structures and algorithms.
+package goraph // import "github.com/gyuho/goraph"
diff --git a/vendor/github.com/gyuho/goraph/graph.go b/vendor/github.com/gyuho/goraph/graph.go
new file mode 100644
index 0000000..87f87c5
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/graph.go
@@ -0,0 +1,503 @@
+package goraph
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// ID is a unique identifier.
+type ID interface {
+	// String returns the string ID.
+	String() string
+}
+
+type StringID string
+
+func (s StringID) String() string {
+	return string(s)
+}
+
+// Node is a vertex. The ID must be unique within the graph.
+type Node interface {
+	// ID returns the ID.
+	ID() ID
+	String() string
+}
+
+type node struct {
+	id string
+}
+
+var nodeCnt uint64
+
+func NewNode(id string) Node {
+	return &node{
+		id: id,
+	}
+}
+
+func (n *node) ID() ID {
+	return StringID(n.id)
+}
+
+func (n *node) String() string {
+	return n.id
+}
+
+// Edge connects between two Nodes.
+type Edge interface {
+	Source() Node
+	Target() Node
+	Weight() float64
+	String() string
+}
+
+// edge is an Edge from Source to Target.
+type edge struct {
+	src Node
+	tgt Node
+	wgt float64
+}
+
+func NewEdge(src, tgt Node, wgt float64) Edge {
+	return &edge{
+		src: src,
+		tgt: tgt,
+		wgt: wgt,
+	}
+}
+
+func (e *edge) Source() Node {
+	return e.src
+}
+
+func (e *edge) Target() Node {
+	return e.tgt
+}
+
+func (e *edge) Weight() float64 {
+	return e.wgt
+}
+
+func (e *edge) String() string {
+	return fmt.Sprintf("%s -- %.3f -→ %s\n", e.src, e.wgt, e.tgt)
+}
+
+type EdgeSlice []Edge
+
+func (e EdgeSlice) Len() int           { return len(e) }
+func (e EdgeSlice) Less(i, j int) bool { return e[i].Weight() < e[j].Weight() }
+func (e EdgeSlice) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+
+// Graph describes the methods of graph operations.
+// It assumes that the identifier of a Node is unique,
+// and that edge weights are float64 values.
+type Graph interface {
+	// Init initializes a Graph.
+	Init()
+
+	// GetNodeCount returns the total number of nodes.
+	GetNodeCount() int
+
+	// GetNode finds the Node. It returns nil if the Node
+	// does not exist in the graph.
+	GetNode(id ID) Node
+
+	// GetNodes returns a map from node ID to Node.
+	// Graph does not allow duplicate node IDs or names.
+	GetNodes() map[ID]Node
+
+	// AddNode adds a node to a graph, and returns false
+	// if the node already existed in the graph.
+	AddNode(nd Node) bool
+
+	// DeleteNode deletes a node from a graph.
+	// It returns true if the node was deleted,
+	// and false otherwise.
+	DeleteNode(id ID) bool
+
+	// AddEdge adds an edge from nd1 to nd2 with the weight.
+	// It returns an error if a node does not exist.
+	AddEdge(id1, id2 ID, weight float64) error
+
+	// ReplaceEdge replaces an edge from id1 to id2 with the weight.
+	ReplaceEdge(id1, id2 ID, weight float64) error
+
+	// DeleteEdge deletes an edge from id1 to id2.
+	DeleteEdge(id1, id2 ID) error
+
+	// GetWeight returns the weight from id1 to id2.
+	GetWeight(id1, id2 ID) (float64, error)
+
+	// GetSources returns the map of parent Nodes.
+	// (Nodes that come towards the argument vertex.)
+	GetSources(id ID) (map[ID]Node, error)
+
+	// GetTargets returns the map of child Nodes.
+	// (Nodes that go out of the argument vertex.)
+	GetTargets(id ID) (map[ID]Node, error)
+
+	// String describes the Graph.
+	String() string
+}
+
+// graph is an internal default graph type that
+// implements all methods in Graph interface.
+type graph struct {
+	mu sync.RWMutex // guards the following
+
+	// idToNodes stores all nodes.
+	idToNodes map[ID]Node
+
+	// nodeToSources maps a Node identifier to sources (parents) with edge weights.
+	nodeToSources map[ID]map[ID]float64
+
+	// nodeToTargets maps a Node identifier to targets (children) with edge weights.
+	nodeToTargets map[ID]map[ID]float64
+}
+
+// newGraph returns a new graph.
+func newGraph() *graph {
+	return &graph{
+		idToNodes:     make(map[ID]Node),
+		nodeToSources: make(map[ID]map[ID]float64),
+		nodeToTargets: make(map[ID]map[ID]float64),
+		//
+		// without this
+		// panic: assignment to entry in nil map
+	}
+}
+
+// NewGraph returns a new graph.
+func NewGraph() Graph {
+	return newGraph()
+}
+
+func (g *graph) Init() {
+	// (X) g = newGraph()
+	// this only updates the pointer
+	//
+	//
+	// (X) *g = *newGraph()
+	// assignment copies lock value
+
+	g.idToNodes = make(map[ID]Node)
+	g.nodeToSources = make(map[ID]map[ID]float64)
+	g.nodeToTargets = make(map[ID]map[ID]float64)
+}
+
+func (g *graph) GetNodeCount() int {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return len(g.idToNodes)
+}
+
+func (g *graph) GetNode(id ID) Node {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return g.idToNodes[id]
+}
+
+func (g *graph) GetNodes() map[ID]Node {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return g.idToNodes
+}
+
+func (g *graph) unsafeExistID(id ID) bool {
+	_, ok := g.idToNodes[id]
+	return ok
+}
+
+func (g *graph) AddNode(nd Node) bool {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if g.unsafeExistID(nd.ID()) {
+		return false
+	}
+
+	id := nd.ID()
+	g.idToNodes[id] = nd
+	return true
+}
+
+func (g *graph) DeleteNode(id ID) bool {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id) {
+		return false
+	}
+
+	delete(g.idToNodes, id)
+
+	delete(g.nodeToTargets, id)
+	for _, smap := range g.nodeToTargets {
+		delete(smap, id)
+	}
+
+	delete(g.nodeToSources, id)
+	for _, smap := range g.nodeToSources {
+		delete(smap, id)
+	}
+
+	return true
+}
+
+func (g *graph) AddEdge(id1, id2 ID, weight float64) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if v, ok2 := g.nodeToTargets[id1][id2]; ok2 {
+			g.nodeToTargets[id1][id2] = v + weight
+		} else {
+			g.nodeToTargets[id1][id2] = weight
+		}
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id2] = weight
+		g.nodeToTargets[id1] = tmap
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		if v, ok2 := g.nodeToSources[id2][id1]; ok2 {
+			g.nodeToSources[id2][id1] = v + weight
+		} else {
+			g.nodeToSources[id2][id1] = weight
+		}
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id1] = weight
+		g.nodeToSources[id2] = tmap
+	}
+
+	return nil
+}
+
+func (g *graph) ReplaceEdge(id1, id2 ID, weight float64) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		g.nodeToTargets[id1][id2] = weight
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id2] = weight
+		g.nodeToTargets[id1] = tmap
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		g.nodeToSources[id2][id1] = weight
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id1] = weight
+		g.nodeToSources[id2] = tmap
+	}
+	return nil
+}
+
+func (g *graph) DeleteEdge(id1, id2 ID) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if _, ok := g.nodeToTargets[id1][id2]; ok {
+			delete(g.nodeToTargets[id1], id2)
+		}
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		if _, ok := g.nodeToSources[id2][id1]; ok {
+			delete(g.nodeToSources[id2], id1)
+		}
+	}
+	return nil
+}
+
+func (g *graph) GetWeight(id1, id2 ID) (float64, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id1) {
+		return 0, fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return 0, fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if v, ok := g.nodeToTargets[id1][id2]; ok {
+			return v, nil
+		}
+	}
+	return 0.0, fmt.Errorf("there is no edge from %s to %s", id1, id2)
+}
+
+func (g *graph) GetSources(id ID) (map[ID]Node, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id) {
+		return nil, fmt.Errorf("%s does not exist in the graph.", id)
+	}
+
+	rs := make(map[ID]Node)
+	if _, ok := g.nodeToSources[id]; ok {
+		for n := range g.nodeToSources[id] {
+			rs[n] = g.idToNodes[n]
+		}
+	}
+	return rs, nil
+}
+
+func (g *graph) GetTargets(id ID) (map[ID]Node, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id) {
+		return nil, fmt.Errorf("%s does not exist in the graph.", id)
+	}
+
+	rs := make(map[ID]Node)
+	if _, ok := g.nodeToTargets[id]; ok {
+		for n := range g.nodeToTargets[id] {
+			rs[n] = g.idToNodes[n]
+		}
+	}
+	return rs, nil
+}
+
+func (g *graph) String() string {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	buf := new(bytes.Buffer)
+	for id1, nd1 := range g.idToNodes {
+		nmap, _ := g.GetTargets(id1)
+		for id2, nd2 := range nmap {
+			weight, _ := g.GetWeight(id1, id2)
+			fmt.Fprintf(buf, "%s -- %.3f -→ %s\n", nd1, weight, nd2)
+		}
+	}
+	return buf.String()
+}
+
+// NewGraphFromJSON returns a new Graph from a JSON file.
+// Here's the sample JSON data:
+//
+//	{
+//	    "graph_00": {
+//	        "S": {
+//	            "A": 100,
+//	            "B": 14,
+//	            "C": 200
+//	        },
+//	        "A": {
+//	            "S": 15,
+//	            "B": 5,
+//	            "D": 20,
+//	            "T": 44
+//	        },
+//	        "B": {
+//	            "S": 14,
+//	            "A": 5,
+//	            "D": 30,
+//	            "E": 18
+//	        },
+//	        "C": {
+//	            "S": 9,
+//	            "E": 24
+//	        },
+//	        "D": {
+//	            "A": 20,
+//	            "B": 30,
+//	            "E": 2,
+//	            "F": 11,
+//	            "T": 16
+//	        },
+//	        "E": {
+//	            "B": 18,
+//	            "C": 24,
+//	            "D": 2,
+//	            "F": 6,
+//	            "T": 19
+//	        },
+//	        "F": {
+//	            "D": 11,
+//	            "E": 6,
+//	            "T": 6
+//	        },
+//	        "T": {
+//	            "A": 44,
+//	            "D": 16,
+//	            "F": 6,
+//	            "E": 19
+//	        }
+//	    }
+//	}
+//
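+// A usage sketch, where jsonFile is any io.Reader over such JSON:
+//
+//	g, err := NewGraphFromJSON(jsonFile, "graph_00")
+//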
+func NewGraphFromJSON(rd io.Reader, graphID string) (Graph, error) {
+	js := make(map[string]map[string]map[string]float64)
+	dec := json.NewDecoder(rd)
+	for {
+		if err := dec.Decode(&js); err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+	}
+	if _, ok := js[graphID]; !ok {
+		return nil, fmt.Errorf("%s does not exist", graphID)
+	}
+	gmap := js[graphID]
+
+	g := newGraph()
+	for id1, mm := range gmap {
+		nd1 := g.GetNode(StringID(id1))
+		if nd1 == nil {
+			nd1 = NewNode(id1)
+			if ok := g.AddNode(nd1); !ok {
+				return nil, fmt.Errorf("%s already exists", nd1)
+			}
+		}
+		for id2, weight := range mm {
+			nd2 := g.GetNode(StringID(id2))
+			if nd2 == nil {
+				nd2 = NewNode(id2)
+				if ok := g.AddNode(nd2); !ok {
+					return nil, fmt.Errorf("%s already exists", nd2)
+				}
+			}
+			g.ReplaceEdge(nd1.ID(), nd2.ID(), weight)
+		}
+	}
+
+	return g, nil
+}
diff --git a/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go b/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go
new file mode 100644
index 0000000..a86e279
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go
@@ -0,0 +1,287 @@
+package goraph
+
+import (
+	"container/heap"
+	"math"
+	"sort"
+)
+
+// Kruskal finds the minimum spanning tree with disjoint-set data structure.
+// (http://en.wikipedia.org/wiki/Kruskal%27s_algorithm)
+//
+//	 0. Kruskal(G)
+//	 1.
+//	 2. 	A = ∅
+//	 3.
+//	 4. 	for each vertex v in G:
+//	 5. 		MakeDisjointSet(v)
+//	 6.
+//	 7. 	edges = get all edges
+//	 8. 	sort edges in ascending order of weight
+//	 9.
+//	10. 	for each edge (u, v) in edges:
+//	11. 		if FindSet(u) ≠ FindSet(v):
+//	12. 			A = A ∪ {(u, v)}
+//	13. 			Union(u, v)
+//	14.
+//	15. 	return A
+//
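+// A usage sketch, given a Graph g:
+//
+//	mst, err := Kruskal(g) // the tree edges as a map[Edge]struct{}
+//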
+func Kruskal(g Graph) (map[Edge]struct{}, error) {
+
+	// A = ∅
+	A := make(map[Edge]struct{})
+
+	// disjointSet maps a member Node to a represent.
+	// (https://en.wikipedia.org/wiki/Disjoint-set_data_structure)
+	forests := NewForests()
+
+	// for each vertex v in G:
+	for _, nd := range g.GetNodes() {
+		// MakeDisjointSet(v)
+		MakeDisjointSet(forests, nd.String())
+	}
+
+	// edges = get all edges
+	edges := []Edge{}
+	foundEdge := make(map[string]struct{})
+	for id1, nd1 := range g.GetNodes() {
+		tm, err := g.GetTargets(id1)
+		if err != nil {
+			return nil, err
+		}
+		for id2, nd2 := range tm {
+			weight, err := g.GetWeight(id1, id2)
+			if err != nil {
+				return nil, err
+			}
+			edge := NewEdge(nd1, nd2, weight)
+			if _, ok := foundEdge[edge.String()]; !ok {
+				edges = append(edges, edge)
+				foundEdge[edge.String()] = struct{}{}
+			}
+		}
+
+		sm, err := g.GetSources(id1)
+		if err != nil {
+			return nil, err
+		}
+		for id3, nd3 := range sm {
+			weight, err := g.GetWeight(id3, id1)
+			if err != nil {
+				return nil, err
+			}
+			edge := NewEdge(nd3, nd1, weight)
+			if _, ok := foundEdge[edge.String()]; !ok {
+				edges = append(edges, edge)
+				foundEdge[edge.String()] = struct{}{}
+			}
+		}
+	}
+
+	// sort edges in ascending order of weight
+	sort.Sort(EdgeSlice(edges))
+
+	// for each edge (u, v) in edges:
+	for _, edge := range edges {
+		// if FindSet(u) ≠ FindSet(v):
+		if FindSet(forests, edge.Source().String()).represent != FindSet(forests, edge.Target().String()).represent {
+
+			// A = A ∪ {(u, v)}
+			A[edge] = struct{}{}
+
+			// Union(u, v)
+			// overwrite v's represent with u's represent
+			Union(forests, FindSet(forests, edge.Source().String()), FindSet(forests, edge.Target().String()))
+		}
+	}
+
+	return A, nil
+}
+
+// Prim finds the minimum spanning tree with min-heap (priority queue).
+// (http://en.wikipedia.org/wiki/Prim%27s_algorithm)
+//
+//	 0. Prim(G, source)
+//	 1.
+//	 2. 	let Q be a priority queue
+//	 3. 	distance[source] = 0
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v ≠ source:
+//	 8. 			distance[v] = ∞
+//	 9. 			prev[v] = undefined
+//	10.
+//	11. 		Q.add_with_priority(v, distance[v])
+//	12.
+//	13.
+//	14. 	while Q is not empty:
+//	15.
+//	16. 		u = Q.extract_min()
+//	17.
+//	18. 		for each adjacent vertex v of u:
+//	19.
+//	21. 			if v ∈ Q and distance[v] > weight(u, v):
+//	22. 				distance[v] = weight(u, v)
+//	23. 				prev[v] = u
+//	24. 				Q.decrease_priority(v, weight(u, v))
+//	25.
+//	26.
+//	27. 	return tree from prev
+//
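+// A usage sketch, given a Graph g:
+//
+//	mst, err := Prim(g, StringID("S")) // grow the tree from source node "S"
+//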
+func Prim(g Graph, src ID) (map[Edge]struct{}, error) {
+
+	// let Q be a priority queue
+	minHeap := &nodeDistanceHeap{}
+
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[src] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+
+		// if v ≠ src:
+		if id != src {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+
+		// Q.add_with_priority(v, distance[v])
+		nds := nodeDistance{}
+		nds.id = id
+		nds.distance = distance[id]
+
+		heap.Push(minHeap, nds)
+	}
+
+	heap.Init(minHeap)
+	prev := make(map[ID]ID)
+
+	// while Q is not empty:
+	for minHeap.Len() != 0 {
+
+		// u = Q.extract_min()
+		u := heap.Pop(minHeap).(nodeDistance)
+		uID := u.id
+
+		// for each adjacent vertex v of u:
+		tm, err := g.GetTargets(uID)
+		if err != nil {
+			return nil, err
+		}
+		for vID := range tm {
+
+			isExist := false
+			for _, one := range *minHeap {
+				if vID == one.id {
+					isExist = true
+					break
+				}
+			}
+
+			// weight(u, v)
+			weight, err := g.GetWeight(uID, vID)
+			if err != nil {
+				return nil, err
+			}
+
+			// if v ∈ Q and distance[v] > weight(u, v):
+			if isExist && distance[vID] > weight {
+
+				// distance[v] = weight(u, v)
+				distance[vID] = weight
+
+				// prev[v] = u
+				prev[vID] = uID
+
+				// Q.decrease_priority(v, weight(u, v))
+				minHeap.updateDistance(vID, weight)
+				heap.Init(minHeap)
+			}
+		}
+
+		sm, err := g.GetSources(uID)
+		if err != nil {
+			return nil, err
+		}
+		vID := uID
+		for uID := range sm {
+
+			isExist := false
+			for _, one := range *minHeap {
+				if vID == one.id {
+					isExist = true
+					break
+				}
+			}
+
+			// weight(u, v)
+			weight, err := g.GetWeight(uID, vID)
+			if err != nil {
+				return nil, err
+			}
+
+			// if v ∈ Q and distance[v] > weight(u, v):
+			if isExist && distance[vID] > weight {
+
+				// distance[v] = weight(u, v)
+				distance[vID] = weight
+
+				// prev[v] = u
+				prev[vID] = uID
+
+				// Q.decrease_priority(v, weight(u, v))
+				minHeap.updateDistance(vID, weight)
+				heap.Init(minHeap)
+			}
+		}
+	}
+
+	tree := make(map[Edge]struct{})
+	for k, v := range prev {
+		weight, err := g.GetWeight(v, k)
+		if err != nil {
+			return nil, err
+		}
+		tree[NewEdge(g.GetNode(v), g.GetNode(k), weight)] = struct{}{}
+	}
+	return tree, nil
+}
+
+type nodeDistance struct {
+	id       ID
+	distance float64
+}
+
+// container.Heap's Interface needs sort.Interface, Push, Pop to be implemented
+
+// nodeDistanceHeap is a min-heap of nodeDistances.
+type nodeDistanceHeap []nodeDistance
+
+func (h nodeDistanceHeap) Len() int           { return len(h) }
+func (h nodeDistanceHeap) Less(i, j int) bool { return h[i].distance < h[j].distance } // Min-Heap
+func (h nodeDistanceHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+func (h *nodeDistanceHeap) Push(x interface{}) {
+	*h = append(*h, x.(nodeDistance))
+}
+
+func (h *nodeDistanceHeap) Pop() interface{} {
+	heapSize := len(*h)
+	lastNode := (*h)[heapSize-1]
+	*h = (*h)[0 : heapSize-1]
+	return lastNode
+}
+
+func (h *nodeDistanceHeap) updateDistance(id ID, val float64) {
+	for i := 0; i < len(*h); i++ {
+		if (*h)[i].id == id {
+			(*h)[i].distance = val
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/gyuho/goraph/shortest_path.go b/vendor/github.com/gyuho/goraph/shortest_path.go
new file mode 100644
index 0000000..e6f405c
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/shortest_path.go
@@ -0,0 +1,348 @@
+package goraph
+
+import (
+	"container/heap"
+	"fmt"
+	"math"
+)
+
+// Dijkstra returns the shortest path using Dijkstra
+// algorithm with a min-priority queue. This algorithm
+// does not work with negative weight edges.
+// (https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm)
+//
+//	 0. Dijkstra(G, source, target)
+//	 1.
+//	 2. 	let Q be a priority queue
+//	 3. 	distance[source] = 0
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v ≠ source:
+//	 8. 			distance[v] = ∞
+//	 9. 			prev[v] = undefined
+//	10.
+//	11. 		Q.add_with_priority(v, distance[v])
+//	12.
+//	13. 	while Q is not empty:
+//	14.
+//	15. 		u = Q.extract_min()
+//	16. 		if u == target:
+//	17. 			break
+//	18.
+//	19. 		for each child vertex v of u:
+//	20.
+//	21. 			alt = distance[u] + weight(u, v)
+//	22. 			if distance[v] > alt:
+//	23. 				distance[v] = alt
+//	24. 				prev[v] = u
+//	25. 				Q.decrease_priority(v, alt)
+//	26.
+//	27. 		reheapify(Q)
+//	28.
+//	29.
+//	30. 	path = []
+//	31. 	u = target
+//	32. 	while prev[u] is defined:
+//	33. 		path.push_front(u)
+//	34. 		u = prev[u]
+//	35.
+//	36. 	return path, prev
+//
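+// A usage sketch, given a Graph g:
+//
+//	path, dist, err := Dijkstra(g, StringID("S"), StringID("T"))
+//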
+func Dijkstra(g Graph, source, target ID) ([]ID, map[ID]float64, error) {
+	// let Q be a priority queue
+	minHeap := &nodeDistanceHeap{}
+
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[source] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+		// if v ≠ source:
+		if id != source {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+
+		// Q.add_with_priority(v, distance[v])
+		nds := nodeDistance{}
+		nds.id = id
+		nds.distance = distance[id]
+
+		heap.Push(minHeap, nds)
+	}
+
+	heap.Init(minHeap)
+	prev := make(map[ID]ID)
+
+	// while Q is not empty:
+	for minHeap.Len() != 0 {
+
+		// u = Q.extract_min()
+		u := heap.Pop(minHeap).(nodeDistance)
+
+		// if u == target:
+		if u.id == target {
+			break
+		}
+
+		// for each child vertex v of u:
+		cmap, err := g.GetTargets(u.id)
+		if err != nil {
+			return nil, nil, err
+		}
+		for v := range cmap {
+
+			// alt = distance[u] + weight(u, v)
+			weight, err := g.GetWeight(u.id, v)
+			if err != nil {
+				return nil, nil, err
+			}
+			alt := distance[u.id] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+
+				// distance[v] = alt
+				distance[v] = alt
+
+				// prev[v] = u
+				prev[v] = u.id
+
+				// Q.decrease_priority(v, alt)
+				minHeap.updateDistance(v, alt)
+			}
+		}
+		heap.Init(minHeap)
+	}
+
+	// path = []
+	path := []ID{}
+
+	// u = target
+	u := target
+
+	// while prev[u] is defined:
+	for {
+		if _, ok := prev[u]; !ok {
+			break
+		}
+		// path.push_front(u)
+		temp := make([]ID, len(path)+1)
+		temp[0] = u
+		copy(temp[1:], path)
+		path = temp
+
+		// u = prev[u]
+		u = prev[u]
+	}
+
+	// add the source
+	temp := make([]ID, len(path)+1)
+	temp[0] = source
+	copy(temp[1:], path)
+	path = temp
+
+	return path, distance, nil
+}
+
+// BellmanFord returns the shortest path using the Bellman-Ford algorithm.
+// This algorithm works with negative weight edges.
+// Time complexity is O(|V||E|).
+// (http://courses.csail.mit.edu/6.006/spring11/lectures/lec15.pdf)
+// It returns an error when there is a negative-weight cycle, since
+// traversing such a cycle adds up to infinite negative weight.
+//
+//	 0. BellmanFord(G, source, target)
+//	 1.
+//	 2. 	distance[source] = 0
+//	 3.
+//	 4. 	for each vertex v in G:
+//	 5.
+//	 6. 		if v ≠ source:
+//	 7. 			distance[v] = ∞
+//	 8. 			prev[v] = undefined
+//	 9.
+//	10.
+//	11. 	for 1 to |V|-1:
+//	12.
+//	13. 		for every edge (u, v):
+//	14.
+//	15. 			alt = distance[u] + weight(u, v)
+//	16. 			if distance[v] > alt:
+//	17. 				distance[v] = alt
+//	18. 				prev[v] = u
+//	19.
+//	20.
+//	21. 	for every edge (u, v):
+//	22.
+//	23. 		alt = distance[u] + weight(u, v)
+//	24. 		if distance[v] > alt:
+//	25. 			there is a negative-weight cycle
+//	26.
+//	27.
+//	28. 	path = []
+//	29. 	u = target
+//	30. 	while prev[u] is defined:
+//	31. 		path.push_front(u)
+//	32. 		u = prev[u]
+//	33.
+//	34. 	return path, prev
+//
+func BellmanFord(g Graph, source, target ID) ([]ID, map[ID]float64, error) {
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[source] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+
+		// if v ≠ source:
+		if id != source {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+	}
+
+	prev := make(map[ID]ID)
+
+	// for 1 to |V|-1:
+	for i := 1; i <= g.GetNodeCount()-1; i++ {
+
+		// for every edge (u, v):
+		for id := range g.GetNodes() {
+
+			cmap, err := g.GetTargets(id)
+			if err != nil {
+				return nil, nil, err
+			}
+			u := id
+			for v := range cmap {
+				// edge (u, v)
+				weight, err := g.GetWeight(u, v)
+				if err != nil {
+					return nil, nil, err
+				}
+
+				// alt = distance[u] + weight(u, v)
+				alt := distance[u] + weight
+
+				// if distance[v] > alt:
+				if distance[v] > alt {
+					// distance[v] = alt
+					distance[v] = alt
+
+					// prev[v] = u
+					prev[v] = u
+				}
+			}
+
+			pmap, err := g.GetSources(id)
+			if err != nil {
+				return nil, nil, err
+			}
+			v := id
+			for u := range pmap {
+				// edge (u, v)
+				weight, err := g.GetWeight(u, v)
+				if err != nil {
+					return nil, nil, err
+				}
+
+				// alt = distance[u] + weight(u, v)
+				alt := distance[u] + weight
+
+				// if distance[v] > alt:
+				if distance[v] > alt {
+					// distance[v] = alt
+					distance[v] = alt
+
+					// prev[v] = u
+					prev[v] = u
+				}
+			}
+		}
+	}
+
+	// for every edge (u, v):
+	for id := range g.GetNodes() {
+
+		cmap, err := g.GetTargets(id)
+		if err != nil {
+			return nil, nil, err
+		}
+		u := id
+		for v := range cmap {
+			// edge (u, v)
+			weight, err := g.GetWeight(u, v)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			// alt = distance[u] + weight(u, v)
+			alt := distance[u] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+				return nil, nil, fmt.Errorf("there is a negative-weight cycle: %v", g)
+			}
+		}
+
+		pmap, err := g.GetSources(id)
+		if err != nil {
+			return nil, nil, err
+		}
+		v := id
+		for u := range pmap {
+			// edge (u, v)
+			weight, err := g.GetWeight(u, v)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			// alt = distance[u] + weight(u, v)
+			alt := distance[u] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+				return nil, nil, fmt.Errorf("there is a negative-weight cycle: %v", g)
+			}
+		}
+	}
+
+	// path = []
+	path := []ID{}
+
+	// u = target
+	u := target
+
+	// while prev[u] is defined:
+	for {
+		if _, ok := prev[u]; !ok {
+			break
+		}
+		// path.push_front(u)
+		temp := make([]ID, len(path)+1)
+		temp[0] = u
+		copy(temp[1:], path)
+		path = temp
+
+		// u = prev[u]
+		u = prev[u]
+	}
+
+	// add the source
+	temp := make([]ID, len(path)+1)
+	temp[0] = source
+	copy(temp[1:], path)
+	path = temp
+
+	return path, distance, nil
+}
diff --git a/vendor/github.com/gyuho/goraph/strongly_connected_components.go b/vendor/github.com/gyuho/goraph/strongly_connected_components.go
new file mode 100644
index 0000000..b0a11a5
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/strongly_connected_components.go
@@ -0,0 +1,195 @@
+package goraph
+
+import "sync"
+
+// Tarjan finds the strongly connected components.
+// In mathematics, a directed graph is "strongly connected"
+// if every vertex is reachable from every other vertex;
+// that is, if there is a path in each direction between
+// each pair of vertices of the graph.
+// The "strongly connected components" of an arbitrary graph
+// partition it into sub-graphs that are themselves strongly connected.
+// Formally, "Strongly connected components" of a graph is a maximal
+// set of vertices C in G.V such that for all u, v ∈ C, there is a path
+// both from u to v, and from v to u.
+// (https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm)
+//
+//	 0. Tarjan(G):
+//	 1.
+//	 2. 	globalIndex = 0 // smallest unused index
+//	 3. 	let S be a stack
+//	 4. 	result = [][]
+//	 5.
+//	 6. 	for each vertex v in G:
+//	 7. 		if v.index is undefined:
+//	 8. 			tarjan(G, v, globalIndex, S, result)
+//	 9.
+//	10. 	return result
+//	11.
+//	12.
+//	13. tarjan(G, v, globalIndex, S, result):
+//	14.
+//	15. 	v.index = globalIndex
+//	16. 	v.lowLink = globalIndex
+//	17. 	globalIndex++
+//	18. 	S.push(v)
+//	19.
+//	20. 	for each child vertex w of v:
+//	21.
+//	22. 		if w.index is undefined:
+//	23. 			recursively tarjan(G, w, globalIndex, S, result)
+//	24. 			v.lowLink = min(v.lowLink, w.lowLink)
+//	25.
+//	26. 		else if w is in S:
+//	27. 			v.lowLink = min(v.lowLink, w.index)
+//	28.
+//	29. 	// if v is the root
+//	30. 	if v.lowLink == v.index:
+//	31.
+//	32. 		// start a new strongly connected component
+//	33. 		component = []
+//	34.
+//	35. 		while True:
+//	36.
+//	37. 			u = S.pop()
+//	38. 			component.push(u)
+//	39.
+//	40. 			if u == v:
+//	41. 				result.push(component)
+//	42. 				break
+//
+func Tarjan(g Graph) [][]ID {
+	d := newTarjanData()
+
+	// for each vertex v in G:
+	for v := range g.GetNodes() {
+		// if v.index is undefined:
+		if _, ok := d.index[v]; !ok {
+			// tarjan(G, v, globalIndex, S, result)
+			tarjan(g, v, d)
+		}
+	}
+	return d.result
+}
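+
+// Editorial sketch: on a graph with edges A->B, B->C, C->A and C->D, Tarjan
+// is expected to return two components, e.g. [][]ID{{A, B, C}, {D}}; neither
+// the order of components nor of members within a component is guaranteed.
+// NewGraph/AddNode/AddEdge are assumed package constructors here:
+//
+//	g := NewGraph()
+//	// ... add the four nodes and edges above ...
+//	components := Tarjan(g)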
+
+type tarjanData struct {
+	mu sync.Mutex // guards the following
+
+	// globalIndex is the smallest unused index
+	globalIndex int
+
+	// index records the order in which
+	// each node was discovered.
+	index map[ID]int
+
+	// lowLink is the smallest index of any node
+	// reachable from v, including v itself.
+	lowLink map[ID]int
+
+	// S is the stack.
+	S []ID
+
+	// extra map to check if a vertex is in S.
+	smap map[ID]struct{}
+
+	result [][]ID
+}
+
+func newTarjanData() *tarjanData {
+	return &tarjanData{
+		globalIndex: 0,
+		index:       make(map[ID]int),
+		lowLink:     make(map[ID]int),
+		S:           []ID{},
+		smap:        make(map[ID]struct{}),
+		result:      [][]ID{},
+	}
+}
+
+func tarjan(
+	g Graph,
+	id ID,
+	data *tarjanData,
+) {
+	// This is not an inherently parallelizable problem,
+	// but lock anyway to keep the shared state consistent.
+	data.mu.Lock()
+
+	// v.index = globalIndex
+	data.index[id] = data.globalIndex
+
+	// v.lowLink = globalIndex
+	data.lowLink[id] = data.globalIndex
+
+	// globalIndex++
+	data.globalIndex++
+
+	// S.push(v)
+	data.S = append(data.S, id)
+	data.smap[id] = struct{}{}
+
+	data.mu.Unlock()
+
+	// for each child vertex w of v:
+	cmap, err := g.GetTargets(id)
+	if err != nil {
+		panic(err)
+	}
+	for w := range cmap {
+
+		// if w.index is undefined:
+		if _, ok := data.index[w]; !ok {
+
+			// recursively tarjan(G, w, globalIndex, S, result)
+			tarjan(g, w, data)
+
+			// v.lowLink = min(v.lowLink, w.lowLink)
+			data.lowLink[id] = min(data.lowLink[id], data.lowLink[w])
+
+		} else if _, ok := data.smap[w]; ok {
+			// else if w is in S:
+
+			// v.lowLink = min(v.lowLink, w.index)
+			data.lowLink[id] = min(data.lowLink[id], data.index[w])
+		}
+	}
+
+	data.mu.Lock()
+	defer data.mu.Unlock()
+
+	// if v is the root
+	// if v.lowLink == v.index:
+	if data.lowLink[id] == data.index[id] {
+		// start a new strongly connected component
+		component := []ID{}
+
+		// while True:
+		for {
+
+			// u = S.pop()
+			u := data.S[len(data.S)-1]
+			data.S = data.S[:len(data.S)-1 : len(data.S)-1]
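+			// (editorial note: the three-index slice also caps the
+			// capacity, so a later append cannot overwrite the
+			// popped element through the shared backing array)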
+			delete(data.smap, u)
+
+			// component.push(u)
+			component = append(component, u)
+
+			// if u == v:
+			if u == id {
+				data.result = append(data.result, component)
+				break
+			}
+		}
+	}
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/gyuho/goraph/test b/vendor/github.com/gyuho/goraph/test
new file mode 100755
index 0000000..a7d8a6d
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/test
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+TEST=./...;
+FMT="*.go"
+
+echo "Running tests...";
+go test -v -cover -cpu 1,2,4 $TEST;
+go test -v -cover -cpu 1,2,4 -race $TEST;
+
+echo "Checking gofmt..."
+fmtRes=$(gofmt -l -s $FMT)
+if [ -n "${fmtRes}" ]; then
+	echo -e "gofmt checking failed:\n${fmtRes}"
+	exit 255
+fi
+
+echo "Checking govet..."
+vetRes=$(go vet $TEST)
+if [ -n "${vetRes}" ]; then
+	echo -e "govet checking failed:\n${vetRes}"
+	exit 255
+fi
+
+echo "Success";
diff --git a/vendor/github.com/gyuho/goraph/topological_sort.go b/vendor/github.com/gyuho/goraph/topological_sort.go
new file mode 100644
index 0000000..b63675a
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/topological_sort.go
@@ -0,0 +1,98 @@
+package goraph
+
+// TopologicalSort does a topological sort (ordering) with DFS.
+// It returns true if the graph is a DAG (no cycle, so a topological
+// sort exists), and false otherwise (a cycle, so no topological sort).
+//
+//	 0. TopologicalSort(G)
+//	 1.
+//	 2. 	L = Empty list that will contain the sorted nodes
+//	 3. 	isDAG = true
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v.color == "white":
+//	 8.
+//	 9. 			topologicalSortVisit(v, L, isDAG)
+//	10.
+//	11.
+//	12.
+//	13.
+//	14. topologicalSortVisit(v, L, isDAG)
+//	15.
+//	16. 	if v.color == "gray":
+//	17. 		isDAG = false
+//	18. 		return
+//	19.
+//	20. 	if v.color == "white":
+//	21.
+//	22. 		v.color = "gray"
+//	23.
+//	24.			for each child vertex w of v:
+//	25. 			topologicalSortVisit(w, L, isDAG)
+//	26.
+//	27. 		v.color = "black"
+//	28.			L.push_front(v)
+//
+func TopologicalSort(g Graph) ([]ID, bool) {
+
+	// L = Empty list that will contain the sorted nodes
+	L := []ID{}
+	isDAG := true
+	color := make(map[ID]string)
+	for v := range g.GetNodes() {
+		color[v] = "white"
+	}
+
+	// for each vertex v in G:
+	for v := range g.GetNodes() {
+		// if v.color == "white":
+		if color[v] == "white" {
+			// topologicalSortVisit(v, L, isDAG)
+			topologicalSortVisit(g, v, &L, &isDAG, &color)
+		}
+	}
+
+	return L, isDAG
+}
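+
+// Editorial sketch: for a DAG such as A->B, A->C, B->D, TopologicalSort
+// returns an ordering like [A C B D] with isDAG == true; adding D->A would
+// introduce a cycle and flip isDAG to false (the returned order is then
+// meaningless):
+//
+//	order, ok := TopologicalSort(g)
+//	if !ok {
+//		// the graph has a cycle; no topological order exists
+//	}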
+
+func topologicalSortVisit(
+	g Graph,
+	id ID,
+	L *[]ID,
+	isDAG *bool,
+	color *map[ID]string,
+) {
+
+	// if v.color == "gray":
+	if (*color)[id] == "gray" {
+		// isDAG = false
+		*isDAG = false
+		return
+	}
+
+	// if v.color == "white":
+	if (*color)[id] == "white" {
+		// v.color = "gray"
+		(*color)[id] = "gray"
+
+		// for each child vertex w of v:
+		cmap, err := g.GetTargets(id)
+		if err != nil {
+			panic(err)
+		}
+		for w := range cmap {
+			// topologicalSortVisit(w, L, isDAG)
+			topologicalSortVisit(g, w, L, isDAG, color)
+		}
+
+		// v.color = "black"
+		(*color)[id] = "black"
+
+		// L.push_front(v)
+		temp := make([]ID, len(*L)+1)
+		temp[0] = id
+		copy(temp[1:], *L)
+		*L = temp
+	}
+}
diff --git a/vendor/github.com/gyuho/goraph/traversal.go b/vendor/github.com/gyuho/goraph/traversal.go
new file mode 100644
index 0000000..fa45c29
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/traversal.go
@@ -0,0 +1,184 @@
+package goraph
+
+// BFS does breadth-first search, and returns the list of vertices.
+// (https://en.wikipedia.org/wiki/Breadth-first_search)
+//
+//	 0. BFS(G, v):
+//	 1.
+//	 2. 	let Q be a queue
+//	 3. 	Q.push(v)
+//	 4. 	label v as visited
+//	 5.
+//	 6. 	while Q is not empty:
+//	 7.
+//	 8. 		u = Q.dequeue()
+//	 9.
+//	10. 		for each vertex w adjacent to u:
+//	11.
+//	12. 			if w is not visited yet:
+//	13. 				Q.push(w)
+//	14. 				label w as visited
+//
+func BFS(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	q := []ID{id}
+	visited := make(map[ID]bool)
+	visited[id] = true
+	rs := []ID{id}
+
+	// while Q is not empty:
+	for len(q) != 0 {
+
+		u := q[0]
+		q = q[1:len(q):len(q)]
+
+		// for each vertex w adjacent to u:
+		cmap, _ := g.GetTargets(u)
+		for _, w := range cmap {
+			// if w is not visited yet:
+			if _, ok := visited[w.ID()]; !ok {
+				q = append(q, w.ID())  // Q.push(w)
+				visited[w.ID()] = true // label w as visited
+
+				rs = append(rs, w.ID())
+			}
+		}
+		pmap, _ := g.GetSources(u)
+		for _, w := range pmap {
+			// if w is not visited yet:
+			if _, ok := visited[w.ID()]; !ok {
+				q = append(q, w.ID())  // Q.push(w)
+				visited[w.ID()] = true // label w as visited
+
+				rs = append(rs, w.ID())
+			}
+		}
+	}
+
+	return rs
+}
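+
+// Editorial note: the BFS above follows both GetTargets (out-edges) and
+// GetSources (in-edges), so it effectively traverses the directed graph as
+// if it were undirected; DFS and DFSRecursion below behave the same way.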
+
+// DFS does depth-first search, and returns the list of vertices.
+// (https://en.wikipedia.org/wiki/Depth-first_search)
+//
+//	 0. DFS(G, v):
+//	 1.
+//	 2. 	let S be a stack
+//	 3. 	S.push(v)
+//	 4.
+//	 5. 	while S is not empty:
+//	 6.
+//	 7. 		u = S.pop()
+//	 8.
+//	 9. 		if u is not visited yet:
+//	10.
+//	11. 			label u as visited
+//	12.
+//	13. 			for each vertex w adjacent to u:
+//	14.
+//	15. 				if w is not visited yet:
+//	16. 					S.push(w)
+//
+func DFS(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	s := []ID{id}
+	visited := make(map[ID]bool)
+	rs := []ID{}
+
+	// while S is not empty:
+	for len(s) != 0 {
+
+		u := s[len(s)-1]
+		s = s[:len(s)-1 : len(s)-1]
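+		// (editorial note: the three-index slice caps capacity so a
+		// later append cannot clobber the popped element)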
+
+		// if u is not visited yet:
+		if _, ok := visited[u]; !ok {
+			// label u as visited
+			visited[u] = true
+
+			rs = append(rs, u)
+
+			// for each vertex w adjacent to u:
+			cmap, _ := g.GetTargets(u)
+			for _, w := range cmap {
+				// if w is not visited yet:
+				if _, ok := visited[w.ID()]; !ok {
+					s = append(s, w.ID()) // S.push(w)
+				}
+			}
+			pmap, _ := g.GetSources(u)
+			for _, w := range pmap {
+				// if w is not visited yet:
+				if _, ok := visited[w.ID()]; !ok {
+					s = append(s, w.ID()) // S.push(w)
+				}
+			}
+		}
+	}
+
+	return rs
+}
+
+// DFSRecursion does depth-first search recursively.
+//
+//	 0. DFS(G, v):
+//	 1.
+//	 2. 	if v is visited:
+//	 3. 		return
+//	 4.
+//	 5. 	label v as visited
+//	 6.
+//	 7. 	for each vertex u adjacent to v:
+//	 8.
+//	 9. 		if u is not visited yet:
+//	10. 			recursive DFS(G, u)
+//
+func DFSRecursion(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	visited := make(map[ID]bool)
+	rs := []ID{}
+
+	dfsRecursion(g, id, visited, &rs)
+
+	return rs
+}
+
+func dfsRecursion(g Graph, id ID, visited map[ID]bool, rs *[]ID) {
+	// base case of recursion
+	//
+	// if v is visited:
+	if _, ok := visited[id]; ok {
+		return
+	}
+
+	// label v as visited
+	visited[id] = true
+	*rs = append(*rs, id)
+
+	// for each vertex u adjacent to v:
+	cmap, _ := g.GetTargets(id)
+	for _, u := range cmap {
+		// if u is not visited yet:
+		if _, ok := visited[u.ID()]; !ok {
+			// recursive DFS(G, u)
+			dfsRecursion(g, u.ID(), visited, rs)
+		}
+	}
+	pmap, _ := g.GetSources(id)
+	for _, u := range pmap {
+		// if u is not visited yet:
+		if _, ok := visited[u.ID()]; !ok {
+			// recursive DFS(G, u)
+			dfsRecursion(g, u.ID(), visited, rs)
+		}
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-go/adapters/common/adapter_proxy.go
index 13b98b0..6c32422 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/adapter_proxy.go
@@ -86,9 +86,9 @@
 	}
 
 	// Set up the required rpc arguments
-	topic := kafka.Topic{Name: fromAdapter}
-	replyToTopic := kafka.Topic{Name: toAdapter}
-	rpc := "Process_inter_adapter_message"
+	topic := kafka.Topic{Name: toAdapter}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_inter_adapter_message"
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
 	log.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
index 137877f..738a77a 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
@@ -29,10 +29,10 @@
 )
 
 type CoreProxy struct {
-	kafkaICProxy *kafka.InterContainerProxy
-	adapterTopic string
-	coreTopic    string
-	deviceIdCoreMap map[string]string
+	kafkaICProxy        *kafka.InterContainerProxy
+	adapterTopic        string
+	coreTopic           string
+	deviceIdCoreMap     map[string]string
 	lockDeviceIdCoreMap sync.RWMutex
 
 }
@@ -233,4 +233,101 @@
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	log.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
+
+}
+
+func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
+	log.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
+	rpc := "GetDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		log.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
+	log.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+	rpc := "GetChildDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 4)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	var cnt uint8 = 0
+	for k, v := range kwargs {
+		cnt++
+		if k == "serial_number" {
+			val := &ic.StrType{Val: v.(string)}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "onu_id" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "parent_port_no" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		}
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		log.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
 }
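+
+// Editorial sketch: GetChildDevice is keyed by the lookup hints in kwargs;
+// per the type switch above, onu_id and parent_port_no must be uint32 and
+// serial_number a string. A hypothetical call (values illustrative):
+//
+//	kwargs := map[string]interface{}{"serial_number": "ALPHe3d1cfde", "onu_id": uint32(1)}
+//	child, err := coreProxy.GetChildDevice(ctx, parentDeviceId, kwargs)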
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
index 5b839c6..d16ad95 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
@@ -24,6 +24,7 @@
 	"github.com/opencord/voltha-go/kafka"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"github.com/opencord/voltha-protos/go/voltha"
+        "github.com/opencord/voltha-protos/go/openflow_13"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -134,6 +135,45 @@
 }
 
 func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
+        log.Debug("Update_flows_incrementally")
+	if len(args) < 3 {
+		log.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+        flows := &openflow_13.FlowChanges{}
+        groups := &openflow_13.FlowGroupChanges{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				log.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "group_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				log.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	log.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	// Invoke the incremental flow update on the adapter
+	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
 	return new(empty.Empty), nil
 }
 
diff --git a/vendor/github.com/opencord/voltha-go/common/log/log.go b/vendor/github.com/opencord/voltha-go/common/log/log.go
index 408158a..16fed74 100644
--- a/vendor/github.com/opencord/voltha-go/common/log/log.go
+++ b/vendor/github.com/opencord/voltha-go/common/log/log.go
@@ -359,11 +359,17 @@
 }
 
 //GetPackageLogLevel returns the current log level of a package.
-func GetPackageLogLevel(packageName string) (int, error) {
-	if cfg, ok := cfgs[packageName]; ok {
+func GetPackageLogLevel(packageName ...string) (int, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
 		return levelToInt(cfg.Level.Level()), nil
 	}
-	return 0, errors.New(fmt.Sprintf("unknown-package-%s", packageName))
+	return 0, errors.New(fmt.Sprintf("unknown-package-%s", name))
 }
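+
+// Editorial note: with the variadic signature, both call forms work:
+//	lvl, err := GetPackageLogLevel()           // level of the caller's own package
+//	lvl, err := GetPackageLogLevel("pkg/name") // level of an explicit package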
 
 //SetLogLevel sets the log level for the logger corresponding to the caller's package
diff --git a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
new file mode 100755
index 0000000..2a3cae6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
@@ -0,0 +1,1113 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ponresourcemanager
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "strconv"
+
+    bitmap "github.com/boljen/go-bitmap"
+    "github.com/opencord/voltha-go/common/log"
+    "github.com/opencord/voltha-go/db/kvstore"
+    "github.com/opencord/voltha-go/db/model"
+    tp "github.com/opencord/voltha-go/common/techprofile"
+)
+
+const (
+    //Constants to identify resource pool
+    UNI_ID     = "UNI_ID"
+    ONU_ID     = "ONU_ID"
+    ALLOC_ID   = "ALLOC_ID"
+    GEMPORT_ID = "GEMPORT_ID"
+    FLOW_ID    = "FLOW_ID"
+
+    //Constants for passing command line arguments
+    OLT_MODEL_ARG = "--olt_model"
+    PATH_PREFIX   = "service/voltha/resource_manager/{%s}"
+    /*The resource ranges for a given device model should be placed
+      at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+      path on the KV store.
+      If Resource Range parameters are to be read from the external KV store,
+      they are expected to be stored in the following format.
+      Note: All parameters are MANDATORY for now.
+      Constants used as keys to reference the resource range parameters in
+      an external KV store.
+    */
+    UNI_ID_START_IDX      = "uni_id_start"
+    UNI_ID_END_IDX        = "uni_id_end"
+    ONU_ID_START_IDX      = "onu_id_start"
+    ONU_ID_END_IDX        = "onu_id_end"
+    ONU_ID_SHARED_IDX     = "onu_id_shared"
+    ALLOC_ID_START_IDX    = "alloc_id_start"
+    ALLOC_ID_END_IDX      = "alloc_id_end"
+    ALLOC_ID_SHARED_IDX   = "alloc_id_shared"
+    GEMPORT_ID_START_IDX  = "gemport_id_start"
+    GEMPORT_ID_END_IDX    = "gemport_id_end"
+    GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+    FLOW_ID_START_IDX     = "flow_id_start"
+    FLOW_ID_END_IDX       = "flow_id_end"
+    FLOW_ID_SHARED_IDX    = "flow_id_shared"
+    NUM_OF_PON_PORT       = "pon_ports"
+
+    /*
+       The KV store backend is initialized with a path prefix and we need to
+       provide only the suffix.
+    */
+    PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+
+    //resource path suffix
+    //Path on the KV store for storing alloc id ranges and resource pool for a given interface
+    //Format: <device_id>/alloc_id_pool/<pon_intf_id>
+    ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+    //Path on the KV store for storing gemport id ranges and resource pool for a given interface
+    //Format: <device_id>/gemport_id_pool/<pon_intf_id>
+    GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+    //Path on the KV store for storing onu id ranges and resource pool for a given interface
+    //Format: <device_id>/onu_id_pool/<pon_intf_id>
+    ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+    //Path on the KV store for storing flow id ranges and resource pool for a given interface
+    //Format: <device_id>/flow_id_pool/<pon_intf_id>
+    FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+
+    //Path on the KV store for storing list of alloc IDs for a given ONU
+    //Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+    ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+
+    //Path on the KV store for storing list of gemport IDs for a given ONU
+    //Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+    GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+
+    //Path on the KV store for storing list of Flow IDs for a given ONU
+    //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+    FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+
+    //Flow Id info: Use to store more metadata associated with the flow_id
+    //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+    FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
+
+    //Constants for internal usage.
+    PON_INTF_ID     = "pon_intf_id"
+    START_IDX       = "start_idx"
+    END_IDX         = "end_idx"
+    POOL            = "pool"
+    NUM_OF_PON_INTF = 16
+
+    KVSTORE_RETRY_TIMEOUT = 5
+)
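+
+// Editorial sketch: the path templates above expand with fmt.Sprintf, e.g.
+//
+//	fmt.Sprintf(ALLOC_ID_POOL_PATH, "olt-1", 0)
+//	// -> "{olt-1}/alloc_id_pool/{0}"
+//
+// ("olt-1" is an illustrative device id, not a value from this code.)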
+
+//type ResourceTypeIndex string
+//type ResourceType string
+
+type PONResourceManager struct {
+    //Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
+    Technology string
+    DeviceType string
+    DeviceID   string
+    Backend    string // ETCD, or consul
+    Host       string // host ip of the KV store
+    Port       int    // port number for the KV store
+    OLTModel   string
+    KVStore    *model.Backend
+    TechProfileMgr *tp.TechProfileMgr
+
+    // Below attribute, pon_resource_ranges, should be initialized
+    // by reading from KV store.
+    PonResourceRanges  map[string]interface{}
+    SharedResourceMgrs map[string]*PONResourceManager
+    SharedIdxByType    map[string]string
+    IntfIDs            []uint32 // list of pon interface IDs
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+    log.Infow("kv-store-type", log.Fields{"store": storeType})
+    switch storeType {
+    case "consul":
+        return kvstore.NewConsulClient(address, timeout)
+    case "etcd":
+        return kvstore.NewEtcdClient(address, timeout)
+    }
+    return nil, errors.New("unsupported-kv-store")
+}
+
+func SetKVClient(Technology string, Backend string, Host string, Port int) *model.Backend {
+    addr := Host + ":" + strconv.Itoa(Port)
+    // TODO: Make sure a direct call to NewBackend works with the backend; currently there is
+    // some issue between the kv store and the backend, and the core is not calling NewBackend directly
+    kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
+    if err != nil {
+        log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+        return nil
+    }
+    kvbackend := &model.Backend{
+        Client:     kvClient,
+        StoreType:  Backend,
+        Host:       Host,
+        Port:       Port,
+        Timeout:    KVSTORE_RETRY_TIMEOUT,
+        PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
+
+    return kvbackend
+}
+
+// NewPONResourceManager creates a new PON resource manager.
+func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
+    var PONMgr PONResourceManager
+    PONMgr.Technology = Technology
+    PONMgr.DeviceType = DeviceType
+    PONMgr.DeviceID = DeviceID
+    PONMgr.Backend = Backend
+    PONMgr.Host = Host
+    PONMgr.Port = Port
+    PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
+    if PONMgr.KVStore == nil {
+        log.Error("KV Client initilization failed")
+        return nil, errors.New("Failed to init KV client")
+    }
+    // Initialize techprofile for this technology 
+    if PONMgr.TechProfileMgr,_ =  tp.NewTechProfile(&PONMgr);PONMgr.TechProfileMgr == nil{
+        log.Error("Techprofile initialization failed")
+        return nil,errors.New("Failed to init tech profile")
+    }
+    PONMgr.PonResourceRanges = make(map[string]interface{})
+    PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+    PONMgr.SharedIdxByType = make(map[string]string)
+    PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+    PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+    PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+    PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+    PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+    PONMgr.OLTModel = DeviceType
+    return &PONMgr, nil
+}
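+
+// Editorial sketch: constructing a manager (the technology string, device
+// ids and etcd endpoint below are illustrative values, not defaults):
+//
+//	mgr, err := NewPONResourceManager("xgspon", "default_olt", "olt-1", "etcd", "127.0.0.1", 2379)
+//	if err != nil {
+//	    // KV client or tech profile initialization failed
+//	}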
+
+/*
+  Initialize PON resource ranges with config fetched from kv store.
+  return boolean: True if PON resource ranges initialized else false
+  Try to initialize the PON Resource Ranges from KV store based on the
+  OLT model key, if available
+*/
+
+func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore() bool {
+    if PONRMgr.OLTModel == "" {
+        log.Error("Failed to get OLT model")
+        return false
+    }
+    Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+    //get resource from kv store
+    Result, err := PONRMgr.KVStore.Get(Path)
+    if err != nil {
+        log.Debugf("Error in fetching resource %s from KV strore", Path)
+        return false
+    }
+    if Result == nil {
+        log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+        return false
+    }
+    //update internal ranges from kv ranges. If there are missing
+    // values in the KV profile, continue to use the defaults
+    Value, err := ToByte(Result.Value)
+    if err != nil {
+        log.Error("Failed to convert kvpair to byte string")
+        return false
+    }
+    if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+        log.Error("Failed to Unmarshal json byte")
+        return false
+    }
+    log.Debug("Init resource ranges from kvstore success")
+    return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+    SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+    /*
+       Update the ranges for all resource types in the internal maps
+       param: resource type start index
+       param: start ID
+       param: resource type end index
+       param: end ID
+       param: resource type shared index
+       param: shared pool id
+       param: global resource manager
+    */
+    log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+
+    if StartID != 0 {
+        PONRMgr.PonResourceRanges[StartIDx] = StartID
+    }
+    if EndID != 0 {
+        PONRMgr.PonResourceRanges[EndIDx] = EndID
+    }
+    //if SharedPoolID != 0 {
+    PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+    //}
+    if RMgr != nil {
+        PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+    }
+}
+
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
+                                                                ONUIDEnd uint32,
+                                                                ONUIDSharedPoolID uint32,
+                                                                AllocIDStart uint32,
+                                                                AllocIDEnd uint32,
+                                                                AllocIDSharedPoolID uint32,
+                                                                GEMPortIDStart uint32,
+                                                                GEMPortIDEnd uint32,
+                                                                GEMPortIDSharedPoolID uint32,
+                                                                FlowIDStart uint32,
+                                                                FlowIDEnd uint32,
+                                                                FlowIDSharedPoolID uint32,
+                                                                UNIIDStart uint32,
+                                                                UNIIDEnd uint32,
+                                                                NoOfPONPorts uint32,
+                                                                IntfIDs []uint32) bool {
+
+    /*Initialize default PON resource ranges
+
+      :param onu_id_start_idx: onu id start index
+      :param onu_id_end_idx: onu id end index
+      :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+      :param alloc_id_start_idx: alloc id start index
+      :param alloc_id_end_idx: alloc id end index
+      :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+      :param gemport_id_start_idx: gemport id start index
+      :param gemport_id_end_idx: gemport id end index
+      :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+      :param flow_id_start_idx: flow id start index
+      :param flow_id_end_idx: flow id end index
+      :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+      :param num_of_pon_ports: number of PON ports
+      :param intf_ids: interfaces serviced by this manager
+    */
+    PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+    PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+    PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+    PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+    PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+    log.Debug("Initialize default range values")
+    var i uint32
+    if IntfIDs == nil {
+        for i = 0; i < NoOfPONPorts; i++ {
+            PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+        }
+    } else {
+        PONRMgr.IntfIDs = IntfIDs
+    }
+    return true
+}
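+
+// Editorial sketch: a typical call site fills the sixteen parameters in the
+// order documented above (all values here are illustrative, not defaults):
+//
+//	mgr.InitDefaultPONResourceRanges(
+//	    1, 127, 0,     // onu id start/end, shared pool
+//	    1024, 2816, 0, // alloc id start/end, shared pool
+//	    1024, 8960, 0, // gemport id start/end, shared pool
+//	    1, 64, 0,      // flow id start/end, shared pool
+//	    0, 4,          // uni id start/end
+//	    16, nil)       // number of PON ports, interface ids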
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePool() error {
+
+    //Initialize resource pool for all PON ports.
+
+    log.Debug("Init resource ranges")
+
+    var err error
+    for _, Intf := range PONRMgr.IntfIDs {
+        SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+        if SharedPoolID != 0 {
+            Intf = SharedPoolID
+        }
+        if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
+            PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+            PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+            log.Error("Failed to init ONU ID resource pool")
+            return err
+        }
+        if SharedPoolID != 0 {
+            break
+        }
+    }
+
+    for _, Intf := range PONRMgr.IntfIDs {
+        SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+        if SharedPoolID != 0 {
+            Intf = SharedPoolID
+        }
+        if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
+            PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+            PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+            log.Error("Failed to init ALLOC ID resource pool ")
+            return err
+        }
+        if SharedPoolID != 0 {
+            break
+        }
+    }
+    for _, Intf := range PONRMgr.IntfIDs {
+        SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+        if SharedPoolID != 0 {
+            Intf = SharedPoolID
+        }
+        if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
+            PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+            PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+            log.Error("Failed to init GEMPORT ID resource pool")
+            return err
+        }
+        if SharedPoolID != 0 {
+            break
+        }
+    }
+
+    for _, Intf := range PONRMgr.IntfIDs {
+        SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+        if SharedPoolID != 0 {
+            Intf = SharedPoolID
+        }
+        if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
+            PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+            PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+            log.Error("Failed to init FLOW ID resource pool")
+            return err
+        }
+        if SharedPoolID != 0 {
+            break
+        }
+    }
+    return err
+}
+
+func (PONRMgr *PONResourceManager) InitResourceIDPool(Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
+
+    /*Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+      :param pon_intf_id: OLT PON interface id
+      :param resource_type: String to identify type of resource
+      :param start_idx: start index for onu id pool
+      :param end_idx: end index for onu id pool
+      :return boolean: True if resource id pool initialized else false
+    */
+
+    // delegate to the master instance if sharing enabled across instances
+    SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+    if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+        return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
+    }
+
+    Path := PONRMgr.GetPath(Intf, ResourceType)
+    if Path == "" {
+        log.Errorf("Failed to get path for resource type %s", ResourceType)
+        return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+    }
+
+    //In case of adapter reboot and reconciliation, check whether the resource
+    //is already present in the KV store; if not, update the KV store
+    Res, err := PONRMgr.GetResource(Path)
+    if (err == nil) && (Res != nil) {
+        log.Debugf("Resource %s already present in store ", Path)
+        return nil
+    } else {
+        FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
+        if err != nil {
+            log.Errorf("Failed to format resource")
+            return err
+        }
+        // Add resource as json in kv store.
+        err = PONRMgr.KVStore.Put(Path, FormatResult)
+        if err == nil {
+            log.Debug("Successfuly posted to kv store")
+            return err
+        }
+    }
+
+    log.Debug("Error initializing pool")
+
+    return err
+}
+
+func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32) ([]byte, error) {
+    /*
+       Format resource as json.
+       :param pon_intf_id: OLT PON interface id
+       :param start_idx: start index for id pool
+       :param end_idx: end index for id pool
+       :return dictionary: resource formatted as map
+    */
+    // Format resource as json to be stored in backend store
+    Resource := make(map[string]interface{})
+    Resource[PON_INTF_ID] = IntfID
+    Resource[START_IDX] = StartIDx
+    Resource[END_IDX] = EndIDx
+    /*
+       Resource pool stored in backend store as binary string.
+       Tracking the resource allocation will be done by setting the bits
+       in the byte array. The index set will be the resource number allocated.
+    */
+    var TSData *bitmap.Threadsafe
+    if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+        log.Error("Failed to create a bitmap")
+        return nil, errors.New("Failed to create bitmap")
+    }
+    Resource[POOL] = TSData.Data(false) // pass false so the bitmap library returns its underlying data without copying
+
+    Value, err := json.Marshal(Resource)
+    if err != nil {
+        log.Errorf("Failed to marshall resource")
+        return nil, err
+    }
+    return Value, err
+}
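+
+// Editorial sketch: since json.Marshal base64-encodes []byte values, the
+// stored document looks roughly like
+//
+//	{"pon_intf_id":0,"start_idx":1,"end_idx":128,"pool":"AAAA...AA=="}
+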
+func (PONRMgr *PONResourceManager) GetResource(Path string) (map[string]interface{}, error) {
+    /*
+       Get resource from kv store.
+
+       :param path: path to get resource
+       :return: resource if resource present in kv store else None
+    */
+    //get resource from kv store
+
+    var Value []byte
+    Result := make(map[string]interface{})
+    var Str string
+
+    Resource, err := PONRMgr.KVStore.Get(Path)
+    if (err != nil) || (Resource == nil) {
+        log.Debugf("Resource  unavailable at %s", Path)
+        return nil, err
+    }
+
+    Value, err = ToByte(Resource.Value)
+    if err != nil {
+        log.Error("Failed to convert resource value to a byte array")
+        return nil, err
+    }
+
+    // decode resource fetched from backend store to dictionary
+    err = json.Unmarshal(Value, &Result)
+    if err != nil {
+        log.Error("Failed to decode resource")
+        return Result, err
+    }
+    /*
+       The resource pool is stored in the backend as a binary string; to
+       generate/release IDs it needs to be converted to a bit array
+    */
+    Str, err = ToString(Result[POOL])
+    if err != nil {
+        log.Error("Failed to conver to kv pair to string")
+        return Result, err
+    }
+    Decode64, _ := base64.StdEncoding.DecodeString(Str)
+    Result[POOL], err = ToByte(Decode64)
+    if err != nil {
+        log.Error("Failed to convert resource pool to byte")
+        return Result, err
+    }
+
+    return Result, err
+}
+
+func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
+    /*
+       Get path for given resource type.
+       :param pon_intf_id: OLT PON interface id
+       :param resource_type: String to identify type of resource
+       :return: path for given resource type
+    */
+
+    /*
+       Get the shared pool for the given resource type.
+       All the resource ranges and the shared resource maps are initialized during init.
+    */
+    SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+    if SharedPoolID != 0 {
+        IntfID = SharedPoolID
+    }
+    var Path string
+    if ResourceType == ONU_ID {
+        Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+    } else if ResourceType == ALLOC_ID {
+        Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+    } else if ResourceType == GEMPORT_ID {
+        Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+    } else if ResourceType == FLOW_ID {
+        Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+    } else {
+        log.Error("Invalid resource pool identifier")
+    }
+    return Path
+}
+
+func (PONRMgr *PONResourceManager) GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
+    /*
+       Create alloc/gemport/onu/flow id for given OLT PON interface.
+       :param pon_intf_id: OLT PON interface id
+       :param resource_type: String to identify type of resource
+       :param num_of_id: required number of ids
+       :return list/uint32/None: list, uint32 or None if resource type is
+        alloc_id/gemport_id, onu_id or invalid type respectively
+    */
+    if NumIDs < 1 {
+        log.Error("Invalid number of resources requested")
+        return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
+    }
+    // delegate to the master instance if sharing enabled across instances
+
+    SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+    if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+        return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
+    }
+
+    Path := PONRMgr.GetPath(IntfID, ResourceType)
+    if Path == "" {
+        log.Errorf("Failed to get path for resource type %s", ResourceType)
+        return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+    }
+    log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+    var Result []uint32
+    var NextID uint32
+    Resource, err := PONRMgr.GetResource(Path)
+    if (err == nil) && ((ResourceType == ONU_ID) || (ResourceType == FLOW_ID)) {
+        if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+            log.Error("Failed to Generate ID")
+            return Result, err
+        }
+        Result = append(Result, NextID)
+    } else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+        if NumIDs == 1 {
+            if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+                log.Error("Failed to Generate ID")
+                return Result, err
+            }
+            Result = append(Result, NextID)
+        } else {
+            for NumIDs > 0 {
+                if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+                    log.Error("Failed to Generate ID")
+                    return Result, err
+                }
+                Result = append(Result, NextID)
+                NumIDs--
+            }
+        }
+    } else {
+        log.Error("get resource failed")
+        return Result, err
+    }
+
+    //Update resource in kv store
+    if PONRMgr.UpdateResource(Path, Resource) != nil {
+        log.Errorf("Failed to update resource %s", Path)
+        return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
+    }
+    return Result, nil
+}
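+
+// Editorial sketch: per the branches above, ONU/flow requests yield a single
+// id while alloc/gemport requests honour NumIDs:
+//
+//	onuIDs, err := mgr.GetResourceID(0, ONU_ID, 1)     // one onu id
+//	gems, err := mgr.GetResourceID(0, GEMPORT_ID, 4)   // four gemport ids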
+
+func checkValidResourceType(ResourceType string) bool {
+    KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+
+    for _, v := range KnownResourceTypes {
+        if v == ResourceType {
+            return true
+        }
+    }
+    return false
+}
+
+func (PONRMgr *PONResourceManager) FreeResourceID(IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
+    /*
+       Release alloc/gemport/onu/flow id for given OLT PON interface.
+       :param pon_intf_id: OLT PON interface id
+       :param resource_type: String to identify type of resource
+       :param release_content: required number of ids
+       :return boolean: True if all IDs in given release_content release else False
+    */
+    if !checkValidResourceType(ResourceType) {
+        log.Error("Invalid resource type")
+        return false
+    }
+    if ReleaseContent == nil {
+        log.Debug("Nothing to release")
+        return true
+    }
+    // delegate to the master instance if sharing enabled across instances
+    SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+    if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+        return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
+    }
+    Path := PONRMgr.GetPath(IntfID, ResourceType)
+    if Path == "" {
+        log.Error("Failed to get path")
+        return false
+    }
+    Resource, err := PONRMgr.GetResource(Path)
+    if err != nil {
+        log.Error("Failed to get resource")
+        return false
+    }
+    for _, Val := range ReleaseContent {
+        PONRMgr.ReleaseID(Resource, Val)
+    }
+    if PONRMgr.UpdateResource(Path, Resource) != nil {
+        log.Errorf("Free resource for %s failed", Path)
+        return false
+    }
+    return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateResource(Path string, Resource map[string]interface{}) error {
+    /*
+       Update resource in resource kv store.
+       :param path: path to update resource
+       :param resource: resource need to be updated
+       :return boolean: True if resource updated in kv store else False
+    */
+    // TODO resource[POOL] = resource[POOL].bin
+    Value, err := json.Marshal(Resource)
+    if err != nil {
+        log.Error("failed to Marshal")
+        return err
+    }
+    err = PONRMgr.KVStore.Put(Path, Value)
+    if err != nil {
+        log.Error("failed to put data to kv store %s", Path)
+        return err
+    }
+    return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearResourceIDPool(IntfID uint32, ResourceType string) bool {
+    /*
+       Clear Resource Pool for a given Resource Type on a given PON Port.
+       :return boolean: True if removed else False
+    */
+
+    // delegate to the master instance if sharing enabled across instances
+    SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+    if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+        return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
+    }
+    Path := PONRMgr.GetPath(IntfID, ResourceType)
+    if Path == "" {
+        log.Error("Failed to get path")
+        return false
+    }
+
+    if err := PONRMgr.KVStore.Delete(Path); err != nil {
+        log.Errorf("Failed to delete resource %s", Path)
+        return false
+    }
+    log.Debugf("Cleared resource %s", Path)
+    return true
+}
+
+func (PONRMgr PONResourceManager) InitResourceMap(PONIntfONUID string) {
+    /*
+       Initialize resource map
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+    */
+    // initialize pon_intf_onu_id tuple to alloc_ids map
+    AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+    var AllocIDs []byte
+    Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
+    if Result != nil {
+        log.Error("Failed to update the KV store")
+        return
+    }
+    // initialize pon_intf_onu_id tuple to gemport_ids map
+    GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+    var GEMPortIDs []byte
+    Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
+    if Result != nil {
+        log.Error("Failed to update the KV store")
+        return
+    }
+}
+
+func (PONRMgr PONResourceManager) RemoveResourceMap(PONIntfONUID string) bool {
+    /*
+       Remove resource map
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+    */
+    // remove pon_intf_onu_id tuple to alloc_ids map
+    var err error
+    AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+    if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
+        log.Errorf("Failed to remove resource %s", AllocIDPath)
+        return false
+    }
+    // remove pon_intf_onu_id tuple to gemport_ids map
+    GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+    err = PONRMgr.KVStore.Delete(GEMPortIDPath)
+    if err != nil {
+        log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+        return false
+    }
+
+    FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+    if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err == nil {
+        for _, Flow := range FlowIDs {
+            FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow)
+            if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
+                log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+                return false
+            }
+        }
+    }
+
+    if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
+        log.Errorf("Failed to remove resource %s", FlowIDPath)
+        return false
+    }
+
+    return true
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(IntfONUID string) []uint32 {
+    /*
+       Get currently configured alloc ids for given pon_intf_onu_id
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :return list: List of alloc_ids if available, else None
+    */
+    Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+    var Data []uint32
+    Value, err := PONRMgr.KVStore.Get(Path)
+    if err == nil {
+        if Value != nil {
+            Val, err := ToByte(Value.Value)
+            if err != nil {
+                log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+                return Data
+            }
+            if err = json.Unmarshal(Val, &Data); err != nil {
+                log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+                return Data
+            }
+        }
+    }
+    return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(IntfONUID string) []uint32 {
+    /*
+       Get currently configured gemport ids for given pon_intf_onu_id
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :return list: List of gemport IDs if available, else None
+    */
+
+    Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+    log.Debugf("Getting current gemports for %s", Path)
+    var Data []uint32
+    Value, err := PONRMgr.KVStore.Get(Path)
+    if err == nil {
+        if Value != nil {
+            Val, _ := ToByte(Value.Value)
+            if err = json.Unmarshal(Val, &Data); err != nil {
+                log.Errorw("Failed to unmarshal",log.Fields{"error":err})
+                return Data
+            }
+        }
+    } else {
+        log.Errorf("Failed to get data from kvstore for %s", Path)
+    }
+    return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(IntfONUID string) []uint32 {
+    /*
+       Get currently configured flow ids for given pon_intf_onu_id
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :return list: List of Flow IDs if available, else None
+    */
+
+    Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+    var Data []uint32
+    Value, err := PONRMgr.KVStore.Get(Path)
+    if err == nil {
+        if Value != nil {
+            Val, _ := ToByte(Value.Value)
+            if err = json.Unmarshal(Val, &Data); err != nil {
+                log.Error("Failed to unmarshal")
+                return Data
+            }
+        }
+    }
+    return Data
+}
+
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{})error{
+    /*
+       Get flow details configured for the ONU.
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param flow_id: Flow Id reference
+       :param Data: Result
+       :return error: nil if no error in getting from KV store
+    */
+
+    Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+    Value, err := PONRMgr.KVStore.Get(Path)
+    if err == nil {
+        if Value != nil {
+            Val, err := ToByte(Value.Value)
+            if err != nil {
+                log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+                return err
+            }
+            if err = json.Unmarshal(Val, Data); err != nil {
+                log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+                return err
+            }
+        }
+    }
+    return err
+}
+
+func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(IntfONUID string, FlowID uint32) bool {
+    /*
+       Get flow_id details configured for the ONU.
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param flow_id: Flow Id reference
+    */
+    Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+    if err := PONRMgr.KVStore.Delete(Path); err != nil {
+        log.Errorf("Falied to remove resource %s", Path)
+        return false
+    }
+    return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(IntfONUID string, AllocIDs []uint32) error {
+    /*
+       Update currently configured alloc ids for given pon_intf_onu_id
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param alloc_ids: list of alloc ids
+    */
+    var Value []byte
+    var err error
+    Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+    Value, err = json.Marshal(AllocIDs)
+    if err != nil {
+        log.Error("failed to Marshal")
+        return err
+    }
+
+    if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+        log.Errorf("Failed to update resource %s", Path)
+        return err
+    }
+    return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(IntfONUID string, GEMPortIDs []uint32) error {
+    /*
+       Update currently configured gemport ids for given pon_intf_onu_id
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param gemport_ids: list of gem port ids
+    */
+
+    var Value []byte
+    var err error
+    Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+    log.Debugf("Updating gemport ids for %s", Path)
+    Value, err = json.Marshal(GEMPortIDs)
+    if err != nil {
+        log.Error("failed to Marshal")
+        return err
+    }
+
+    if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+        log.Errorf("Failed to update resource %s", Path)
+        return err
+    }
+    return err
+}
+
+func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
+    /*
+       Check for a flow id in a given list of flow IDs.
+       :param FlowIDList: List of Flow IDs
+       :param FlowID: Flow ID to check for in the list
+       :return: true and the index if present, false otherwise.
+    */
+
+    for idx := range FlowIDList {
+        if FlowID == FlowIDList[idx] {
+            return true, uint32(idx)
+        }
+    }
+    return false, 0
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(IntfONUID string, FlowID uint32, Add bool) error {
+    /*
+       Update the flow_id list of the ONU (add or remove flow_id from the list)
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param flow_id: flow ID
+       :param add: Boolean flag indicating whether the flow_id should be
+                   added to or removed from the list.
+    */
+    var Value []byte
+    var err error
+    var RetVal bool
+    var IDx uint32
+    Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+    FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
+
+    if Add {
+        if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal {
+            return err
+        }
+        FlowIDs = append(FlowIDs, FlowID)
+    } else {
+        if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); !RetVal {
+            return err
+        }
+        // delete the index and shift
+        FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+    }
+    Value, err = json.Marshal(FlowIDs)
+    if err != nil {
+        log.Error("Failed to Marshal")
+        return err
+    }
+
+    if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+        log.Errorf("Failed to update resource %s", Path)
+        return err
+    }
+    return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface{}) error {
+    /*
+       Update any metadata associated with the flow_id. The flow_data could be JSON
+       or any other data structure; the resource manager doesn't care.
+       :param pon_intf_onu_id: reference of PON interface id and onu id
+       :param flow_id: Flow ID
+       :param flow_data: Flow data blob
+    */
+    var Value []byte
+    var err error
+    Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+    Value, err = json.Marshal(FlowData)
+    if err != nil {
+        log.Error("failed to Marshal")
+        return err
+    }
+
+    if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+        log.Errorf("Failed to update resource %s", Path)
+        return err
+    }
+    return err
+}
+
+func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
+    /*
+       Generate a unique ID, using the resource's START_IDX as the offset.
+       :param resource: resource used to generate ID
+       :return uint32: generated id
+    */
+    ByteArray, err := ToByte(Resource[POOL])
+    if err != nil {
+        log.Error("Failed to convert resource to byte array")
+        return 0, err
+    }
+    Data := bitmap.TSFromData(ByteArray, false)
+    if Data == nil {
+        log.Error("Failed to get data from byte array")
+        return 0, errors.New("Failed to get data from byte array")
+    }
+
+    Len := Data.Len()
+    var Idx int
+    // Scan for the first free (false) bit in the pool.
+    for Idx = 0; Idx < Len; Idx++ {
+        if !Data.Get(Idx) {
+            break
+        }
+    }
+    // Mark the bit as allocated and write the updated pool back to the resource.
+    Data.Set(Idx, true)
+    res := uint32(Resource[START_IDX].(float64))
+    Resource[POOL] = Data.Data(false)
+    log.Debugf("Generated ID %d", (uint32(Idx) + res))
+    return (uint32(Idx) + res), err
+}
+
+func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
+    /*
+       Release a unique ID back to the pool, using the resource's START_IDX as the offset.
+       :param resource: resource used to release the ID
+       :param unique_id: ID to be released
+       :return bool: true on success, false otherwise
+    */
+    ByteArray, err := ToByte(Resource[POOL])
+    if err != nil {
+        log.Error("Failed to convert resource to byte array")
+        return false
+    }
+    Data := bitmap.TSFromData(ByteArray, false)
+    if Data == nil {
+        log.Error("Failed to get resource pool")
+        return false
+    }
+    // Clear the bit corresponding to the ID, relative to START_IDX.
+    Idx := Id - uint32(Resource[START_IDX].(float64))
+    Data.Set(int(Idx), false)
+    Resource[POOL] = Data.Data(false)
+
+    return true
+}
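+
+// Illustrative sketch of the pool round-trip implemented by GenerateNextID and
+// ReleaseID above. The resource map and the START_IDX value of 1024 are assumed
+// example values, not fixed defaults:
+//
+//    id, _ := PONRMgr.GenerateNextID(resource) // e.g. returns 1024 and marks bit 0 used
+//    PONRMgr.ReleaseID(resource, id)           // clears bit 0 again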
+
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+    return PONRMgr.Technology
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+    return ALLOC_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+    return GEMPORT_ID
+}
+
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+    switch t := value.(type) {
+    case []byte:
+        return t, nil
+    case string:
+        return []byte(t), nil
+    default:
+        return nil, fmt.Errorf("unexpected-type-%T", t)
+    }
+}
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+    switch t := value.(type) {
+    case []byte:
+        return string(t), nil
+    case string:
+        return t, nil
+    default:
+        return "", fmt.Errorf("unexpected-type-%T", t)
+    }
+}
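+
+// Illustrative usage of the helpers above when decoding KV store results, as
+// done elsewhere in this package (kvPair and target are assumed names):
+//
+//    if raw, err := ToByte(kvPair.Value); err == nil {
+//        _ = json.Unmarshal(raw, &target)
+//    }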
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-go/common/techprofile/4QueueHybridProfileMap1.json
new file mode 100644
index 0000000..d11f8e4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/4QueueHybridProfileMap1.json
@@ -0,0 +1,141 @@
+ {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/README.md b/vendor/github.com/opencord/voltha-go/common/techprofile/README.md
new file mode 100644
index 0000000..03a396d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/README.md
@@ -0,0 +1,336 @@
+Technology Profile Management
+Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
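+
+As a concrete example (using the xgspon profile with ID 64 that appears throughout this document), the fully qualified key for that profile in a deployment using the service/voltha prefix would be:
+
+    service/voltha/technology_profiles/xgspon/64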
+
+NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
+
+Example JSON :
+
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Creating Technology Profiles
+Technology profiles are simple JSON objects. A profile JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. JQ can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in the VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP POST operation via curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
+
+etcd version 3 is used by the techprofile module. Export this variable before running the etcdctl operations below: export ETCDCTL_API=3
+
+Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json the following commands could be used to store or update the technology template in the proper location in the etcd key/value store:
+
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64
+
+jq -c . 4QueueHybridProfileMap1.json | etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/xgspon/64
+
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compress the JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
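+
+For example, the first etcdctl store command above could be rewritten without jq as:
+
+# Store a Technology template using cat instead of jq
+cat 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64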
+
+Listing Technology Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles etcdctl ls is used. It can be used in conjunction with the -r option to recursively list profiles.
+
+
+# List Tech profile
+etcdctl --endpoints=<EtcdIPAddress>:2379 get service/voltha/technology_profiles/xgspon/64
+
+
+A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required.)
+
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get /xgspon/64 | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/xgspon/64
+# Example output
+service/voltha/technology_profiles/xgspon/64/uni-1
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm /xgspon/64
+
+# Remove all technology profiles associated with Technology xgspon and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /xgspon/64
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/config.go b/vendor/github.com/opencord/voltha-go/common/techprofile/config.go
new file mode 100644
index 0000000..5312dc0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/config.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"github.com/opencord/voltha-go/db/model"
+)
+
+// tech profile default constants
+const (
+	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
+	DEFAULT_TECH_PROFILE_TABLE_ID = 64
+	defaultVersion                = 1.0
+	defaultLogLevel               = 0
+	defaultGemportsCount          = 1
+	defaultNumTconts              = 1
+	defaultPbits                  = "0b11111111"
+
+	defaultKVStoreType    = "etcd"
+	defaultKVStoreTimeout = 5            //in seconds
+	defaultKVStoreHost    = "172.21.0.8" // TODO: Need to get IP from adapter
+	defaultKVStorePort    = 2379         // Consul = 8500; Etcd = 2379
+
+	// Tech profile path prefix in kv store
+	defaultKVPathPrefix = "service/voltha/technology_profiles"
+
+	// Tech profile path in kv store
+	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
+
+	// Tech profile instance path in kv store
+	// Format: <technology>/<tech_profile_tableID>/<uni_port_name>
+	defaultTPInstanceKVPath = "%s/%d/%s"
+)
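+
+// For example (illustrative values only; the UNI port name below is hypothetical),
+// the two path formats above expand as:
+//
+//	fmt.Sprintf(defaultTechProfileKVPath, "xgspon", 64)                     // "xgspon/64"
+//	fmt.Sprintf(defaultTPInstanceKVPath, "xgspon", 64, "pon-0/onu-1/uni-0") // "xgspon/64/pon-0/onu-1/uni-0"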
+
+// Tech-Profile JSON String Keys
+// NOTE: Tech profile template JSON file should comply with below keys
+const (
+	NAME                               = "name"
+	PROFILE_TYPE                       = "profile_type"
+	VERSION                            = "version"
+	NUM_GEM_PORTS                      = "num_gem_ports"
+	INSTANCE_CONTROL                   = "instance_control"
+	US_SCHEDULER                       = "us_scheduler"
+	DS_SCHEDULER                       = "ds_scheduler"
+	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
+	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
+	ONU                                = "onu"
+	UNI                                = "uni"
+	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
+	DIRECTION                          = "direction"
+	ADDITIONAL_BW                      = "additional_bw"
+	PRIORITY                           = "priority"
+	Q_SCHED_POLICY                     = "q_sched_policy"
+	WEIGHT                             = "weight"
+	PBIT_MAP                           = "pbit_map"
+	DISCARD_CONFIG                     = "discard_config"
+	MAX_THRESHOLD                      = "max_threshold"
+	MIN_THRESHOLD                      = "min_threshold"
+	MAX_PROBABILITY                    = "max_probability"
+	DISCARD_POLICY                     = "discard_policy"
+	PRIORITY_Q                         = "priority_q"
+	SCHEDULING_POLICY                  = "scheduling_policy"
+	MAX_Q_SIZE                         = "max_q_size"
+	AES_ENCRYPTION                     = "aes_encryption"
+)
+
+// TechProfileFlags represents the set of configurations used
+type TechProfileFlags struct {
+	KVStoreHost          string
+	KVStorePort          int
+	KVStoreType          string
+	KVStoreTimeout       int
+	KVBackend            *model.Backend
+	TPKVPathPrefix       string
+	TPFileKVPath         string
+	TPInstanceKVPath     string
+	DefaultTPName        string
+	TPVersion            int
+	NumGemPorts          uint32
+	NumTconts            uint32
+	DefaultPbits         []string
+	LogLevel             int
+	DefaultTechProfileID uint32
+	DefaultNumGemPorts   uint32
+	DefaultNumTconts     uint32
+}
+
+func NewTechProfileFlags() *TechProfileFlags {
+	// initialize with default values
+	var techProfileFlags = TechProfileFlags{
+		KVBackend:            nil,
+		KVStoreHost:          defaultKVStoreHost,
+		KVStorePort:          defaultKVStorePort,
+		KVStoreType:          defaultKVStoreType,
+		KVStoreTimeout:       defaultKVStoreTimeout,
+		DefaultTPName:        defaultTechProfileName,
+		TPKVPathPrefix:       defaultKVPathPrefix,
+		TPVersion:            defaultVersion,
+		TPFileKVPath:         defaultTechProfileKVPath,
+		TPInstanceKVPath:     defaultTPInstanceKVPath,
+		DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
+		DefaultNumGemPorts:   defaultGemportsCount,
+		DefaultNumTconts:     defaultNumTconts,
+		DefaultPbits:         []string{defaultPbits},
+		LogLevel:             defaultLogLevel,
+	}
+
+	return &techProfileFlags
+}
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
new file mode 100644
index 0000000..2a256e9
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
@@ -0,0 +1,580 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-go/db/model"
+	openolt_pb "github.com/opencord/voltha-protos/go/openolt"
+)
+
+// Interface to pon resource manager APIs
+type iPonResourceMgr interface {
+	GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+	GetResourceTypeAllocID() string
+	GetResourceTypeGemPortID() string
+	GetTechnology() string
+}
+
+type Direction int32
+
+const (
+	Direction_UPSTREAM      Direction = 0
+	Direction_DOWNSTREAM    Direction = 1
+	Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[Direction]string{
+	0: "UPSTREAM",
+	1: "DOWNSTREAM",
+	2: "BIDIRECTIONAL",
+}
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[SchedulingPolicy]string{
+	0: "WRR",
+	1: "StrictPriority",
+	2: "Hybrid",
+}
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[AdditionalBW]string{
+	0: "AdditionalBW_None",
+	1: "AdditionalBW_NA",
+	2: "AdditionalBW_BestEffort",
+	3: "AdditionalBW_Auto",
+}
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[DiscardPolicy]string{
+	0: "TailDrop",
+	1: "WTailDrop",
+	2: "Red",
+	3: "WRed",
+}
+
+/*
+type InferredAdditionBWIndication int32
+
+const (
+	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
+	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
+	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+	0: "InferredAdditionBWIndication_None",
+	1: "InferredAdditionBWIndication_Assured",
+	2: "InferredAdditionBWIndication_BestEffort",
+}
+*/
+// instance control defaults
+const (
+	defaultOnuInstance    = "multi-instance"
+	defaultUniInstance    = "single-instance"
+	defaultNumGemPorts    = 1
+	defaultGemPayloadSize = "auto"
+)
+
+const MAX_GEM_PAYLOAD = "max_gem_payload_size"
+
+type InstanceControl struct {
+	Onu               string `json:"onu"`
+	Uni               string `json:"uni"`
+	MaxGemPayloadSize string `json:"max_gem_payload_size"`
+}
+
+// default discard config constants
+const (
+	defaultMinThreshold   = 0
+	defaultMaxThreshold   = 0
+	defaultMaxProbability = 0
+)
+
+type DiscardConfig struct {
+	MinThreshold   int `json:"min_threshold"`
+	MaxThreshold   int `json:"max_threshold"`
+	MaxProbability int `json:"max_probability"`
+}
+
+// default scheduler constants
+const (
+	defaultAdditionalBw     = AdditionalBW_AdditionalBW_Auto
+	defaultPriority         = 0
+	defaultWeight           = 0
+	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
+)
+
+type Scheduler struct {
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
+}
+
+// default GEM attribute constants
+const (
+	defaultAESEncryption  = "True"
+	defaultPriorityQueue  = 0
+	defaultQueueWeight    = 0
+	defaultMaxQueueSize   = "auto"
+	defaultDropPolicy     = DiscardPolicy_TailDrop
+	defaultSchedulePolicy = SchedulingPolicy_WRR
+)
+
+type GemPortAttribute struct {
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    int           `json:"priority_q"`
+	Weight           int           `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
+}
+
+type iScheduler struct {
+	AllocID   uint32    `json:"alloc_id"`
+	Scheduler Scheduler `json:"scheduler"`
+}
+type iGemPortAttribute struct {
+	GemportID    uint32           `json:"gem_port_id"`
+	GemAttribute GemPortAttribute `json:"gem_attribute"`
+}
+
+type TechProfileMgr struct {
+	config      *TechProfileFlags
+	resourceMgr iPonResourceMgr
+}
+type DefaultTechProfile struct {
+	Name                           string             `json:"name"`
+	ProfileType                    string             `json:"profile_type"`
+	Version                        int                `json:"version"`
+	NumGemPorts                    uint32             `json:"num_gem_ports"`
+	InstanceCtrl                   InstanceControl    `json:"instance_control"`
+	UsScheduler                    Scheduler          `json:"us_scheduler"`
+	DsScheduler                    Scheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+type TechProfile struct {
+	Name                           string              `json:"name"`
+	SubscriberIdentifier           string              `json:"subscriber_identifier"`
+	ProfileType                    string              `json:"profile_type"`
+	Version                        int                 `json:"version"`
+	NumGemPorts                    uint32              `json:"num_gem_ports"`
+	NumTconts                      uint32              `json:"num_tconts"`
+	InstanceCtrl                   InstanceControl     `json:"instance_control"`
+	UsScheduler                    iScheduler          `json:"us_scheduler"`
+	DsScheduler                    iScheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+
+func (t *TechProfileMgr) SetKVClient() *model.Backend {
+	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
+	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
+	if err != nil {
+		log.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
+				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
+				"error": err.Error(),
+			})
+		return nil
+	}
+	return &model.Backend{
+		Client:     kvClient,
+		StoreType:  t.config.KVStoreType,
+		Host:       t.config.KVStoreHost,
+		Port:       t.config.KVStorePort,
+		Timeout:    t.config.KVStoreTimeout,
+		PathPrefix: t.config.TPKVPathPrefix}
+
+	/* TODO: Make sure a direct call to NewBackend works with the backend; currently there is an
+	   issue between the kv store and the backend, and the core is not calling NewBackend directly.
+	   kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+	                          t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
+	*/
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+
+	log.Infow("kv-store-type", log.Fields{"store": storeType})
+	switch storeType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func NewTechProfile(resourceMgr iPonResourceMgr) (*TechProfileMgr, error) {
+	var techprofileObj TechProfileMgr
+	log.Debug("Initializing techprofile Manager")
+	techprofileObj.config = NewTechProfileFlags()
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+	if techprofileObj.config.KVBackend == nil {
+		log.Error("Failed to initialize KV backend\n")
+		return nil, errors.New("KV backend init failed")
+	}
+	techprofileObj.resourceMgr = resourceMgr
+	log.Debug("Initializing techprofile object instance success")
+	return &techprofileObj, nil
+}
+
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+	return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
+}
+
+func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) {
+	var KvTpIns TechProfile
+	var resPtr *TechProfile = &KvTpIns
+	var err error
+	/*path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)*/
+	log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path})
+	kvresult, err := t.config.KVBackend.Get(path)
+	if err != nil {
+		log.Errorw("Error while fetching tech-profile instance  from KV backend", log.Fields{"key": path})
+		return nil, err
+	}
+	if kvresult == nil {
+		log.Infow("Tech profile does not exist in KV store", log.Fields{"key": path})
+		resPtr = nil
+	} else {
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			if err = json.Unmarshal(value, resPtr); err != nil {
+				log.Errorw("Error while unmarshal KV result", log.Fields{"key": path, "value": value})
+			}
+		}
+	}
+	return resPtr, err
+}
+
+func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	tpInstanceJson, err := json.Marshal(*tpInstance)
+	if err == nil {
+		// Backend will convert JSON byte array into string format
+		log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		err = t.config.KVBackend.Put(path, tpInstanceJson)
+	} else {
+		log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+	}
+	return err
+}
+func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile {
+	var kvtechprofile DefaultTechProfile
+	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
+	log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	kvresult, err := t.config.KVBackend.Get(key)
+	if err != nil {
+		log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		return nil
+	}
+	if kvresult != nil {
+		/* Backend will return Value in string format, needs to be converted to []byte before unmarshal */
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			if err = json.Unmarshal(value, &kvtechprofile); err == nil {
+				log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+				return &kvtechprofile
+			}
+		}
+	}
+	return nil
+}
+func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile {
+	var tpInstance *TechProfile
+	log.Infow("Creating tech profile instance ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+	tp := t.getTPFromKVStore(techProfiletblID)
+	if tp != nil {
+		log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID})
+	} else {
+		tp = t.getDefaultTechProfile()
+		log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID})
+	}
+	tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts)
+	if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
+		log.Errorw("Error in adding tech profile instance to KV ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+		return nil
+	}
+	log.Infow("Added tech profile instance to KV store successfully ",
+		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
+	return tpInstance
+}
+
+func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	return t.config.KVBackend.Delete(path)
+}
+
+func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {
+
+	var usGemPortAttributeList []iGemPortAttribute
+	var dsGemPortAttributeList []iGemPortAttribute
+	var tcontIDs []uint32
+	var gemPorts []uint32
+	var err error
+
+	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
+	if numOfTconts > 1 {
+		log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
+		return nil
+	}
+	if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
+		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
+		return nil
+	}
+	fmt.Println("Num GEM ports in TP:", tp.NumGemPorts)
+	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		return nil
+	}
+	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				GemAttribute: tp.UpstreamGemPortAttributeList[index]})
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				GemAttribute: tp.DownstreamGemPortAttributeList[index]})
+	}
+	return &TechProfile{
+		SubscriberIdentifier: uniPortName,
+		Name:                 tp.Name,
+		ProfileType:          tp.ProfileType,
+		Version:              tp.Version,
+		NumGemPorts:          tp.NumGemPorts,
+		NumTconts:            numOfTconts,
+		InstanceCtrl:         tp.InstanceCtrl,
+		UsScheduler: iScheduler{
+			AllocID:   tcontIDs[0],
+			Scheduler: tp.UsScheduler},
+		DsScheduler: iScheduler{
+			AllocID:   tcontIDs[0],
+			Scheduler: tp.DsScheduler},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
+
+	var usGemPortAttributeList []GemPortAttribute
+	var dsGemPortAttributeList []GemPortAttribute
+
+	for _, pbit := range t.config.DefaultPbits {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultDropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability}})
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultDropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability}})
+	}
+	return &DefaultTechProfile{
+		Name:        t.config.DefaultTPName,
+		ProfileType: t.resourceMgr.GetTechnology(),
+		Version:     t.config.TPVersion,
+		NumGemPorts: uint32(len(usGemPortAttributeList)),
+		InstanceCtrl: InstanceControl{
+			Onu:               defaultOnuInstance,
+			Uni:               defaultUniInstance,
+			MaxGemPayloadSize: defaultGemPayloadSize},
+		UsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_UPSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		DsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_DOWNSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+	var result int32 = -1
+
+	if paramType == "direction" {
+		for key, val := range openolt_pb.Direction_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else if paramType == "discard_policy" {
+		for key, val := range openolt_pb.DiscardPolicy_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else if paramType == "sched_policy" {
+		for key, val := range openolt_pb.SchedulingPolicy_value {
+			if key == paramKey {
+				log.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
+				result = val
+			}
+		}
+	} else if paramType == "additional_bw" {
+		for key, val := range openolt_pb.AdditionalBW_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else {
+		log.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		return -1
+	}
+	log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	return result
+}
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
+	if dir == -1 {
+		log.Fatal("Error in getting Proto for direction for upstream scheduler")
+		return nil
+	}
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
+	if bw == -1 {
+		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
+		return nil
+	}
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
+		return nil
+	}
+	return &openolt_pb.Scheduler{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.UsScheduler.Scheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Scheduler.Weight,
+		SchedPolicy:  policy}
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
+
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
+	if dir == -1 {
+		log.Fatal("Error in getting Proto for direction for downstream scheduler")
+		return nil
+	}
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
+	if bw == -1 {
+		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
+		return nil
+	}
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
+		return nil
+	}
+
+	return &openolt_pb.Scheduler{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.DsScheduler.Scheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Scheduler.Weight,
+		SchedPolicy:  policy}
+}
+
+func (t *TechProfileMgr) GetTconts(tpInstance *TechProfile, usSched *openolt_pb.Scheduler, dsSched *openolt_pb.Scheduler) []*openolt_pb.Tcont {
+	if usSched == nil {
+		if usSched = t.GetUsScheduler(tpInstance); usSched == nil {
+			log.Fatal("Error in getting upstream scheduler from techprofile")
+			return nil
+		}
+	}
+	if dsSched == nil {
+		if dsSched = t.GetDsScheduler(tpInstance); dsSched == nil {
+			log.Fatal("Error in getting downstream scheduler from techprofile")
+			return nil
+		}
+	}
+	tconts := []*openolt_pb.Tcont{}
+	// TODO: Fix me, UPSTREAM direction is not proper
+	// upstream scheduler
+	tcont_us := &openolt_pb.Tcont{
+		Direction: usSched.Direction,
+		AllocId:   tpInstance.UsScheduler.AllocID,
+		Scheduler: usSched} /*TrafficShapingInfo: ? */
+	tconts = append(tconts, tcont_us)
+
+	// downstream scheduler
+	tcont_ds := &openolt_pb.Tcont{
+		Direction: dsSched.Direction,
+		AllocId:   tpInstance.DsScheduler.AllocID,
+		Scheduler: dsSched}
+
+	tconts = append(tconts, tcont_ds)
+	return tconts
+}
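+
+// Illustrative call pattern for GetTconts: passing nil schedulers lets the
+// function derive them from the tech profile instance itself.
+//
+//	tconts := t.GetTconts(tpInstance, nil, nil)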
diff --git a/vendor/github.com/opencord/voltha-go/db/model/backend.go b/vendor/github.com/opencord/voltha-go/db/model/backend.go
new file mode 100644
index 0000000..981a1d5
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/backend.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
+	"strconv"
+	"sync"
+	"time"
+)
+
+//TODO: missing cache stuff
+//TODO: missing retry stuff
+//TODO: missing proper logging
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	sync.RWMutex
+	Client     kvstore.Client
+	StoreType  string
+	Host       string
+	Port       int
+	Timeout    int
+	PathPrefix string
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(storeType string, host string, port int, timeout int, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:  storeType,
+		Host:       host,
+		Port:       port,
+		Timeout:    timeout,
+		PathPrefix: pathPrefix,
+	}
+
+	address := host + ":" + strconv.Itoa(port)
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		log.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "host": host, "port": port,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
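+
+// Illustrative construction of a Backend for a local etcd instance; the
+// address, timeout and path prefix below are assumed example values:
+//
+//	backend := NewBackend("etcd", "127.0.0.1", 2379, 5, "service/voltha")
+//	err := backend.Put("technology_profiles/xgspon/64", jsonBytes) // jsonBytes: profile JSON as []byte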
+
+func (b *Backend) newClient(address string, timeout int) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(key string, lock ...bool) (map[string]*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	return b.Client.List(formattedPath, b.Timeout, lock...)
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(key string, lock ...bool) (*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	start := time.Now()
+	pair, err := b.Client.Get(formattedPath, b.Timeout, lock...)
+	stop := time.Now()
+
+	GetProfiling().AddToDatabaseRetrieveTime(stop.Sub(start).Seconds())
+
+	return pair, err
+}
+
+// Put stores an item value under the specified key
+func (b *Backend) Put(key string, value interface{}, lock ...bool) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath, "lock": lock})
+
+	return b.Client.Put(formattedPath, value, b.Timeout, lock...)
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(key string, lock ...bool) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath, "lock": lock})
+
+	return b.Client.Delete(formattedPath, b.Timeout, lock...)
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(key string) chan *kvstore.Event {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(formattedPath)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	log.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(formattedPath, ch)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/branch.go b/vendor/github.com/opencord/voltha-go/db/model/branch.go
new file mode 100644
index 0000000..40c66ad
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/branch.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+)
+
+// TODO: implement weak references or something equivalent
+// TODO: missing proper logging
+
+// Branch structure is used to classify a collection of transaction based revisions
+type Branch struct {
+	sync.RWMutex
+	Node      *node
+	Txid      string
+	Origin    Revision
+	Revisions map[string]Revision
+	Latest    Revision
+}
+
+// NewBranch creates a new instance of the Branch structure
+func NewBranch(node *node, txid string, origin Revision, autoPrune bool) *Branch {
+	b := &Branch{}
+	b.Node = node
+	b.Txid = txid
+	b.Origin = origin
+	b.Revisions = make(map[string]Revision)
+	b.Latest = origin
+
+	return b
+}
+
+// SetLatest assigns the latest revision for this branch
+func (b *Branch) SetLatest(latest Revision) {
+	b.Lock()
+	defer b.Unlock()
+
+	if b.Latest != nil {
+		log.Debugf("Switching latest from <%s> to <%s>", b.Latest.GetHash(), latest.GetHash())
+	} else {
+		log.Debugf("Switching latest from <NIL> to <%s>", latest.GetHash())
+	}
+
+	b.Latest = latest
+}
+
+// GetLatest retrieves the latest revision of the branch
+func (b *Branch) GetLatest() Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.Latest
+}
+
+// GetOrigin retrieves the original revision of the branch
+func (b *Branch) GetOrigin() Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.Origin
+}
+
+// AddRevision inserts a new revision to the branch
+func (b *Branch) AddRevision(revision Revision) {
+	if revision != nil && b.GetRevision(revision.GetHash()) == nil {
+		b.SetRevision(revision.GetHash(), revision)
+	}
+}
+
+// GetRevision pulls a revision entry at the specified hash
+func (b *Branch) GetRevision(hash string) Revision {
+	b.Lock()
+	defer b.Unlock()
+
+	if revision, ok := b.Revisions[hash]; ok {
+		return revision
+	}
+
+	return nil
+}
+
+// SetRevision updates a revision entry at the specified hash
+func (b *Branch) SetRevision(hash string, revision Revision) {
+	b.Lock()
+	defer b.Unlock()
+
+	b.Revisions[hash] = revision
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/callback_type.go b/vendor/github.com/opencord/voltha-go/db/model/callback_type.go
new file mode 100644
index 0000000..b530dee
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/callback_type.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+// CallbackType is an enumerated value to express when a callback should be executed
+type CallbackType uint8
+
+// Enumerated list of callback types
+const (
+	GET CallbackType = iota
+	PRE_UPDATE
+	POST_UPDATE
+	PRE_ADD
+	POST_ADD
+	PRE_REMOVE
+	POST_REMOVE
+	POST_LISTCHANGE
+)
+
+var enumCallbackTypes = []string{
+	"GET",
+	"PRE_UPDATE",
+	"POST_UPDATE",
+	"PRE_ADD",
+	"POST_ADD",
+	"PRE_REMOVE",
+	"POST_REMOVE",
+	"POST_LISTCHANGE",
+}
+
+func (t CallbackType) String() string {
+	return enumCallbackTypes[t]
+}
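+
+// For example (illustrative): POST_ADD.String() returns "POST_ADD".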
diff --git a/vendor/github.com/opencord/voltha-go/db/model/child_type.go b/vendor/github.com/opencord/voltha-go/db/model/child_type.go
new file mode 100644
index 0000000..da6f688
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/child_type.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	desc "github.com/golang/protobuf/descriptor"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/common"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+type singletonChildTypeCache struct {
+	Cache map[interface{}]map[string]*ChildType
+}
+
+var instanceChildTypeCache *singletonChildTypeCache
+var onceChildTypeCache sync.Once
+
+func getChildTypeCache() *singletonChildTypeCache {
+	onceChildTypeCache.Do(func() {
+		instanceChildTypeCache = &singletonChildTypeCache{}
+	})
+	return instanceChildTypeCache
+}
+
+// ChildType structure holds the construction details of a child object within the data model
+type ChildType struct {
+	ClassModule string
+	ClassType   reflect.Type
+	IsContainer bool
+	Key         string
+	KeyFromStr  func(s string) interface{}
+}
+
+// ChildrenFields retrieves the map of child types associated with a given interface
+func ChildrenFields(cls interface{}) map[string]*ChildType {
+	if cls == nil {
+		return nil
+	}
+	var names map[string]*ChildType
+	var namesExist bool
+
+	if getChildTypeCache().Cache == nil {
+		getChildTypeCache().Cache = make(map[interface{}]map[string]*ChildType)
+	}
+
+	msgType := reflect.TypeOf(cls)
+	inst := getChildTypeCache()
+
+	if names, namesExist = inst.Cache[msgType.String()]; !namesExist {
+		names = make(map[string]*ChildType)
+
+		_, md := desc.ForMessage(cls.(desc.Message))
+
+		// TODO: Do we need to validate MD for nil, panic or exception?
+		for _, field := range md.Field {
+			if options := field.GetOptions(); options != nil {
+				if proto.HasExtension(options, common.E_ChildNode) {
+					isContainer := *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+					meta, _ := proto.GetExtension(options, common.E_ChildNode)
+
+					var keyFromStr func(string) interface{}
+					var ct ChildType
+
+					parentType := FindOwnerType(reflect.ValueOf(cls), field.GetName(), 0, false)
+					if meta.(*common.ChildNode).GetKey() != "" {
+						keyType := FindKeyOwner(reflect.New(parentType).Elem().Interface(), meta.(*common.ChildNode).GetKey(), 0)
+
+						switch keyType.(reflect.Type).Name() {
+						case "string":
+							keyFromStr = func(s string) interface{} {
+								return s
+							}
+						case "int32":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return int32(i)
+							}
+						case "int64":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return int64(i)
+							}
+						case "uint32":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return uint32(i)
+							}
+						case "uint64":
+							keyFromStr = func(s string) interface{} {
+								i, _ := strconv.Atoi(s)
+								return uint64(i)
+							}
+						default:
+							log.Errorf("Key type not implemented - type: %s\n", keyType.(reflect.Type))
+						}
+					}
+
+					ct = ChildType{
+						ClassModule: parentType.String(),
+						ClassType:   parentType,
+						IsContainer: isContainer,
+						Key:         meta.(*common.ChildNode).GetKey(),
+						KeyFromStr:  keyFromStr,
+					}
+
+					names[field.GetName()] = &ct
+				}
+			}
+		}
+
+		getChildTypeCache().Cache[msgType.String()] = names
+	} else {
+		log.Debugf("Cache entry for %s: %+v", msgType.String(), inst.Cache[msgType.String()])
+	}
+
+	return names
+}
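+
+// Illustrative sketch (assumes voltha.Device is a generated proto message
+// carrying child_node field options):
+//
+//	fields := ChildrenFields(&voltha.Device{})
+//	for name, ct := range fields {
+//		fmt.Printf("%s: container=%t key=%s\n", name, ct.IsContainer, ct.Key)
+//	}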
diff --git a/vendor/github.com/opencord/voltha-go/db/model/data_revision.go b/vendor/github.com/opencord/voltha-go/db/model/data_revision.go
new file mode 100644
index 0000000..0763d09
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/data_revision.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+)
+
+// DataRevision stores the data associated with a revision along with its calculated checksum hash value
+type DataRevision struct {
+	Data interface{}
+	Hash string
+}
+
+// NewDataRevision creates a new instance of a DataRevision structure
+func NewDataRevision(root *root, data interface{}) *DataRevision {
+	dr := DataRevision{}
+	dr.Data = data
+	dr.Hash = dr.hashData(root, data)
+
+	return &dr
+}
+
+func (dr *DataRevision) hashData(root *root, data interface{}) string {
+	var buffer bytes.Buffer
+
+	if IsProtoMessage(data) {
+		if pbdata, err := proto.Marshal(data.(proto.Message)); err != nil {
+			log.Debugf("problem marshaling protobuf data --> err: %s", err.Error())
+		} else {
+			buffer.Write(pbdata)
+			// To ensure uniqueness in case data is nil, also include data type
+			buffer.Write([]byte(reflect.TypeOf(data).String()))
+		}
+
+	} else if reflect.ValueOf(data).IsValid() {
+		dataObj := reflect.New(reflect.TypeOf(data).Elem())
+		if json, err := json.Marshal(dataObj.Interface()); err != nil {
+			log.Debugf("problem marshaling data --> err: %s", err.Error())
+		} else {
+			buffer.Write(json)
+		}
+	} else {
+		dataObj := reflect.New(reflect.TypeOf(data).Elem())
+		buffer.Write(dataObj.Bytes())
+	}
+
+	// Add the root pointer that owns the current data for extra uniqueness
+	rootPtr := fmt.Sprintf("%p", root)
+	buffer.Write([]byte(rootPtr))
+
+	return fmt.Sprintf("%x", md5.Sum(buffer.Bytes()))[:12]
+}
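+
+// Illustrative property: two revisions built from the same root pointer and
+// identical data yield the same 12-character hash, which is what allows
+// revisions to be compared by hash:
+//
+//	dr1 := NewDataRevision(r, device)
+//	dr2 := NewDataRevision(r, device)
+//	// dr1.Hash == dr2.Hash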
diff --git a/vendor/github.com/opencord/voltha-go/db/model/event_bus.go b/vendor/github.com/opencord/voltha-go/db/model/event_bus.go
new file mode 100644
index 0000000..335d43f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/event_bus.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"encoding/json"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+// EventBus contains the details required to communicate with the event bus mechanism
+type EventBus struct {
+	client *EventBusClient
+	topic  string
+}
+
+// ignoredCallbacks keeps a list of callbacks that should not be advertised on the event bus
+var (
+	ignoredCallbacks = map[CallbackType]struct{}{
+		PRE_ADD:         {},
+		GET:             {},
+		POST_LISTCHANGE: {},
+		PRE_REMOVE:      {},
+		PRE_UPDATE:      {},
+	}
+)
+
+// NewEventBus creates a new instance of the EventBus structure
+func NewEventBus() *EventBus {
+	bus := &EventBus{
+		client: NewEventBusClient(),
+		topic:  "model-change-events",
+	}
+	return bus
+}
+
+// Advertise will publish the provided information to the event bus
+func (bus *EventBus) Advertise(args ...interface{}) interface{} {
+	eventType := args[0].(CallbackType)
+	hash := args[1].(string)
+	data := args[2:]
+
+	if _, ok := ignoredCallbacks[eventType]; ok {
+		log.Debugf("ignoring event - type:%s, data:%+v", eventType, data)
+		return nil
+	}
+
+	var kind voltha.ConfigEventType_ConfigEventType
+	switch eventType {
+	case POST_ADD:
+		kind = voltha.ConfigEventType_add
+	case POST_REMOVE:
+		kind = voltha.ConfigEventType_remove
+	default:
+		kind = voltha.ConfigEventType_update
+	}
+
+	var msg []byte
+	var err error
+	if IsProtoMessage(data) {
+		if msg, err = proto.Marshal(data[0].(proto.Message)); err != nil {
+			log.Debugf("problem marshalling proto data: %+v, err:%s", data[0], err.Error())
+		}
+	} else if data[0] != nil {
+		if msg, err = json.Marshal(data[0]); err != nil {
+			log.Debugf("problem marshalling json data: %+v, err:%s", data[0], err.Error())
+		}
+	} else {
+		log.Debugf("no data to advertise : %+v", data[0])
+	}
+
+	event := voltha.ConfigEvent{
+		Type: kind,
+		Hash: hash,
+		Data: string(msg),
+	}
+
+	bus.client.Publish(bus.topic, event)
+
+	return nil
+}
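+
+// Illustrative call sketch (assumes rev is a Revision whose data is a proto
+// message):
+//
+//	bus := NewEventBus()
+//	bus.Advertise(POST_ADD, rev.GetHash(), rev.GetData())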
diff --git a/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go b/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go
new file mode 100644
index 0000000..c9c1314
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/event_bus_client.go
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+// EventBusClient is an abstraction layer structure to communicate with an event bus mechanism
+type EventBusClient struct {
+}
+
+// NewEventBusClient creates a new EventBusClient instance
+func NewEventBusClient() *EventBusClient {
+	return &EventBusClient{}
+}
+
+// Publish sends an event to the bus
+func (ebc *EventBusClient) Publish(topic string, event voltha.ConfigEvent) {
+	log.Debugf("publishing event:%+v, topic:%s\n", event, topic)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/merge.go b/vendor/github.com/opencord/voltha-go/db/model/merge.go
new file mode 100644
index 0000000..c59dda4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/merge.go
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+func revisionsAreEqual(a, b []Revision) bool {
+	// If one is nil, the other must also be nil.
+	if (a == nil) != (b == nil) {
+		return false
+	}
+
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+type changeAnalysis struct {
+	KeyMap1     map[string]int
+	KeyMap2     map[string]int
+	AddedKeys   map[string]struct{}
+	RemovedKeys map[string]struct{}
+	ChangedKeys map[string]struct{}
+}
+
+func newChangeAnalysis(lst1, lst2 []Revision, keyName string) *changeAnalysis {
+	changes := &changeAnalysis{}
+
+	changes.KeyMap1 = make(map[string]int)
+	changes.KeyMap2 = make(map[string]int)
+
+	changes.AddedKeys = make(map[string]struct{})
+	changes.RemovedKeys = make(map[string]struct{})
+	changes.ChangedKeys = make(map[string]struct{})
+
+	for i, rev := range lst1 {
+		_, v := GetAttributeValue(rev.GetData(), keyName, 0)
+		changes.KeyMap1[v.String()] = i
+	}
+	for i, rev := range lst2 {
+		_, v := GetAttributeValue(rev.GetData(), keyName, 0)
+		changes.KeyMap2[v.String()] = i
+	}
+	for v := range changes.KeyMap2 {
+		if _, ok := changes.KeyMap1[v]; !ok {
+			changes.AddedKeys[v] = struct{}{}
+		}
+	}
+	for v := range changes.KeyMap1 {
+		if _, ok := changes.KeyMap2[v]; !ok {
+			changes.RemovedKeys[v] = struct{}{}
+		}
+	}
+	for v := range changes.KeyMap1 {
+		if _, ok := changes.KeyMap2[v]; ok && lst1[changes.KeyMap1[v]].GetHash() != lst2[changes.KeyMap2[v]].GetHash() {
+			changes.ChangedKeys[v] = struct{}{}
+		}
+	}
+
+	return changes
+}
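+
+// For example, with fork keys {a, b} and src keys {b, c}: AddedKeys is {c},
+// RemovedKeys is {a}, and b lands in ChangedKeys only if its revision hash
+// differs between the two lists.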
+
+// Merge3Way combines a fork, source and destination revision of the same data set into a merged revision
+func Merge3Way(
+	forkRev, srcRev, dstRev Revision,
+	mergeChildFunc func(Revision) Revision,
+	dryRun bool) (rev Revision, changes []ChangeTuple) {
+
+	log.Debugw("3-way-merge-request", log.Fields{"dryRun": dryRun})
+
+	var configChanged bool
+	var revsToDiscard []Revision
+
+	if dstRev.GetConfig() == forkRev.GetConfig() {
+		configChanged = dstRev.GetConfig() != srcRev.GetConfig()
+	} else {
+		if dstRev.GetConfig().Hash != srcRev.GetConfig().Hash {
+			log.Error("config-collision")
+		}
+		configChanged = true
+	}
+
+	//newChildren := reflect.ValueOf(dstRev.GetAllChildren()).Interface().(map[string][]Revision)
+	newChildren := make(map[string][]Revision)
+	for entryName, childrenEntry := range dstRev.GetAllChildren() {
+		//newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
+		newChildren[entryName] = make([]Revision, len(childrenEntry))
+		copy(newChildren[entryName], childrenEntry)
+	}
+
+	childrenFields := ChildrenFields(forkRev.GetData())
+
+	for fieldName, field := range childrenFields {
+		forkList := forkRev.GetChildren(fieldName)
+		srcList := srcRev.GetChildren(fieldName)
+		dstList := dstRev.GetChildren(fieldName)
+
+		if revisionsAreEqual(dstList, srcList) {
+			for _, rev := range srcList {
+				mergeChildFunc(rev)
+			}
+			continue
+		}
+
+		if field.Key == "" {
+			if revisionsAreEqual(dstList, forkList) {
+				if !revisionsAreEqual(srcList, forkList) {
+					log.Error("we should not be here")
+				} else {
+					for _, rev := range srcList {
+						newChildren[fieldName] = append(newChildren[fieldName], mergeChildFunc(rev))
+					}
+					if field.IsContainer {
+						changes = append(
+							changes, ChangeTuple{POST_LISTCHANGE,
+								NewOperationContext("", nil, fieldName, ""), nil},
+						)
+					}
+				}
+			} else {
+				if !revisionsAreEqual(srcList, forkList) {
+					log.Error("cannot merge - single child node or un-keyed children list has changed")
+				}
+			}
+		} else {
+			if revisionsAreEqual(dstList, forkList) {
+				src := newChangeAnalysis(forkList, srcList, field.Key)
+
+				newList := make([]Revision, len(srcList))
+				copy(newList, srcList)
+
+				for key := range src.AddedKeys {
+					idx := src.KeyMap2[key]
+					newRev := mergeChildFunc(newList[idx])
+
+					// FIXME: newRev may come back as nil... exclude those entries for now
+					if newRev != nil {
+						newList[idx] = newRev
+						changes = append(changes, ChangeTuple{POST_ADD, newList[idx].GetData(), newRev.GetData()})
+					}
+				}
+				for key := range src.RemovedKeys {
+					oldRev := forkList[src.KeyMap1[key]]
+					revsToDiscard = append(revsToDiscard, oldRev)
+					changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+				}
+				for key := range src.ChangedKeys {
+					idx := src.KeyMap2[key]
+					newRev := mergeChildFunc(newList[idx])
+
+					// FIXME: newRev may come back as nil... exclude those entries for now
+					if newRev != nil {
+						newList[idx] = newRev
+					}
+				}
+
+				if !dryRun {
+					newChildren[fieldName] = newList
+				}
+			} else {
+				src := newChangeAnalysis(forkList, srcList, field.Key)
+				dst := newChangeAnalysis(forkList, dstList, field.Key)
+
+				newList := make([]Revision, len(dstList))
+				copy(newList, dstList)
+
+				for key := range src.AddedKeys {
+					if _, exists := dst.AddedKeys[key]; exists {
+						childDstRev := dstList[dst.KeyMap2[key]]
+						childSrcRev := srcList[src.KeyMap2[key]]
+						if childDstRev.GetHash() == childSrcRev.GetHash() {
+							mergeChildFunc(childDstRev)
+						} else {
+							log.Error("conflict error - the revision added on both branches differs")
+						}
+					} else {
+						newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+						newList = append(newList, newRev)
+						changes = append(changes, ChangeTuple{POST_ADD, srcList[src.KeyMap2[key]], newRev.GetData()})
+					}
+				}
+				for key := range src.ChangedKeys {
+					if _, removed := dst.RemovedKeys[key]; removed {
+						log.Error("conflict error - revision has been removed")
+					} else if _, changed := dst.ChangedKeys[key]; changed {
+						childDstRev := dstList[dst.KeyMap2[key]]
+						childSrcRev := srcList[src.KeyMap2[key]]
+						if childDstRev.GetHash() == childSrcRev.GetHash() {
+							mergeChildFunc(childSrcRev)
+						} else if childDstRev.GetConfig().Hash != childSrcRev.GetConfig().Hash {
+							log.Error("conflict error - revision has been changed and is different")
+						} else {
+							newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+							newList[dst.KeyMap2[key]] = newRev
+						}
+					} else {
+						newRev := mergeChildFunc(srcList[src.KeyMap2[key]])
+						newList[dst.KeyMap2[key]] = newRev
+					}
+				}
+
+				// TODO: how do i sort this map in reverse order?
+				for key := range src.RemovedKeys {
+					if _, changed := dst.ChangedKeys[key]; changed {
+						log.Error("conflict error - revision has changed")
+					}
+					if _, removed := dst.RemovedKeys[key]; !removed {
+						dstIdx := dst.KeyMap2[key]
+						oldRev := newList[dstIdx]
+						revsToDiscard = append(revsToDiscard, oldRev)
+
+						copy(newList[dstIdx:], newList[dstIdx+1:])
+						newList[len(newList)-1] = nil
+						newList = newList[:len(newList)-1]
+
+						changes = append(changes, ChangeTuple{POST_REMOVE, oldRev.GetData(), nil})
+					}
+				}
+
+				if !dryRun {
+					newChildren[fieldName] = newList
+				}
+			}
+		}
+	}
+
+	if !dryRun && len(newChildren) > 0 {
+		if configChanged {
+			rev = srcRev
+		} else {
+			rev = dstRev
+		}
+
+		for _, discarded := range revsToDiscard {
+			discarded.Drop("", true)
+		}
+
+		// FIXME: Do not discard the latest value for now
+		//dstRev.GetBranch().GetLatest().Drop("", configChanged)
+		rev = rev.UpdateAllChildren(newChildren, dstRev.GetBranch())
+
+		if configChanged {
+			changes = append(changes, ChangeTuple{POST_UPDATE, dstRev.GetBranch().GetLatest().GetData(), rev.GetData()})
+		}
+		return rev, changes
+	}
+
+	return nil, nil
+}
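+
+// Illustrative call sketch: with dryRun set the merge is only simulated and
+// (nil, nil) is returned; a real run returns the merged revision and the
+// resulting change tuples:
+//
+//	rev, changes := Merge3Way(forkRev, srcRev, dstRev, mergeChild, false)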
diff --git a/vendor/github.com/opencord/voltha-go/db/model/model.go b/vendor/github.com/opencord/voltha-go/db/model/model.go
new file mode 100644
index 0000000..18ff905
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/model.go
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+func init() {
+	log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
+	defer log.CleanUp()
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/node.go b/vendor/github.com/opencord/voltha-go/db/model/node.go
new file mode 100644
index 0000000..2a9309c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/node.go
@@ -0,0 +1,992 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+// TODO: proper error handling
+// TODO: proper logging
+
+import (
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// When a branch has no transaction id, everything gets stored in NONE
+const (
+	NONE string = "none"
+)
+
+// Node interface is an abstraction of the node data structure
+type Node interface {
+	MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple)
+
+	// CRUD functions
+	Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision
+	Get(path string, hash string, depth int, deep bool, txid string) interface{}
+	Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision
+	Remove(path string, txid string, makeBranch MakeBranchFunction) Revision
+
+	MakeBranch(txid string) *Branch
+	DeleteBranch(txid string)
+	MergeBranch(txid string, dryRun bool) (Revision, error)
+
+	MakeTxBranch() string
+	DeleteTxBranch(txid string)
+	FoldTxBranch(txid string)
+
+	CreateProxy(path string, exclusive bool) *Proxy
+	GetProxy() *Proxy
+}
+
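+// node is the unexported, concrete implementation of the Node interface;
+// instances are created through NewNode or node.MakeNode.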
+type node struct {
+	sync.RWMutex
+	Root      *root
+	Type      interface{}
+	Branches  map[string]*Branch
+	Tags      map[string]Revision
+	Proxy     *Proxy
+	EventBus  *EventBus
+	AutoPrune bool
+}
+
+// ChangeTuple holds details of modifications made to a revision
+type ChangeTuple struct {
+	Type         CallbackType
+	PreviousData interface{}
+	LatestData   interface{}
+}
+
+// NewNode creates a new instance of the node data structure
+func NewNode(root *root, initialData interface{}, autoPrune bool, txid string) *node {
+	n := &node{}
+
+	n.Root = root
+	n.Branches = make(map[string]*Branch)
+	n.Tags = make(map[string]Revision)
+	n.Proxy = nil
+	n.EventBus = nil
+	n.AutoPrune = autoPrune
+
+	if IsProtoMessage(initialData) {
+		n.Type = reflect.ValueOf(initialData).Interface()
+		dataCopy := proto.Clone(initialData.(proto.Message))
+		n.initialize(dataCopy, txid)
+	} else if reflect.ValueOf(initialData).IsValid() {
+		// FIXME: this block does not reflect the original implementation
+		// it should be checking if the provided initial_data is already a type!??!
+		// it should be checked before IsProtoMessage
+		n.Type = reflect.ValueOf(initialData).Interface()
+	} else {
+		// not implemented error
+		log.Errorf("cannot process initial data - %+v", initialData)
+	}
+
+	return n
+}
+
+// MakeNode creates a new node in the tree
+func (n *node) MakeNode(data interface{}, txid string) *node {
+	return NewNode(n.Root, data, true, txid)
+}
+
+// MakeRevision creates a new revision of the node in the tree
+func (n *node) MakeRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	return n.GetRoot().MakeRevision(branch, data, children)
+}
+
+// makeLatest will mark the revision of a node as being the latest
+func (n *node) makeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
+	n.Lock()
+	defer n.Unlock()
+
+	branch.AddRevision(revision)
+
+	if branch.GetLatest() == nil || revision.GetHash() != branch.GetLatest().GetHash() {
+		branch.SetLatest(revision)
+	}
+
+	if changeAnnouncement != nil && branch.Txid == "" {
+		if n.Proxy != nil {
+			for _, change := range changeAnnouncement {
+				//log.Debugw("invoking callback",
+				//	log.Fields{
+				//		"callbacks":    n.Proxy.getCallbacks(change.Type),
+				//		"type":         change.Type,
+				//		"previousData": change.PreviousData,
+				//		"latestData":   change.LatestData,
+				//	})
+				n.Root.AddCallback(
+					n.Proxy.InvokeCallbacks,
+					change.Type,
+					true,
+					change.PreviousData,
+					change.LatestData)
+			}
+		}
+
+		//for _, change := range changeAnnouncement {
+		//log.Debugf("sending notification - changeType: %+v, previous:%+v, latest: %+v",
+		//	change.Type,
+		//	change.PreviousData,
+		//	change.LatestData)
+		//n.Root.AddNotificationCallback(
+		//	n.makeEventBus().Advertise,
+		//	change.Type,
+		//	revision.GetHash(),
+		//	change.PreviousData,
+		//	change.LatestData)
+		//}
+	}
+}
+
+// Latest returns the latest revision of the node, with or without a transaction id
+func (n *node) Latest(txid ...string) Revision {
+	var branch *Branch
+
+	if len(txid) > 0 && txid[0] != "" {
+		if branch = n.GetBranch(txid[0]); branch != nil {
+			return branch.GetLatest()
+		}
+	} else if branch = n.GetBranch(NONE); branch != nil {
+		return branch.GetLatest()
+	}
+	return nil
+}
+
+// initialize prepares the initial content of a node along with the revisions of its children
+func (n *node) initialize(data interface{}, txid string) {
+	n.Lock()
+	children := make(map[string][]Revision)
+	for fieldName, field := range ChildrenFields(n.Type) {
+		_, fieldValue := GetAttributeValue(data, fieldName, 0)
+
+		if fieldValue.IsValid() {
+			if field.IsContainer {
+				if field.Key != "" {
+					for i := 0; i < fieldValue.Len(); i++ {
+						v := fieldValue.Index(i)
+
+						if rev := n.MakeNode(v.Interface(), txid).Latest(txid); rev != nil {
+							children[fieldName] = append(children[fieldName], rev)
+						}
+
+						// TODO: The following logic was ported from v1.0.  Need to verify if it is required
+						//var keysSeen []string
+						//_, key := GetAttributeValue(v.Interface(), field.Key, 0)
+						//for _, k := range keysSeen {
+						//	if k == key.String() {
+						//		//log.Errorf("duplicate key - %s", k)
+						//	}
+						//}
+						//keysSeen = append(keysSeen, key.String())
+					}
+
+				} else {
+					for i := 0; i < fieldValue.Len(); i++ {
+						v := fieldValue.Index(i)
+						if newNodeRev := n.MakeNode(v.Interface(), txid).Latest(); newNodeRev != nil {
+							children[fieldName] = append(children[fieldName], newNodeRev)
+						}
+					}
+				}
+			} else {
+				if newNodeRev := n.MakeNode(fieldValue.Interface(), txid).Latest(); newNodeRev != nil {
+					children[fieldName] = append(children[fieldName], newNodeRev)
+				}
+			}
+		} else {
+			log.Errorf("field is invalid - %+v", fieldValue)
+		}
+	}
+	n.Unlock()
+
+	branch := NewBranch(n, "", nil, n.AutoPrune)
+	rev := n.MakeRevision(branch, data, children)
+	n.makeLatest(branch, rev, nil)
+
+	if txid == "" {
+		n.SetBranch(NONE, branch)
+	} else {
+		n.SetBranch(txid, branch)
+	}
+}
+
+// findRevByKey retrieves a specific revision from a node tree
+func (n *node) findRevByKey(revs []Revision, keyName string, value interface{}) (int, Revision) {
+	n.Lock()
+	defer n.Unlock()
+
+	for i, rev := range revs {
+		dataValue := reflect.ValueOf(rev.GetData())
+		dataStruct := GetAttributeStructure(rev.GetData(), keyName, 0)
+
+		fieldValue := dataValue.Elem().FieldByName(dataStruct.Name)
+
+		a := fmt.Sprintf("%s", fieldValue.Interface())
+		b := fmt.Sprintf("%s", value)
+		if a == b {
+			return i, revs[i]
+		}
+	}
+
+	return -1, nil
+}
+
+// List retrieves the data that resides at the specified path in the node tree
+func (n *node) List(path string, hash string, depth int, deep bool, txid string) interface{} {
+	log.Debugw("node-list-request", log.Fields{"path": path, "hash": hash, "depth": depth, "deep": deep, "txid": txid})
+	if deep {
+		depth = -1
+	}
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	var branch *Branch
+	var rev Revision
+
+	if branch = n.GetBranch(txid); txid == "" || branch == nil {
+		branch = n.GetBranch(NONE)
+	}
+
+	if hash != "" {
+		rev = branch.GetRevision(hash)
+	} else {
+		rev = branch.GetLatest()
+	}
+
+	var result interface{}
+	var prList []interface{}
+	if pr := rev.LoadFromPersistence(path, txid); pr != nil {
+		for _, revEntry := range pr {
+			prList = append(prList, revEntry.GetData())
+		}
+		result = prList
+	}
+
+	return result
+}
+
+// Get retrieves the data from a node tree that resides at the specified path
+func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
+	log.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile,
+		"txid": txid})
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	var branch *Branch
+	var rev Revision
+
+	if branch = n.GetBranch(txid); txid == "" || branch == nil {
+		branch = n.GetBranch(NONE)
+	}
+
+	if hash != "" {
+		rev = branch.GetRevision(hash)
+	} else {
+		rev = branch.GetLatest()
+	}
+
+	var result interface{}
+
+	// If there is no request to reconcile, try to get the value from memory
+	if !reconcile {
+		if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil &&
+			reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+			return result
+		}
+	}
+
+	// If we got to this point, we are either trying to reconcile with the db
+	// or we simply failed to get the information from memory
+	if n.Root.KvStore != nil {
+		var prList []interface{}
+		if pr := rev.LoadFromPersistence(path, txid); pr != nil && len(pr) > 0 {
+			// Did we receive a single or multiple revisions?
+			if len(pr) > 1 {
+				for _, revEntry := range pr {
+					prList = append(prList, revEntry.GetData())
+				}
+				result = prList
+			} else {
+				result = pr[0].GetData()
+			}
+		}
+	}
+
+	return result
+}
+
+// getPath traverses the specified path and retrieves the data associated to it
+func (n *node) getPath(rev Revision, path string, depth int) interface{} {
+	if path == "" {
+		return n.getData(rev, depth)
+	}
+
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	names := ChildrenFields(n.Type)
+	field := names[name]
+
+	if field != nil && field.IsContainer {
+		children := make([]Revision, len(rev.GetChildren(name)))
+		copy(children, rev.GetChildren(name))
+
+		if field.Key != "" {
+			if path != "" {
+				partition = strings.SplitN(path, "/", 2)
+				key := partition[0]
+				path = ""
+				keyValue := field.KeyFromStr(key)
+				if _, childRev := n.findRevByKey(children, field.Key, keyValue); childRev == nil {
+					return nil
+				} else {
+					childNode := childRev.GetNode()
+					return childNode.getPath(childRev, path, depth)
+				}
+			} else {
+				var response []interface{}
+				for _, childRev := range children {
+					childNode := childRev.GetNode()
+					value := childNode.getData(childRev, depth)
+					response = append(response, value)
+				}
+				return response
+			}
+		} else {
+			var response []interface{}
+			if path != "" {
+				// TODO: raise error
+				return response
+			}
+			for _, childRev := range children {
+				childNode := childRev.GetNode()
+				value := childNode.getData(childRev, depth)
+				response = append(response, value)
+			}
+			return response
+		}
+	}
+
+	childRev := rev.GetChildren(name)[0]
+	childNode := childRev.GetNode()
+	return childNode.getPath(childRev, path, depth)
+}
+
+// getData retrieves the data from a node revision
+func (n *node) getData(rev Revision, depth int) interface{} {
+	msg := rev.GetBranch().GetLatest().Get(depth)
+	var modifiedMsg interface{}
+
+	if n.GetProxy() != nil {
+		log.Debugw("invoking-get-callbacks", log.Fields{"data": msg})
+		if modifiedMsg = n.GetProxy().InvokeCallbacks(GET, false, msg); modifiedMsg != nil {
+			msg = modifiedMsg
+		}
+
+	}
+
+	return msg
+}
+
+// Update changes the content of a node at the specified path with the provided data
+func (n *node) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-update-request", log.Fields{"path": path, "strict": strict, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	if branch.GetLatest() != nil {
+		log.Debugf("Branch data : %+v, Passed data: %+v", branch.GetLatest().GetData(), data)
+	}
+	if path == "" {
+		return n.doUpdate(branch, data, strict)
+	}
+
+	rev := branch.GetLatest()
+
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	var children []Revision
+
+	if field == nil {
+		return n.doUpdate(branch, data, strict)
+	}
+
+	if field.IsContainer {
+		if path == "" {
+			log.Errorf("cannot update a list")
+		} else if field.Key != "" {
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+			childNode := childRev.GetNode()
+
+			// Save proxy in child node to ensure callbacks are called later on
+			// only assign in cases of non sub-folder proxies, i.e. "/"
+			if childNode.Proxy == nil && n.Proxy != nil && n.Proxy.getFullPath() == "" {
+				childNode.Proxy = n.Proxy
+			}
+
+			newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+
+			if newChildRev.GetHash() == childRev.GetHash() {
+				if newChildRev != childRev {
+					log.Debugf("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
+					newChildRev.ClearHash()
+				}
+				return branch.GetLatest()
+			}
+
+			_, newKey := GetAttributeValue(newChildRev.GetData(), field.Key, 0)
+
+			_newKeyType := fmt.Sprintf("%s", newKey)
+			_keyValueType := fmt.Sprintf("%s", keyValue)
+
+			if _newKeyType != _keyValueType {
+				log.Errorf("cannot change key field")
+			}
+
+			// Prefix the hash value with the data type (e.g. devices, logical_devices, adapters)
+			newChildRev.SetName(name + "/" + _keyValueType)
+			children[idx] = newChildRev
+
+			updatedRev := rev.UpdateChildren(name, children, branch)
+
+			branch.GetLatest().Drop(txid, false)
+			n.makeLatest(branch, updatedRev, nil)
+
+			return newChildRev
+
+		} else {
+			log.Errorf("cannot index into container with no keys")
+		}
+	} else {
+		childRev := rev.GetChildren(name)[0]
+		childNode := childRev.GetNode()
+		newChildRev := childNode.Update(path, data, strict, txid, makeBranch)
+		updatedRev := rev.UpdateChildren(name, []Revision{newChildRev}, branch)
+		rev.Drop(txid, false)
+		n.makeLatest(branch, updatedRev, nil)
+
+		return newChildRev
+	}
+
+	return nil
+}
+
+func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
+	log.Debugf("Comparing types - expected: %+v, actual: %+v &&&&&& %s", reflect.ValueOf(n.Type).Type(),
+		reflect.TypeOf(data),
+		string(debug.Stack()))
+
+	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
+		// TODO raise error
+		log.Errorf("data does not match type: %+v", n.Type)
+		return nil
+	}
+
+	// TODO: validate that this actually works
+	//if n.hasChildren(data) {
+	//	return nil
+	//}
+
+	if n.GetProxy() != nil {
+		log.Debug("invoking proxy PRE_UPDATE Callbacks")
+		n.GetProxy().InvokeCallbacks(PRE_UPDATE, false, branch.GetLatest(), data)
+	}
+
+	if branch.GetLatest().GetData().(proto.Message).String() != data.(proto.Message).String() {
+		if strict {
+			// TODO: checkAccessViolations(data, Branch.GetLatest.data)
+			log.Debugf("checking access violations")
+		}
+
+		rev := branch.GetLatest().UpdateData(data, branch)
+		changes := []ChangeTuple{{POST_UPDATE, branch.GetLatest().GetData(), rev.GetData()}}
+
+		// FIXME VOL-1293: the following statement corrupts the kv when using a subproxy (commenting for now)
+		// FIXME VOL-1293 cont'd: need to figure out the required conditions otherwise we are not cleaning up entries
+		//branch.GetLatest().Drop(branch.Txid, false)
+
+		n.makeLatest(branch, rev, changes)
+
+		return rev
+	}
+
+	return branch.GetLatest()
+}
+
+// Add inserts a new node at the specified path with the provided data
+func (n *node) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-add-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		// TODO raise error
+		log.Errorf("cannot add for non-container mode")
+		return nil
+	}
+
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	rev := branch.GetLatest()
+
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+
+	var children []Revision
+
+	if field.IsContainer {
+		if path == "" {
+			if field.Key != "" {
+				if n.GetProxy() != nil {
+					log.Debug("invoking proxy PRE_ADD Callbacks")
+					n.GetProxy().InvokeCallbacks(PRE_ADD, false, data)
+				}
+
+				children = make([]Revision, len(rev.GetChildren(name)))
+				copy(children, rev.GetChildren(name))
+
+				_, key := GetAttributeValue(data, field.Key, 0)
+
+				if _, exists := n.findRevByKey(children, field.Key, key.String()); exists != nil {
+					// TODO raise error
+					log.Warnw("duplicate-key-found", log.Fields{"key": key.String()})
+					return exists
+				}
+				childRev := n.MakeNode(data, "").Latest()
+
+				// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
+				childRev.SetName(name + "/" + key.String())
+
+				// Create watch for <component>/<key>
+				childRev.SetupWatch(childRev.GetName())
+
+				children = append(children, childRev)
+				rev = rev.UpdateChildren(name, children, branch)
+				changes := []ChangeTuple{{POST_ADD, nil, childRev.GetData()}}
+
+				rev.Drop(txid, false)
+				n.makeLatest(branch, rev, changes)
+
+				return childRev
+			}
+			log.Errorf("cannot add to non-keyed container")
+
+		} else if field.Key != "" {
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+
+			childNode := childRev.GetNode()
+			newChildRev := childNode.Add(path, data, txid, makeBranch)
+
+			// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
+			childRev.SetName(name + "/" + keyValue.(string))
+
+			children[idx] = newChildRev
+
+			rev = rev.UpdateChildren(name, children, branch)
+			rev.Drop(txid, false)
+			n.makeLatest(branch, rev.GetBranch().GetLatest(), nil)
+
+			return newChildRev
+		} else {
+			log.Errorf("cannot add to non-keyed container")
+		}
+	} else {
+		log.Errorf("cannot add to non-container field")
+	}
+
+	return nil
+}
+
+// Remove eliminates a node at the specified path
+func (n *node) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+	log.Debugw("node-remove-request", log.Fields{"path": path, "txid": txid, "makeBranch": makeBranch})
+
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		// TODO raise error
+		log.Errorf("cannot remove for non-container mode")
+	}
+	var branch *Branch
+	if txid == "" {
+		branch = n.GetBranch(NONE)
+	} else if branch = n.GetBranch(txid); branch == nil {
+		branch = makeBranch(n)
+	}
+
+	rev := branch.GetLatest()
+
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	var children []Revision
+	postAnnouncement := []ChangeTuple{}
+
+	if field.IsContainer {
+		if path == "" {
+			log.Errorw("cannot-remove-without-key", log.Fields{"name": name, "key": path})
+		} else if field.Key != "" {
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+
+			keyValue := field.KeyFromStr(key)
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+
+			if path != "" {
+				idx, childRev := n.findRevByKey(children, field.Key, keyValue)
+				childNode := childRev.GetNode()
+				if childNode.Proxy == nil {
+					childNode.Proxy = n.Proxy
+				}
+				newChildRev := childNode.Remove(path, txid, makeBranch)
+				children[idx] = newChildRev
+				rev.SetChildren(name, children)
+				branch.GetLatest().Drop(txid, false)
+				n.makeLatest(branch, rev, nil)
+				return nil
+			}
+
+			if idx, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil {
+				if n.GetProxy() != nil {
+					data := childRev.GetData()
+					n.GetProxy().InvokeCallbacks(PRE_REMOVE, false, data)
+					postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, data, nil})
+				} else {
+					postAnnouncement = append(postAnnouncement, ChangeTuple{POST_REMOVE, childRev.GetData(), nil})
+				}
+
+				childRev.StorageDrop(txid, true)
+				children = append(children[:idx], children[idx+1:]...)
+				rev.SetChildren(name, children)
+
+				branch.GetLatest().Drop(txid, false)
+				n.makeLatest(branch, rev, postAnnouncement)
+
+				return rev
+			} else {
+				log.Errorw("failed-to-find-revision", log.Fields{"name": name, "key": keyValue.(string)})
+			}
+		}
+		log.Errorw("cannot-remove-from-non-keyed-container", log.Fields{"name": name, "path": path, "fieldKey": field.Key})
+
+	} else {
+		log.Errorw("cannot-remove-from-non-container-field", log.Fields{"name": name, "path": path})
+	}
+
+	return nil
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Branching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// MakeBranchFunction is a type for function references intended to create a branch
+type MakeBranchFunction func(*node) *Branch
+
+// MakeBranch creates a new branch for the provided transaction id
+func (n *node) MakeBranch(txid string) *Branch {
+	branchPoint := n.GetBranch(NONE).GetLatest()
+	branch := NewBranch(n, txid, branchPoint, true)
+	n.SetBranch(txid, branch)
+	return branch
+}
+
+// DeleteBranch removes a branch with the specified id
+func (n *node) DeleteBranch(txid string) {
+	n.Lock()
+	defer n.Unlock()
+	delete(n.Branches, txid)
+}
+
+func (n *node) mergeChild(txid string, dryRun bool) func(Revision) Revision {
+	f := func(rev Revision) Revision {
+		childBranch := rev.GetBranch()
+
+		if childBranch.Txid == txid {
+			rev, _ = childBranch.Node.MergeBranch(txid, dryRun)
+		}
+
+		return rev
+	}
+	return f
+}
+
+// MergeBranch will integrate the contents of a transaction branch within the latest branch of a given node
+func (n *node) MergeBranch(txid string, dryRun bool) (Revision, error) {
+	srcBranch := n.GetBranch(txid)
+	dstBranch := n.GetBranch(NONE)
+
+	forkRev := srcBranch.Origin
+	srcRev := srcBranch.GetLatest()
+	dstRev := dstBranch.GetLatest()
+
+	rev, changes := Merge3Way(forkRev, srcRev, dstRev, n.mergeChild(txid, dryRun), dryRun)
+
+	if !dryRun {
+		if rev != nil {
+			rev.SetName(dstRev.GetName())
+			n.makeLatest(dstBranch, rev, changes)
+		}
+		n.DeleteBranch(txid)
+	}
+
+	// TODO: return proper error when one occurs
+	return rev, nil
+}
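+
+// Illustrative transaction flow (assumes an initialized node n and a data
+// payload matching its type):
+//
+//	txid := "tx-1"
+//	n.MakeBranch(txid)
+//	n.Update("/", data, false, txid, n.MakeBranch)
+//	mergedRev, _ := n.MergeBranch(txid, false)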
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diff utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+//func (n *node) diff(hash1, hash2, txid string) {
+//	branch := n.Branches[txid]
+//	rev1 := branch.GetHash(hash1)
+//	rev2 := branch.GetHash(hash2)
+//
+//	if rev1.GetHash() == rev2.GetHash() {
+//		// empty patch
+//	} else {
+//		// translate data to json and generate patch
+//		patch, err := jsonpatch.MakePatch(rev1.GetData(), rev2.GetData())
+//		patch.
+//	}
+//}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tag utility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// TODO: is tag mgmt used in the python implementation? Need to validate
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Internals ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+func (n *node) hasChildren(data interface{}) bool {
+	for fieldName, field := range ChildrenFields(n.Type) {
+		_, fieldValue := GetAttributeValue(data, fieldName, 0)
+
+		if (field.IsContainer && fieldValue.Len() > 0) || !fieldValue.IsNil() {
+			log.Error("cannot update external children")
+			return true
+		}
+	}
+
+	return false
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ node Proxy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// CreateProxy returns a reference to a sub-tree of the data model
+func (n *node) CreateProxy(path string, exclusive bool) *Proxy {
+	return n.createProxy(path, path, n, exclusive)
+}
+
+func (n *node) createProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
+	if path == "" {
+		return n.makeProxy(path, fullPath, parentNode, exclusive)
+	}
+
+	rev := n.GetBranch(NONE).GetLatest()
+	partition := strings.SplitN(path, "/", 2)
+	name := partition[0]
+	if len(partition) < 2 {
+		path = ""
+	} else {
+		path = partition[1]
+	}
+
+	field := ChildrenFields(n.Type)[name]
+	if field.IsContainer {
+		if path == "" {
+			//log.Error("cannot proxy a container field")
+			newNode := n.MakeNode(reflect.New(field.ClassType.Elem()).Interface(), "")
+			return newNode.makeProxy(path, fullPath, parentNode, exclusive)
+		} else if field.Key != "" {
+			partition := strings.SplitN(path, "/", 2)
+			key := partition[0]
+			if len(partition) < 2 {
+				path = ""
+			} else {
+				path = partition[1]
+			}
+			keyValue := field.KeyFromStr(key)
+			children := make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+			if _, childRev := n.findRevByKey(children, field.Key, keyValue); childRev != nil {
+				childNode := childRev.GetNode()
+				return childNode.createProxy(path, fullPath, n, exclusive)
+			}
+		} else {
+			log.Error("cannot index into container with no keys")
+		}
+	} else {
+		childRev := rev.GetChildren(name)[0]
+		childNode := childRev.GetNode()
+		return childNode.createProxy(path, fullPath, n, exclusive)
+	}
+
+	log.Warnf("Cannot create proxy - latest rev:%s, all revs:%+v", rev.GetHash(), n.GetBranch(NONE).Revisions)
+	return nil
+}
+
+func (n *node) makeProxy(path string, fullPath string, parentNode *node, exclusive bool) *Proxy {
+	n.Lock()
+	defer n.Unlock()
+	r := &root{
+		node:                  n,
+		Callbacks:             n.Root.GetCallbacks(),
+		NotificationCallbacks: n.Root.GetNotificationCallbacks(),
+		DirtyNodes:            n.Root.DirtyNodes,
+		KvStore:               n.Root.KvStore,
+		Loading:               n.Root.Loading,
+		RevisionClass:         n.Root.RevisionClass,
+	}
+
+	if n.Proxy == nil {
+		n.Proxy = NewProxy(r, n, parentNode, path, fullPath, exclusive)
+	} else {
+		if n.Proxy.Exclusive {
+			log.Error("node is already owned exclusively")
+		}
+	}
+
+	return n.Proxy
+}
+
+func (n *node) makeEventBus() *EventBus {
+	n.Lock()
+	defer n.Unlock()
+	if n.EventBus == nil {
+		n.EventBus = NewEventBus()
+	}
+	return n.EventBus
+}
+
+func (n *node) SetProxy(proxy *Proxy) {
+	n.Lock()
+	defer n.Unlock()
+	n.Proxy = proxy
+}
+
+func (n *node) GetProxy() *Proxy {
+	n.Lock()
+	defer n.Unlock()
+	return n.Proxy
+}
+
+func (n *node) GetBranch(key string) *Branch {
+	n.Lock()
+	defer n.Unlock()
+
+	if n.Branches != nil {
+		if branch, exists := n.Branches[key]; exists {
+			return branch
+		}
+	}
+	return nil
+}
+
+func (n *node) SetBranch(key string, branch *Branch) {
+	n.Lock()
+	defer n.Unlock()
+	n.Branches[key] = branch
+}
+
+func (n *node) GetRoot() *root {
+	n.Lock()
+	defer n.Unlock()
+	return n.Root
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
new file mode 100644
index 0000000..3c39e01
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"bytes"
+	"crypto/md5"
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"sort"
+	"sync"
+)
+
+type revCacheSingleton struct {
+	sync.RWMutex
+	Cache map[string]interface{}
+}
+
+var revCacheInstance *revCacheSingleton
+var revCacheOnce sync.Once
+
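+// GetRevCache returns the singleton revision cache, creating it on first use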
+func GetRevCache() *revCacheSingleton {
+	revCacheOnce.Do(func() {
+		revCacheInstance = &revCacheSingleton{Cache: make(map[string]interface{})}
+	})
+	return revCacheInstance
+}
+
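+// NonPersistedRevision is an in-memory implementation of the Revision
+// interface; its persistence-related methods (LoadFromPersistence,
+// SetupWatch, StorageDrop) are stubs.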
+type NonPersistedRevision struct {
+	mutex    sync.RWMutex
+	Root     *root
+	Config   *DataRevision
+	Children map[string][]Revision
+	Hash     string
+	Branch   *Branch
+	WeakRef  string
+	Name     string
+}
+
+func NewNonPersistedRevision(root *root, branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	r := &NonPersistedRevision{}
+	r.Root = root
+	r.Branch = branch
+	r.Config = NewDataRevision(root, data)
+	r.Children = children
+	r.Hash = r.hashContent()
+	return r
+}
+
+func (npr *NonPersistedRevision) SetConfig(config *DataRevision) {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	npr.Config = config
+}
+
+func (npr *NonPersistedRevision) GetConfig() *DataRevision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	return npr.Config
+}
+
+func (npr *NonPersistedRevision) SetAllChildren(children map[string][]Revision) {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	npr.Children = children
+}
+
+func (npr *NonPersistedRevision) SetChildren(name string, children []Revision) {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	if _, exists := npr.Children[name]; exists {
+		npr.Children[name] = children
+	}
+}
+
+func (npr *NonPersistedRevision) GetAllChildren() map[string][]Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	return npr.Children
+}
+
+func (npr *NonPersistedRevision) GetChildren(name string) []Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	if _, exists := npr.Children[name]; exists {
+		return npr.Children[name]
+	}
+	return nil
+}
+
+func (npr *NonPersistedRevision) SetHash(hash string) {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	npr.Hash = hash
+}
+
+func (npr *NonPersistedRevision) GetHash() string {
+	//npr.mutex.Lock()
+	//defer npr.mutex.Unlock()
+	return npr.Hash
+}
+
+func (npr *NonPersistedRevision) ClearHash() {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	npr.Hash = ""
+}
+
+func (npr *NonPersistedRevision) GetName() string {
+	//npr.mutex.Lock()
+	//defer npr.mutex.Unlock()
+	return npr.Name
+}
+
+func (npr *NonPersistedRevision) SetName(name string) {
+	//npr.mutex.Lock()
+	//defer npr.mutex.Unlock()
+	npr.Name = name
+}
+func (npr *NonPersistedRevision) SetBranch(branch *Branch) {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	npr.Branch = branch
+}
+
+func (npr *NonPersistedRevision) GetBranch() *Branch {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	return npr.Branch
+}
+
+func (npr *NonPersistedRevision) GetData() interface{} {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	if npr.Config == nil {
+		return nil
+	}
+	return npr.Config.Data
+}
+
+func (npr *NonPersistedRevision) GetNode() *node {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+	return npr.Branch.Node
+}
+
+func (npr *NonPersistedRevision) Finalize(skipOnExist bool) {
+	GetRevCache().Lock()
+	defer GetRevCache().Unlock()
+
+	if !skipOnExist {
+		npr.Hash = npr.hashContent()
+	}
+	if _, exists := GetRevCache().Cache[npr.Hash]; !exists {
+		GetRevCache().Cache[npr.Hash] = npr
+	}
+	if _, exists := GetRevCache().Cache[npr.Config.Hash]; !exists {
+		GetRevCache().Cache[npr.Config.Hash] = npr.Config
+	} else {
+		npr.Config = GetRevCache().Cache[npr.Config.Hash].(*DataRevision)
+	}
+}
+
+func (npr *NonPersistedRevision) hashContent() string {
+	var buffer bytes.Buffer
+	var childrenKeys []string
+
+	if npr.Config != nil {
+		buffer.WriteString(npr.Config.Hash)
+	}
+
+	for key := range npr.Children {
+		childrenKeys = append(childrenKeys, key)
+	}
+
+	sort.Strings(childrenKeys)
+
+	if len(npr.Children) > 0 {
+		// Loop through sorted Children keys
+		for _, key := range childrenKeys {
+			for _, child := range npr.Children[key] {
+				if child != nil && child.GetHash() != "" {
+					buffer.WriteString(child.GetHash())
+				}
+			}
+		}
+	}
+
+	return fmt.Sprintf("%x", md5.Sum(buffer.Bytes()))[:12]
+}
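+
+// Because the hash covers the config hash plus every child hash (in sorted
+// key order), comparing two revision hashes is a cheap way to detect whether
+// any part of their subtrees differs.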
+
+func (npr *NonPersistedRevision) Get(depth int) interface{} {
+	// 1. Clone the data to avoid any concurrent access issues
+	// 2. The current rev might still be pointing to an old config
+	//    thus, force the revision to get its latest value
+	latestRev := npr.GetBranch().GetLatest()
+	originalData := proto.Clone(latestRev.GetData().(proto.Message))
+
+	data := originalData
+	// Get back to the interface type
+	//data := reflect.ValueOf(originalData).Interface()
+
+	if depth != 0 {
+		for fieldName, field := range ChildrenFields(latestRev.GetData()) {
+			childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
+			if field.IsContainer {
+				for _, rev := range latestRev.GetChildren(fieldName) {
+					childData := rev.Get(depth - 1)
+					foundEntry := false
+					for i := 0; i < childDataHolder.Len(); i++ {
+						cdhIf := childDataHolder.Index(i).Interface()
+						if cdhIf.(proto.Message).String() == childData.(proto.Message).String() {
+							foundEntry = true
+							break
+						}
+					}
+					if !foundEntry {
+						// avoid duplicates by adding it only if the child was not found in the holder
+						childDataHolder = reflect.Append(childDataHolder, reflect.ValueOf(childData))
+					}
+				}
+			} else {
+				if revs := latestRev.GetChildren(fieldName); revs != nil && len(revs) > 0 {
+					rev := latestRev.GetChildren(fieldName)[0]
+					if rev != nil {
+						childData := rev.Get(depth - 1)
+						if reflect.TypeOf(childData) == reflect.TypeOf(childDataHolder.Interface()) {
+							childDataHolder = reflect.ValueOf(childData)
+						}
+					}
+				}
+			}
+			// Merge child data with cloned object
+			reflect.ValueOf(data).Elem().FieldByName(childDataName).Set(childDataHolder)
+		}
+	}
+
+	result := data
+
+	if result != nil {
+		// We need to send back a copy of the retrieved object
+		result = proto.Clone(data.(proto.Message))
+	}
+
+	return result
+}
+
+func (npr *NonPersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+
+	if npr.Config.Data != nil && npr.Config.hashData(npr.Root, data) == npr.Config.Hash {
+		log.Debugw("stored-data-matches-latest", log.Fields{"stored": npr.Config.Data, "provided": data})
+		return npr
+	}
+
+	newRev := NonPersistedRevision{}
+	newRev.Config = NewDataRevision(npr.Root, data)
+	newRev.Hash = npr.Hash
+	newRev.Branch = branch
+
+	newRev.Children = make(map[string][]Revision)
+	for entryName, childrenEntry := range npr.Children {
+		newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
+	}
+
+	newRev.Finalize(false)
+
+	return &newRev
+}
+
+func (npr *NonPersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+
+	updatedRev := npr
+
+	// Verify if the map already contains an entry matching the name value.
+	// If so, retain the contents of that entry and merge them with the provided children revision list.
+	if _, exists := updatedRev.Children[name]; exists {
+		// Go through all child hashes and save their index within the map
+		existChildMap := make(map[string]int)
+		for i, child := range updatedRev.Children[name] {
+			existChildMap[child.GetHash()] = i
+		}
+
+		for _, newChild := range children {
+			if _, childExists := existChildMap[newChild.GetHash()]; !childExists {
+				// revision is not present in the existing list... add it
+				updatedRev.Children[name] = append(updatedRev.Children[name], newChild)
+			} else {
+				// replace
+				updatedRev.Children[name][existChildMap[newChild.GetHash()]] = newChild
+			}
+		}
+	} else {
+		// Map entry does not exist, thus just create a new entry and assign the provided revisions
+		updatedRev.Children[name] = make([]Revision, len(children))
+		copy(updatedRev.Children[name], children)
+	}
+
+	updatedRev.Config = NewDataRevision(npr.Root, npr.Config.Data)
+	updatedRev.Hash = npr.Hash
+	updatedRev.Branch = branch
+	updatedRev.Finalize(false)
+
+	return updatedRev
+}
+
+func (npr *NonPersistedRevision) UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision {
+	npr.mutex.Lock()
+	defer npr.mutex.Unlock()
+
+	newRev := npr
+	newRev.Config = npr.Config
+	newRev.Hash = npr.Hash
+	newRev.Branch = branch
+	newRev.Children = make(map[string][]Revision)
+	for entryName, childrenEntry := range children {
+		newRev.Children[entryName] = append(newRev.Children[entryName], childrenEntry...)
+	}
+	newRev.Finalize(false)
+
+	return newRev
+}
+
+func (npr *NonPersistedRevision) Drop(txid string, includeConfig bool) {
+	GetRevCache().Lock()
+	defer GetRevCache().Unlock()
+
+	if includeConfig {
+		delete(GetRevCache().Cache, npr.Config.Hash)
+	}
+	delete(GetRevCache().Cache, npr.Hash)
+}
+
+func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+	// stub... required by interface
+	return nil
+}
+
+func (npr *NonPersistedRevision) SetupWatch(key string) {
+	// stub ... required by interface
+}
+
+func (npr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
+	// stub ... required by interface
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
new file mode 100644
index 0000000..c2a6c64
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/hex"
+	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// PersistedRevision holds information about a revision meant to be saved in persistent storage
+type PersistedRevision struct {
+	Revision
+	Compress bool
+
+	events    chan *kvstore.Event `json:"-"`
+	kvStore   *Backend            `json:"-"`
+	mutex     sync.RWMutex        `json:"-"`
+	isStored  bool
+	isWatched bool
+}
+
+// NewPersistedRevision creates a new instance of a PersistedRevision structure
+func NewPersistedRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	pr := &PersistedRevision{}
+	pr.kvStore = branch.Node.GetRoot().KvStore
+	pr.Revision = NewNonPersistedRevision(nil, branch, data, children)
+	return pr
+}
+
+// Finalize is responsible for saving the revision in persistent storage
+func (pr *PersistedRevision) Finalize(skipOnExist bool) {
+	pr.store(skipOnExist)
+}
+
+type revData struct {
+	Children map[string][]string
+	Config   string
+}
+
+func (pr *PersistedRevision) store(skipOnExist bool) {
+	if pr.GetBranch().Txid != "" {
+		return
+	}
+
+	if pair, _ := pr.kvStore.Get(pr.GetName()); pair != nil && skipOnExist {
+		log.Debugw("revision-config-already-exists", log.Fields{"hash": pr.GetHash(), "name": pr.GetName()})
+		return
+	}
+
+	if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
+		// TODO report error
+	} else {
+		if pr.Compress {
+			var b bytes.Buffer
+			w := gzip.NewWriter(&b)
+			w.Write(blob)
+			w.Close()
+			blob = b.Bytes()
+		}
+
+		if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
+			log.Warnw("problem-storing-revision-config",
+				log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+		} else {
+			log.Debugw("storing-revision-config", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			pr.isStored = true
+		}
+	}
+}
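+
+// The stored layout, for illustration: the key is the revision name and the
+// value is the proto-marshalled config data, gzip-compressed when Compress
+// is set.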
+
+func (pr *PersistedRevision) SetupWatch(key string) {
+	if pr.events == nil {
+		pr.events = make(chan *kvstore.Event)
+
+		log.Debugw("setting-watch", log.Fields{"key": key, "revision-hash": pr.GetHash()})
+
+		pr.SetName(key)
+		pr.events = pr.kvStore.CreateWatch(key)
+
+		pr.isWatched = true
+
+		// Start watching
+		go pr.startWatching()
+	}
+}
+
+func (pr *PersistedRevision) updateInMemory(data interface{}) {
+	pr.mutex.Lock()
+	defer pr.mutex.Unlock()
+
+	var pac *proxyAccessControl
+	var pathLock string
+
+	//
+	// If a proxy exists for this revision, use it to lock access to the path
+	// and prevent simultaneous updates to the object in memory
+	//
+	if pr.GetNode().GetProxy() != nil {
+		pathLock, _ = pr.GetNode().GetProxy().parseForControlledPath(pr.GetNode().GetProxy().getFullPath())
+
+		// If the proxy already has a request in progress, then there is no need to process the watch
+		log.Debugw("update-in-memory--checking-pathlock", log.Fields{"key": pr.GetHash(), "pathLock": pathLock})
+		if PAC().IsReserved(pathLock) {
+			switch pr.GetNode().GetRoot().GetProxy().Operation {
+			case PROXY_ADD:
+				fallthrough
+			case PROXY_REMOVE:
+				fallthrough
+			case PROXY_UPDATE:
+				log.Debugw("update-in-memory--skipping", log.Fields{"key": pr.GetHash(), "path": pr.GetNode().GetProxy().getFullPath()})
+				return
+			default:
+				log.Debugw("update-in-memory--operation", log.Fields{"operation": pr.GetNode().GetRoot().GetProxy().Operation})
+			}
+		} else {
+			log.Debugw("update-in-memory--path-not-locked", log.Fields{"key": pr.GetHash(), "path": pr.GetNode().GetProxy().getFullPath()})
+		}
+
+		log.Debugw("update-in-memory--reserve-and-lock", log.Fields{"key": pr.GetHash(), "path": pathLock})
+
+		pac = PAC().ReservePath(pr.GetNode().GetProxy().getFullPath(), pr.GetNode().GetProxy(), pathLock)
+		pac.SetProxy(pr.GetNode().GetProxy())
+		pac.lock()
+
+		defer log.Debugw("update-in-memory--release-and-unlock", log.Fields{"key": pr.GetHash(), "path": pathLock})
+		defer pac.unlock()
+		defer PAC().ReleasePath(pathLock)
+	}
+
+	//
+	// Update the object in memory through a transaction
+	// This will allow for the object to be subsequently merged with any changes
+	// that might have occurred in memory
+	//
+
+	log.Debugw("update-in-memory--custom-transaction", log.Fields{"key": pr.GetHash()})
+
+	// Prepare the transaction
+	branch := pr.GetBranch()
+	latest := branch.GetLatest()
+	txidBin, _ := uuid.New().MarshalBinary()
+	txid := hex.EncodeToString(txidBin)[:12]
+
+	makeBranch := func(node *node) *Branch {
+		return node.MakeBranch(txid)
+	}
+
+	// Apply the update in a transaction branch
+	updatedRev := latest.GetNode().Update("", data, false, txid, makeBranch)
+	updatedRev.SetName(latest.GetName())
+
+	// Merge the transaction branch in memory
+	if mergedRev, _ := latest.GetNode().MergeBranch(txid, false); mergedRev != nil {
+		branch.SetLatest(mergedRev)
+	}
+}
+
+func (pr *PersistedRevision) startWatching() {
+	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash()})
+
+StopWatchLoop:
+	for {
+		select {
+		case event, ok := <-pr.events:
+			if !ok {
+				log.Errorw("event-channel-failure: stopping watch loop", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+				break StopWatchLoop
+			}
+
+			log.Debugw("received-event", log.Fields{"type": event.EventType, "watch": pr.GetName()})
+
+			switch event.EventType {
+			case kvstore.DELETE:
+				log.Debugw("delete-from-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+				pr.Revision.Drop("", true)
+				break StopWatchLoop
+
+			case kvstore.PUT:
+				log.Debugw("update-in-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+
+				if dataPair, err := pr.kvStore.Get(pr.GetName()); err != nil || dataPair == nil {
+					log.Errorw("update-in-memory--key-retrieval-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
+				} else {
+					data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())
+
+					if err := proto.Unmarshal(dataPair.Value.([]byte), data.Interface().(proto.Message)); err != nil {
+						log.Errorw("update-in-memory--unmarshal-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
+					} else {
+						pr.updateInMemory(data.Interface())
+					}
+				}
+
+			default:
+				log.Debugw("unhandled-event", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "type": event.EventType})
+			}
+		}
+	}
+
+	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+}
+
+func (pr *PersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+	log.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})
+
+	var response []Revision
+	var rev Revision
+
+	rev = pr
+
+	if pr.kvStore != nil && path != "" {
+		blobMap, _ := pr.kvStore.List(path)
+
+		partition := strings.SplitN(path, "/", 2)
+		name := partition[0]
+
+		if len(partition) < 2 {
+			path = ""
+		} else {
+			path = partition[1]
+		}
+
+		field := ChildrenFields(rev.GetBranch().Node.Type)[name]
+
+		if field != nil && field.IsContainer {
+			var children []Revision
+			children = make([]Revision, len(rev.GetChildren(name)))
+			copy(children, rev.GetChildren(name))
+			existChildMap := make(map[string]int)
+			for i, child := range rev.GetChildren(name) {
+				existChildMap[child.GetHash()] = i
+			}
+
+			for _, blob := range blobMap {
+				output := blob.Value.([]byte)
+
+				data := reflect.New(field.ClassType.Elem())
+
+				if err := proto.Unmarshal(output, data.Interface().(proto.Message)); err != nil {
+					log.Errorw(
+						"loading-from-persistence--failed-to-unmarshal",
+						log.Fields{"path": path, "txid": txid, "error": err},
+					)
+				} else if field.Key != "" {
+					var key reflect.Value
+					var keyValue interface{}
+					var keyStr string
+
+					if path == "" {
+						// e.g. /logical_devices --> path="" name=logical_devices key=""
+						_, key = GetAttributeValue(data.Interface(), field.Key, 0)
+						keyStr = key.String()
+
+					} else {
+						// e.g.
+						// /logical_devices/abcde --> path="abcde" name=logical_devices
+						// /logical_devices/abcde/image_downloads --> path="abcde/image_downloads" name=logical_devices
+
+						partition := strings.SplitN(path, "/", 2)
+						key := partition[0]
+						if len(partition) < 2 {
+							path = ""
+						} else {
+							path = partition[1]
+						}
+						keyValue = field.KeyFromStr(key)
+						keyStr = keyValue.(string)
+
+						if idx, childRev := rev.GetBranch().Node.findRevByKey(children, field.Key, keyValue); childRev != nil {
+						// Key is in memory, continue recursing the path
+							if newChildRev := childRev.LoadFromPersistence(path, txid); newChildRev != nil {
+								children[idx] = newChildRev[0]
+
+								rev := rev.UpdateChildren(name, rev.GetChildren(name), rev.GetBranch())
+								rev.GetBranch().Node.makeLatest(rev.GetBranch(), rev, nil)
+
+								response = append(response, newChildRev[0])
+								continue
+							}
+						}
+					}
+
+					childRev := rev.GetBranch().Node.MakeNode(data.Interface(), txid).Latest(txid)
+					childRev.SetName(name + "/" + keyStr)
+
+					// Do not process a child that is already in memory
+					if _, childExists := existChildMap[childRev.GetHash()]; !childExists {
+						// Create watch for <component>/<key>
+						childRev.SetupWatch(childRev.GetName())
+
+						children = append(children, childRev)
+						rev = rev.UpdateChildren(name, children, rev.GetBranch())
+
+						rev.GetBranch().Node.makeLatest(rev.GetBranch(), rev, nil)
+					}
+					response = append(response, childRev)
+					continue
+				}
+			}
+		}
+	}
+
+	return response
+}
+
+// UpdateData modifies the information in the data model and saves it in the persistent storage
+func (pr *PersistedRevision) UpdateData(data interface{}, branch *Branch) Revision {
+	log.Debugw("updating-persisted-data", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateData(data, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
+func (pr *PersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateChildren(name, children, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// UpdateAllChildren modifies the children for all components of a revision and saves it in the persistent storage
+func (pr *PersistedRevision) UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision {
+	log.Debugw("updating-all-persisted-children", log.Fields{"hash": pr.GetHash()})
+
+	newNPR := pr.Revision.UpdateAllChildren(children, branch)
+
+	newPR := &PersistedRevision{
+		Revision: newNPR,
+		Compress: pr.Compress,
+		kvStore:  pr.kvStore,
+	}
+
+	return newPR
+}
+
+// Drop takes care of eliminating a revision hash that is no longer needed
+// and its associated config when required
+func (pr *PersistedRevision) Drop(txid string, includeConfig bool) {
+	pr.Revision.Drop(txid, includeConfig)
+}
+
+// StorageDrop takes care of eliminating a revision hash from the persistent storage
+// along with its associated config when required
+func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
+	log.Debugw("dropping-revision",
+		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "stack": string(debug.Stack())})
+
+	pr.mutex.Lock()
+	defer pr.mutex.Unlock()
+	if pr.kvStore != nil && txid == "" {
+		if pr.isStored {
+			if pr.isWatched {
+				pr.kvStore.DeleteWatch(pr.GetName(), pr.events)
+				pr.isWatched = false
+			}
+
+			if err := pr.kvStore.Delete(pr.GetName()); err != nil {
+				log.Errorw("failed-to-remove-revision", log.Fields{"hash": pr.GetHash(), "error": err.Error()})
+			} else {
+				pr.isStored = false
+			}
+		}
+
+	} else {
+		if includeConfig {
+			log.Debugw("attempted-to-remove-transacted-revision-config", log.Fields{"hash": pr.GetConfig().Hash, "txid": txid})
+		}
+		log.Debugw("attempted-to-remove-transacted-revision", log.Fields{"hash": pr.GetHash(), "txid": txid})
+	}
+
+	pr.Revision.Drop(txid, includeConfig)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/profiling.go b/vendor/github.com/opencord/voltha-go/db/model/profiling.go
new file mode 100644
index 0000000..874b035
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/profiling.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+)
+
+// Profiling is used to store performance details collected at runtime
+type profiling struct {
+	sync.RWMutex
+	DatabaseRetrieveTime  float64
+	DatabaseRetrieveCount int
+	InMemoryModelTime     float64
+	InMemoryModelCount    int
+	InMemoryProcessTime   float64
+	DatabaseStoreTime     float64
+	InMemoryLockTime      float64
+	InMemoryLockCount     int
+}
+
+var profilingInstance *profiling
+var profilingOnce sync.Once
+
+// GetProfiling returns a singleton instance of the Profiling structure
+func GetProfiling() *profiling {
+	profilingOnce.Do(func() {
+		profilingInstance = &profiling{}
+	})
+	return profilingInstance
+}
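+
+// Illustrative usage (a sketch; the timed operation is an assumption):
+//
+//	start := time.Now()
+//	// ... perform a KV-store read ...
+//	GetProfiling().AddToDatabaseRetrieveTime(time.Since(start).Seconds())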
+
+// AddToDatabaseRetrieveTime appends a time period to retrieve data from the database
+func (p *profiling) AddToDatabaseRetrieveTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseRetrieveTime += period
+	p.DatabaseRetrieveCount++
+}
+
+// AddToInMemoryModelTime appends a time period to construct/deconstruct data in memory
+func (p *profiling) AddToInMemoryModelTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryModelTime += period
+	p.InMemoryModelCount++
+}
+
+// AddToInMemoryProcessTime appends a time period to process data
+func (p *profiling) AddToInMemoryProcessTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryProcessTime += period
+}
+
+// AddToDatabaseStoreTime appends a time period to store data in the database
+func (p *profiling) AddToDatabaseStoreTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseStoreTime += period
+}
+
+// AddToInMemoryLockTime appends a time period when a code block was locked
+func (p *profiling) AddToInMemoryLockTime(period float64) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.InMemoryLockTime += period
+	p.InMemoryLockCount++
+}
+
+// Reset initializes the profile counters
+func (p *profiling) Reset() {
+	p.Lock()
+	defer p.Unlock()
+
+	p.DatabaseRetrieveTime = 0
+	p.DatabaseRetrieveCount = 0
+	p.InMemoryModelTime = 0
+	p.InMemoryModelCount = 0
+	p.InMemoryProcessTime = 0
+	p.DatabaseStoreTime = 0
+	p.InMemoryLockTime = 0
+	p.InMemoryLockCount = 0
+}
+
+// Report will provide the current profile counter status
+func (p *profiling) Report() {
+	p.Lock()
+	defer p.Unlock()
+
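+	// Note: the averages below use float64 division, which yields NaN or Inf
+	// (rather than panicking) when a count is still zero.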
+	log.Infof("[ Profiling Report ]")
+	log.Infof("Database Retrieval       : %f", p.DatabaseRetrieveTime)
+	log.Infof("Database Retrieval Count : %d", p.DatabaseRetrieveCount)
+	log.Infof("Avg Database Retrieval   : %f", p.DatabaseRetrieveTime/float64(p.DatabaseRetrieveCount))
+	log.Infof("In-Memory Modeling       : %f", p.InMemoryModelTime)
+	log.Infof("In-Memory Modeling Count : %d", p.InMemoryModelCount)
+	log.Infof("Avg In-Memory Modeling   : %f", p.InMemoryModelTime/float64(p.InMemoryModelCount))
+	log.Infof("In-Memory Locking        : %f", p.InMemoryLockTime)
+	log.Infof("In-Memory Locking Count  : %d", p.InMemoryLockCount)
+	log.Infof("Avg In-Memory Locking    : %f", p.InMemoryLockTime/float64(p.InMemoryLockCount))
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy.go b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
new file mode 100644
index 0000000..86d426a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"crypto/md5"
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// OperationContext holds details on the information used during an operation
+type OperationContext struct {
+	Path      string
+	Data      interface{}
+	FieldName string
+	ChildKey  string
+}
+
+// NewOperationContext instantiates a new OperationContext structure
+func NewOperationContext(path string, data interface{}, fieldName string, childKey string) *OperationContext {
+	oc := &OperationContext{
+		Path:      path,
+		Data:      data,
+		FieldName: fieldName,
+		ChildKey:  childKey,
+	}
+	return oc
+}
+
+// Update applies new data to the context structure
+func (oc *OperationContext) Update(data interface{}) *OperationContext {
+	oc.Data = data
+	return oc
+}
+
+// Proxy holds the information for a specific location within the data model
+type Proxy struct {
+	sync.RWMutex
+	Root       *root
+	Node       *node
+	ParentNode *node
+	Path       string
+	FullPath   string
+	Exclusive  bool
+	Callbacks  map[CallbackType]map[string]*CallbackTuple
+	Operation  ProxyOperation
+}
+
+// NewProxy instantiates a new proxy to a specific location
+func NewProxy(root *root, node *node, parentNode *node, path string, fullPath string, exclusive bool) *Proxy {
+	callbacks := make(map[CallbackType]map[string]*CallbackTuple)
+	if fullPath == "/" {
+		fullPath = ""
+	}
+	p := &Proxy{
+		Root:       root,
+		Node:       node,
+		ParentNode: parentNode,
+		Exclusive:  exclusive,
+		Path:       path,
+		FullPath:   fullPath,
+		Callbacks:  callbacks,
+	}
+	return p
+}
+
+// GetRoot returns the root attribute of the proxy
+func (p *Proxy) GetRoot() *root {
+	return p.Root
+}
+
+// getPath returns the path attribute of the proxy
+func (p *Proxy) getPath() string {
+	return p.Path
+}
+
+// getFullPath returns the full path attribute of the proxy
+func (p *Proxy) getFullPath() string {
+	return p.FullPath
+}
+
+// getCallbacks returns the full list of callbacks associated with the proxy
+func (p *Proxy) getCallbacks(callbackType CallbackType) map[string]*CallbackTuple {
+	if cb, exists := p.Callbacks[callbackType]; exists {
+		return cb
+	}
+	return nil
+}
+
+// getCallback returns a specific callback matching the type and function hash
+func (p *Proxy) getCallback(callbackType CallbackType, funcHash string) *CallbackTuple {
+	p.Lock()
+	defer p.Unlock()
+	if tuple, exists := p.Callbacks[callbackType][funcHash]; exists {
+		return tuple
+	}
+	return nil
+}
+
+// setCallbacks applies a callbacks list to a type
+func (p *Proxy) setCallbacks(callbackType CallbackType, callbacks map[string]*CallbackTuple) {
+	p.Lock()
+	defer p.Unlock()
+	p.Callbacks[callbackType] = callbacks
+}
+
+// setCallback applies a callback to a type and hash value
+func (p *Proxy) setCallback(callbackType CallbackType, funcHash string, tuple *CallbackTuple) {
+	p.Lock()
+	defer p.Unlock()
+	p.Callbacks[callbackType][funcHash] = tuple
+}
+
+// DeleteCallback removes a callback matching the type and hash
+func (p *Proxy) DeleteCallback(callbackType CallbackType, funcHash string) {
+	p.Lock()
+	defer p.Unlock()
+	delete(p.Callbacks[callbackType], funcHash)
+}
+
+// ProxyOperation is an enumerated value that identifies the operation performed through a proxy
+type ProxyOperation uint8
+
+// Enumerated list of proxy operations
+const (
+	PROXY_GET ProxyOperation = iota
+	PROXY_LIST
+	PROXY_ADD
+	PROXY_UPDATE
+	PROXY_REMOVE
+)
+
+// parseForControlledPath verifies if a proxy path matches a pattern
+// for locations that need to be access controlled.
+func (p *Proxy) parseForControlledPath(path string) (pathLock string, controlled bool) {
+	// TODO: Add other path prefixes that may need control
+	if strings.HasPrefix(path, "/devices") ||
+		strings.HasPrefix(path, "/logical_devices") ||
+		strings.HasPrefix(path, "/adapters") {
+
+		split := strings.SplitN(path, "/", -1)
+		switch len(split) {
+		case 2:
+			controlled = false
+			pathLock = ""
+		case 3:
+			fallthrough
+		default:
+			pathLock = fmt.Sprintf("%s/%s", split[1], split[2])
+			controlled = true
+		}
+	}
+	return pathLock, controlled
+}
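+
+// For illustration:
+//
+//	parseForControlledPath("/devices/abc123/ports") // -> ("devices/abc123", true)
+//	parseForControlledPath("/devices")              // -> ("", false)
+//	parseForControlledPath("/health")               // -> ("", false)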
+
+// List will retrieve information from the data model at the specified path location
+// A list operation will force access to persistence storage
+func (p *Proxy) List(path string, depth int, deep bool, txid string) interface{} {
+	var effectivePath string
+	if path == "/" {
+		effectivePath = p.getFullPath()
+	} else {
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+
+	pac := PAC().ReservePath(effectivePath, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+	pac.SetProxy(p)
+
+	rv := pac.List(path, depth, deep, txid, controlled)
+
+	return rv
+}
+
+// Get will retrieve information from the data model at the specified path location
+func (p *Proxy) Get(path string, depth int, deep bool, txid string) interface{} {
+	var effectivePath string
+	if path == "/" {
+		effectivePath = p.getFullPath()
+	} else {
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+
+	pac := PAC().ReservePath(effectivePath, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+	pac.SetProxy(p)
+
+	rv := pac.Get(path, depth, deep, txid, controlled)
+
+	return rv
+}
+
+// Update will modify information in the data model at the specified location with the provided data
+func (p *Proxy) Update(path string, data interface{}, strict bool, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s, Controlled: %t", path, effectivePath, fullPath, pathLock, controlled)
+
+	pac := PAC().ReservePath(effectivePath, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_UPDATE
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
+
+	return pac.Update(fullPath, data, strict, txid, controlled)
+}
+
+// AddWithID will insert new data at the specified location.
+// This method also allows the user to specify the ID of the data entry to ensure
+// that access control is active while inserting the information.
+func (p *Proxy) AddWithID(path string, id string, data interface{}, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path + "/" + id
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+
+	pac := PAC().ReservePath(path, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_ADD
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
+
+	return pac.Add(fullPath, data, txid, controlled)
+}
+
+// Add will insert new data at the specified location.
+func (p *Proxy) Add(path string, data interface{}, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+
+	pac := PAC().ReservePath(path, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_ADD
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
+
+	return pac.Add(fullPath, data, txid, controlled)
+}
+
+// Remove will delete an entry at the specified location
+func (p *Proxy) Remove(path string, txid string) interface{} {
+	if !strings.HasPrefix(path, "/") {
+		log.Errorf("invalid path: %s", path)
+		return nil
+	}
+	var fullPath string
+	var effectivePath string
+	if path == "/" {
+		fullPath = p.getPath()
+		effectivePath = p.getFullPath()
+	} else {
+		fullPath = p.getPath() + path
+		effectivePath = p.getFullPath() + path
+	}
+
+	pathLock, controlled := p.parseForControlledPath(effectivePath)
+
+	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+
+	pac := PAC().ReservePath(effectivePath, p, pathLock)
+	defer PAC().ReleasePath(pathLock)
+
+	p.Operation = PROXY_REMOVE
+	pac.SetProxy(p)
+
+	log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
+
+	return pac.Remove(fullPath, txid, controlled)
+}
+
+// OpenTransaction creates a new transaction branch to isolate operations made to the data model
+func (p *Proxy) OpenTransaction() *Transaction {
+	txid := p.GetRoot().MakeTxBranch()
+	return NewTransaction(p, txid)
+}
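+
+// Illustrative transaction flow (a sketch; deviceID and device are assumptions):
+//
+//	tx := proxy.OpenTransaction()
+//	tx.Update("/"+deviceID, device, false)
+//	tx.Commit() // or tx.Cancel() to discard the branch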
+
+// commitTransaction will apply and merge modifications made in the transaction branch to the data model
+func (p *Proxy) commitTransaction(txid string) {
+	p.GetRoot().FoldTxBranch(txid)
+}
+
+// cancelTransaction will terminate a transaction branch along with all changes within it
+func (p *Proxy) cancelTransaction(txid string) {
+	p.GetRoot().DeleteTxBranch(txid)
+}
+
+// CallbackFunction is a type used to define callback functions
+type CallbackFunction func(args ...interface{}) interface{}
+
+// CallbackTuple holds the function and arguments details of a callback
+type CallbackTuple struct {
+	callback CallbackFunction
+	args     []interface{}
+}
+
+// Execute will process a callback with its provided arguments
+func (tuple *CallbackTuple) Execute(contextArgs []interface{}) interface{} {
+	args := []interface{}{}
+
+	for _, ta := range tuple.args {
+		args = append(args, ta)
+	}
+
+	if contextArgs != nil {
+		for _, ca := range contextArgs {
+			args = append(args, ca)
+		}
+	}
+
+	return tuple.callback(args...)
+}
+
+// RegisterCallback associates a callback with the proxy
+func (p *Proxy) RegisterCallback(callbackType CallbackType, callback CallbackFunction, args ...interface{}) {
+	if p.getCallbacks(callbackType) == nil {
+		p.setCallbacks(callbackType, make(map[string]*CallbackTuple))
+	}
+	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
+	log.Debugf("value of function: %s", funcName)
+	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]
+
+	p.setCallback(callbackType, funcHash, &CallbackTuple{callback, args})
+}
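+
+// Illustrative registration (a sketch; POST_UPDATE is assumed to be one of the
+// CallbackType values defined elsewhere in this package):
+//
+//	proxy.RegisterCallback(POST_UPDATE, func(args ...interface{}) interface{} {
+//		log.Debugw("post-update", log.Fields{"args": args})
+//		return nil
+//	})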
+
+// UnregisterCallback removes references to a callback within a proxy
+func (p *Proxy) UnregisterCallback(callbackType CallbackType, callback CallbackFunction, args ...interface{}) {
+	if p.getCallbacks(callbackType) == nil {
+		log.Errorf("no such callback type - %s", callbackType.String())
+		return
+	}
+
+	funcName := runtime.FuncForPC(reflect.ValueOf(callback).Pointer()).Name()
+	funcHash := fmt.Sprintf("%x", md5.Sum([]byte(funcName)))[:12]
+
+	log.Debugf("value of function: %s", funcName)
+
+	if p.getCallback(callbackType, funcHash) == nil {
+		log.Errorf("function with hash value: '%s' not registered with callback type: '%s'", funcHash, callbackType)
+		return
+	}
+
+	p.DeleteCallback(callbackType, funcHash)
+}
+
+func (p *Proxy) invoke(callback *CallbackTuple, context []interface{}) (result interface{}, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			errStr := fmt.Sprintf("callback error occurred: %+v", r)
+			err = errors.New(errStr)
+			log.Error(errStr)
+		}
+	}()
+
+	result = callback.Execute(context)
+
+	return result, err
+}
+
+// InvokeCallbacks executes all callbacks associated to a specific type
+func (p *Proxy) InvokeCallbacks(args ...interface{}) (result interface{}) {
+	callbackType := args[0].(CallbackType)
+	proceedOnError := args[1].(bool)
+	context := args[2:]
+
+	var err error
+
+	if callbacks := p.getCallbacks(callbackType); callbacks != nil {
+		p.Lock()
+		for _, callback := range callbacks {
+			if result, err = p.invoke(callback, context); err != nil {
+				if !proceedOnError {
+					log.Info("An error occurred.  Stopping callback invocation")
+					break
+				}
+				log.Info("An error occurred.  Invoking next callback")
+			}
+		}
+		p.Unlock()
+	}
+
+	return result
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go b/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go
new file mode 100644
index 0000000..66d3222
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy_access_control.go
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"sync"
+	"time"
+)
+
+type singletonProxyAccessControl struct {
+	sync.RWMutex
+	cache         sync.Map
+	reservedCount int
+}
+
+var instanceProxyAccessControl *singletonProxyAccessControl
+var onceProxyAccessControl sync.Once
+
+// PAC provides access to the proxy access control singleton instance
+func PAC() *singletonProxyAccessControl {
+	onceProxyAccessControl.Do(func() {
+		instanceProxyAccessControl = &singletonProxyAccessControl{}
+	})
+	return instanceProxyAccessControl
+}
+
+// IsReserved will verify if access control is active for a specific path within the model
+func (singleton *singletonProxyAccessControl) IsReserved(pathLock string) bool {
+	singleton.Lock()
+	defer singleton.Unlock()
+
+	_, exists := singleton.cache.Load(pathLock)
+	log.Debugw("is-reserved", log.Fields{"pathLock": pathLock, "exists": exists})
+
+	return exists
+}
+
+// ReservePath will apply access control for a specific path within the model
+func (singleton *singletonProxyAccessControl) ReservePath(path string, proxy *Proxy, pathLock string) *proxyAccessControl {
+	singleton.Lock()
+	defer singleton.Unlock()
+	singleton.reservedCount++
+	if pac, exists := singleton.cache.Load(pathLock); exists {
+		log.Debugf("Re-using existing PAC entry for path:%s pathLock:%s", path, pathLock)
+		return pac.(*proxyAccessControl)
+	}
+	log.Debugf("Creating new PAC entry for path:%s pathLock:%s", path, pathLock)
+	newPac := NewProxyAccessControl(proxy, pathLock)
+	singleton.cache.Store(pathLock, newPac)
+	return newPac
+}
+
+// ReleasePath will remove access control for a specific path within the model
+func (singleton *singletonProxyAccessControl) ReleasePath(pathLock string) {
+	singleton.Lock()
+	defer singleton.Unlock()
+
+	singleton.reservedCount--
+
+	if singleton.reservedCount == 0 {
+		singleton.cache.Delete(pathLock)
+	}
+}
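+
+// The intended calling pattern, mirroring how the proxy uses the PAC (sketch):
+//
+//	pac := PAC().ReservePath(effectivePath, proxy, pathLock)
+//	defer PAC().ReleasePath(pathLock)
+//	pac.SetProxy(proxy)
+//	result := pac.Get(path, depth, deep, txid, controlled)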
+
+// ProxyAccessControl is the abstraction interface to the base proxyAccessControl structure
+type ProxyAccessControl interface {
+	Get(path string, depth int, deep bool, txid string, control bool) interface{}
+	Update(path string, data interface{}, strict bool, txid string, control bool) interface{}
+	Add(path string, data interface{}, txid string, control bool) interface{}
+	Remove(path string, txid string, control bool) interface{}
+	SetProxy(proxy *Proxy)
+}
+
+// proxyAccessControl holds details of the path and proxy that requires access control
+type proxyAccessControl struct {
+	sync.RWMutex
+	Proxy    *Proxy
+	PathLock chan struct{}
+	Path     string
+
+	start time.Time
+	stop  time.Time
+}
+
+// NewProxyAccessControl creates a new instance of an access control structure
+func NewProxyAccessControl(proxy *Proxy, path string) *proxyAccessControl {
+	return &proxyAccessControl{
+		Proxy:    proxy,
+		Path:     path,
+		PathLock: make(chan struct{}, 1),
+	}
+}
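+
+// The PathLock channel has capacity 1 and serves as a mutex: sending into it
+// acquires the lock, receiving from it releases the lock.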
+
+// lock will prevent access to a model path
+func (pac *proxyAccessControl) lock() {
+	pac.PathLock <- struct{}{}
+	pac.setStart(time.Now())
+}
+
+// unlock will release control of a model path
+func (pac *proxyAccessControl) unlock() {
+	<-pac.PathLock
+	pac.setStop(time.Now())
+	GetProfiling().AddToInMemoryLockTime(pac.getStop().Sub(pac.getStart()).Seconds())
+}
+
+// getStart is used for profiling purposes and returns the time at which access control was applied
+func (pac *proxyAccessControl) getStart() time.Time {
+	pac.Lock()
+	defer pac.Unlock()
+	return pac.start
+}
+
+// getStop is used for profiling purposes and returns the time at which access control was removed
+func (pac *proxyAccessControl) getStop() time.Time {
+	pac.Lock()
+	defer pac.Unlock()
+	return pac.stop
+}
+
+// getPath returns the access controlled path
+func (pac *proxyAccessControl) getPath() string {
+	pac.Lock()
+	defer pac.Unlock()
+	return pac.Path
+}
+
+// getProxy returns the proxy used to reach a specific location in the data model
+func (pac *proxyAccessControl) getProxy() *Proxy {
+	pac.Lock()
+	defer pac.Unlock()
+	return pac.Proxy
+}
+
+// setStart is for profiling purposes and applies a start time value at which access control was started
+func (pac *proxyAccessControl) setStart(time time.Time) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.start = time
+}
+
+// setStop is for profiling purposes and applies a stop time value at which access control was stopped
+func (pac *proxyAccessControl) setStop(time time.Time) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.stop = time
+}
+
+// SetProxy is used to change the proxy object of an access controlled path
+func (pac *proxyAccessControl) SetProxy(proxy *Proxy) {
+	pac.Lock()
+	defer pac.Unlock()
+	pac.Proxy = proxy
+}
+
+// List retrieves data linked to a data model path
+func (pac *proxyAccessControl) List(path string, depth int, deep bool, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--list", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	// Note: unlike Get below, List passes the caller's depth through unchanged
+
+	return pac.getProxy().GetRoot().List(path, "", depth, deep, txid)
+}
+
+// Get retrieves data linked to a data model path
+func (pac *proxyAccessControl) Get(path string, depth int, deep bool, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--get", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	// FIXME: Forcing depth to 0 for now due to problems deep copying the data structure
+	// The data traversal through reflection currently corrupts the content
+	return pac.getProxy().GetRoot().Get(path, "", 0, deep, txid)
+}
+
+// Update changes the content of the data model at the specified location with the provided data
+func (pac *proxyAccessControl) Update(path string, data interface{}, strict bool, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--update", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	result := pac.getProxy().GetRoot().Update(path, data, strict, txid, nil)
+
+	if result != nil {
+		return result.GetData()
+	}
+	return nil
+}
+
+// Add creates a new data model entry at the specified location with the provided data
+func (pac *proxyAccessControl) Add(path string, data interface{}, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--add", log.Fields{"path": path, "fullPath": pac.Path})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--add", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	result := pac.getProxy().GetRoot().Add(path, data, txid, nil)
+
+	if result != nil {
+		return result.GetData()
+	}
+	return nil
+}
+
+// Remove discards information linked to the data model path
+func (pac *proxyAccessControl) Remove(path string, txid string, control bool) interface{} {
+	if control {
+		pac.lock()
+		log.Debugw("locked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+		defer pac.unlock()
+		defer log.Debugw("unlocked-access--remove", log.Fields{"path": path, "fullPath": pac.Proxy.getFullPath()})
+	}
+
+	return pac.getProxy().GetRoot().Remove(path, txid, nil)
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/revision.go b/vendor/github.com/opencord/voltha-go/db/model/revision.go
new file mode 100644
index 0000000..2c10137
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/revision.go
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+type Revision interface {
+	Finalize(bool)
+	SetConfig(revision *DataRevision)
+	GetConfig() *DataRevision
+	Drop(txid string, includeConfig bool)
+	StorageDrop(txid string, includeConfig bool)
+	SetChildren(name string, children []Revision)
+	GetChildren(name string) []Revision
+	SetAllChildren(children map[string][]Revision)
+	GetAllChildren() map[string][]Revision
+	SetHash(hash string)
+	GetHash() string
+	ClearHash()
+	SetupWatch(key string)
+	SetName(name string)
+	GetName() string
+	SetBranch(branch *Branch)
+	GetBranch() *Branch
+	Get(int) interface{}
+	GetData() interface{}
+	GetNode() *node
+	LoadFromPersistence(path string, txid string) []Revision
+	UpdateData(data interface{}, branch *Branch) Revision
+	UpdateChildren(name string, children []Revision, branch *Branch) Revision
+	UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/root.go b/vendor/github.com/opencord/voltha-go/db/model/root.go
new file mode 100644
index 0000000..8f9e001
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/root.go
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
+	"sync"
+)
+
+// Root is used to provide an abstraction to the base root structure
+type Root interface {
+	Node
+
+	ExecuteCallbacks()
+	AddCallback(callback CallbackFunction, args ...interface{})
+	AddNotificationCallback(callback CallbackFunction, args ...interface{})
+}
+
+// root points to the top of the data model tree or sub-tree identified by a proxy
+type root struct {
+	*node
+
+	Callbacks             []CallbackTuple
+	NotificationCallbacks []CallbackTuple
+
+	DirtyNodes    map[string][]*node
+	KvStore       *Backend
+	Loading       bool
+	RevisionClass interface{}
+
+	mutex sync.RWMutex
+}
+
+// NewRoot creates a new instance of a root object
+func NewRoot(initialData interface{}, kvStore *Backend) *root {
+	root := &root{}
+
+	root.KvStore = kvStore
+	root.DirtyNodes = make(map[string][]*node)
+	root.Loading = false
+
+	// If there is no storage in place, just revert to
+	// a non-persistent mechanism
+	if kvStore != nil {
+		root.RevisionClass = reflect.TypeOf(PersistedRevision{})
+	} else {
+		root.RevisionClass = reflect.TypeOf(NonPersistedRevision{})
+	}
+
+	root.Callbacks = []CallbackTuple{}
+	root.NotificationCallbacks = []CallbackTuple{}
+
+	root.node = NewNode(root, initialData, false, "")
+
+	return root
+}
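+
+// Illustrative construction (a sketch; the seed message is an assumption, any
+// protobuf-generated root object works):
+//
+//	root := NewRoot(&voltha.Voltha{}, nil) // nil KV store selects non-persisted revisions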
+
+// MakeTxBranch creates a new transaction branch
+func (r *root) MakeTxBranch() string {
+	txidBin, _ := uuid.New().MarshalBinary()
+	txid := hex.EncodeToString(txidBin)[:12]
+
+	r.DirtyNodes[txid] = []*node{r.node}
+	r.node.MakeBranch(txid)
+
+	return txid
+}
+
+// DeleteTxBranch removes a transaction branch
+func (r *root) DeleteTxBranch(txid string) {
+	for _, dirtyNode := range r.DirtyNodes[txid] {
+		dirtyNode.DeleteBranch(txid)
+	}
+	delete(r.DirtyNodes, txid)
+	delete(r.node.Branches, txid)
+}
+
+// FoldTxBranch will merge the contents of a transaction branch with the root object
+func (r *root) FoldTxBranch(txid string) {
+	// Start by doing a dry run of the merge
+	// If that fails, it bails out and the branch is deleted
+	if _, err := r.node.MergeBranch(txid, true); err != nil {
+		// Merge operation fails
+		r.DeleteTxBranch(txid)
+	} else {
+		r.node.MergeBranch(txid, false)
+		r.ExecuteCallbacks()
+		r.DeleteTxBranch(txid)
+	}
+}
+
+// ExecuteCallbacks will invoke all the callbacks linked to the root object
+func (r *root) ExecuteCallbacks() {
+	r.mutex.Lock()
+	log.Debugf("ExecuteCallbacks has the ROOT lock : %+v", r)
+	defer r.mutex.Unlock()
+	defer log.Debugf("ExecuteCallbacks released the ROOT lock : %+v", r)
+	for len(r.Callbacks) > 0 {
+		callback := r.Callbacks[0]
+		r.Callbacks = r.Callbacks[1:]
+		go callback.Execute(nil)
+	}
+	for len(r.NotificationCallbacks) > 0 {
+		callback := r.NotificationCallbacks[0]
+		r.NotificationCallbacks = r.NotificationCallbacks[1:]
+		go callback.Execute(nil)
+	}
+}
+
+// hasCallbacks reports whether callbacks are pending execution
+func (r *root) hasCallbacks() bool {
+	return len(r.Callbacks) > 0
+}
+
+// GetCallbacks returns the available callbacks
+func (r *root) GetCallbacks() []CallbackTuple {
+	r.mutex.Lock()
+	log.Debugf("getCallbacks has the ROOT lock : %+v", r)
+	defer r.mutex.Unlock()
+	defer log.Debugf("getCallbacks released the ROOT lock : %+v", r)
+	return r.Callbacks
+}
+
+// GetNotificationCallbacks returns the available notification callbacks
+func (r *root) GetNotificationCallbacks() []CallbackTuple {
+	r.mutex.Lock()
+	log.Debugf("GetNotificationCallbacks has the ROOT lock : %+v", r)
+	defer r.mutex.Unlock()
+	defer log.Debugf("GetNotificationCallbacks released the ROOT lock : %+v", r)
+	return r.NotificationCallbacks
+}
+
+// AddCallback inserts a new callback with its arguments
+func (r *root) AddCallback(callback CallbackFunction, args ...interface{}) {
+	r.mutex.Lock()
+	log.Debugf("AddCallback has the ROOT lock : %+v", r)
+	defer r.mutex.Unlock()
+	defer log.Debugf("AddCallback released the ROOT lock : %+v", r)
+	r.Callbacks = append(r.Callbacks, CallbackTuple{callback, args})
+}
+
+// AddNotificationCallback inserts a new notification callback with its arguments
+func (r *root) AddNotificationCallback(callback CallbackFunction, args ...interface{}) {
+	r.mutex.Lock()
+	log.Debugf("AddNotificationCallback has the ROOT lock : %+v", r)
+	defer r.mutex.Unlock()
+	defer log.Debugf("AddNotificationCallback released the ROOT lock : %+v", r)
+	r.NotificationCallbacks = append(r.NotificationCallbacks, CallbackTuple{callback, args})
+}
+
+func (r *root) syncParent(childRev Revision, txid string) {
+	data := proto.Clone(r.Proxy.ParentNode.Latest().GetData().(proto.Message))
+
+	for fieldName := range ChildrenFields(data) {
+		childDataName, childDataHolder := GetAttributeValue(data, fieldName, 0)
+		if reflect.TypeOf(childRev.GetData()) == reflect.TypeOf(childDataHolder.Interface()) {
+			childDataHolder = reflect.ValueOf(childRev.GetData())
+			reflect.ValueOf(data).Elem().FieldByName(childDataName).Set(childDataHolder)
+		}
+	}
+
+	r.Proxy.ParentNode.Latest().SetConfig(NewDataRevision(r.Proxy.ParentNode.Root, data))
+	r.Proxy.ParentNode.Latest(txid).Finalize(false)
+}
+
+// Update modifies the content of an object at a given path with the provided data
+func (r *root) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
+	var result Revision
+
+	if makeBranch != nil {
+		// TODO: raise error
+	}
+
+	if r.hasCallbacks() {
+		// TODO: raise error
+	}
+
+	if txid != "" {
+		trackDirty := func(node *node) *Branch {
+			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
+			return node.MakeBranch(txid)
+		}
+		result = r.node.Update(path, data, strict, txid, trackDirty)
+	} else {
+		result = r.node.Update(path, data, strict, "", nil)
+	}
+
+	if result != nil {
+		if r.Proxy.FullPath != r.Proxy.Path {
+			r.syncParent(result, txid)
+		} else {
+			result.Finalize(false)
+		}
+	}
+
+	r.node.GetRoot().ExecuteCallbacks()
+
+	return result
+}
+
+// Add creates a new object at the given path with the provided data
+func (r *root) Add(path string, data interface{}, txid string, makeBranch MakeBranchFunction) Revision {
+	var result Revision
+
+	if makeBranch != nil {
+		// TODO: raise error
+	}
+
+	if r.hasCallbacks() {
+		// TODO: raise error
+	}
+
+	if txid != "" {
+		trackDirty := func(node *node) *Branch {
+			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
+			return node.MakeBranch(txid)
+		}
+		result = r.node.Add(path, data, txid, trackDirty)
+	} else {
+		result = r.node.Add(path, data, "", nil)
+	}
+
+	if result != nil {
+		result.Finalize(true)
+		r.node.GetRoot().ExecuteCallbacks()
+	}
+	return result
+}
+
+// Remove discards an object at a given path
+func (r *root) Remove(path string, txid string, makeBranch MakeBranchFunction) Revision {
+	var result Revision
+
+	if makeBranch != nil {
+		// TODO: raise error
+	}
+
+	if r.hasCallbacks() {
+		// TODO: raise error
+	}
+
+	if txid != "" {
+		trackDirty := func(node *node) *Branch {
+			r.DirtyNodes[txid] = append(r.DirtyNodes[txid], node)
+			return node.MakeBranch(txid)
+		}
+		result = r.node.Remove(path, txid, trackDirty)
+	} else {
+		result = r.node.Remove(path, "", nil)
+	}
+
+	r.node.GetRoot().ExecuteCallbacks()
+
+	return result
+}
+
+// MakeLatest updates a branch with the latest node revision
+func (r *root) MakeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
+	r.makeLatest(branch, revision, changeAnnouncement)
+}
+
+func (r *root) MakeRevision(branch *Branch, data interface{}, children map[string][]Revision) Revision {
+	if r.RevisionClass.(reflect.Type) == reflect.TypeOf(PersistedRevision{}) {
+		return NewPersistedRevision(branch, data, children)
+	}
+
+	return NewNonPersistedRevision(r, branch, data, children)
+}
+
+func (r *root) makeLatest(branch *Branch, revision Revision, changeAnnouncement []ChangeTuple) {
+	r.node.makeLatest(branch, revision, changeAnnouncement)
+
+	if r.KvStore != nil && branch.Txid == "" {
+		tags := make(map[string]string)
+		for k, v := range r.node.Tags {
+			tags[k] = v.GetHash()
+		}
+		data := &rootData{
+			Latest: branch.GetLatest().GetHash(),
+			Tags:   tags,
+		}
+		if blob, err := json.Marshal(data); err != nil {
+			// TODO report error
+		} else {
+			log.Debugf("Changing root to : %s", string(blob))
+			if err := r.KvStore.Put("root", blob); err != nil {
+				log.Errorf("failed to properly put value in kvstore - err: %s", err.Error())
+			}
+		}
+	}
+}
+
+type rootData struct {
+	Latest string            `json:"latest"`
+	Tags   map[string]string `json:"tags"`
+}
\ No newline at end of file
diff --git a/vendor/github.com/opencord/voltha-go/db/model/transaction.go b/vendor/github.com/opencord/voltha-go/db/model/transaction.go
new file mode 100644
index 0000000..fa8de1d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/transaction.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package model
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
+type Transaction struct {
+	proxy *Proxy
+	txid  string
+}
+
+func NewTransaction(proxy *Proxy, txid string) *Transaction {
+	tx := &Transaction{
+		proxy: proxy,
+		txid:  txid,
+	}
+	return tx
+}
+func (t *Transaction) Get(path string, depth int, deep bool) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	// TODO: review the return values at the different layers
+	return t.proxy.Get(path, depth, deep, t.txid)
+}
+func (t *Transaction) Update(path string, data interface{}, strict bool) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Update(path, data, strict, t.txid)
+}
+func (t *Transaction) Add(path string, data interface{}) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Add(path, data, t.txid)
+}
+func (t *Transaction) Remove(path string) interface{} {
+	if t.txid == "" {
+		log.Errorf("closed transaction")
+		return nil
+	}
+	return t.proxy.Remove(path, t.txid)
+}
+func (t *Transaction) Cancel() {
+	t.proxy.cancelTransaction(t.txid)
+	t.txid = ""
+}
+func (t *Transaction) Commit() {
+	t.proxy.commitTransaction(t.txid)
+	t.txid = ""
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/utils.go b/vendor/github.com/opencord/voltha-go/db/model/utils.go
new file mode 100644
index 0000000..f0fd618
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/db/model/utils.go
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package model
+
+import (
+	"bytes"
+	"encoding/gob"
+	"reflect"
+	"strings"
+)
+
+// IsProtoMessage determines whether the specified object implements the proto.Message interface
+// (by checking for a ProtoMessage method)
+func IsProtoMessage(object interface{}) bool {
+	var ok = false
+
+	if object != nil {
+		st := reflect.TypeOf(object)
+		_, ok = st.MethodByName("ProtoMessage")
+	}
+	return ok
+}
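+
+// Editor's note: an illustrative sketch, not part of the vendored file. The
+// check is purely structural: any value whose method set includes a method
+// named ProtoMessage passes, e.g. (with a generated proto type):
+//
+//	IsProtoMessage(&voltha.Device{}) // true: generated protos have ProtoMessage()
+//	IsProtoMessage("plain string")   // false
+//	IsProtoMessage(nil)              // false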
+
+// FindOwnerType traverses a data structure to find the parent type of the field with the specified json tag name
+func FindOwnerType(obj reflect.Value, name string, depth int, found bool) reflect.Type {
+	prefix := ""
+	for d := 0; d < depth; d++ {
+		prefix += ">>"
+	}
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		if found {
+			return obj.Type()
+		}
+
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := FindOwnerType(n.Elem(), name, depth+1, found); rc != nil {
+			return rc
+		}
+
+	case reflect.Struct:
+		if found {
+			return obj.Type()
+		}
+
+		for i := 0; i < obj.NumField(); i++ {
+			v := reflect.Indirect(obj)
+
+			json := strings.Split(v.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return FindOwnerType(obj.Field(i), name, depth+1, true)
+			}
+
+			if rc := FindOwnerType(obj.Field(i), name, depth+1, found); rc != nil {
+				return rc
+			}
+		}
+	case reflect.Slice:
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < n.Elem().Len(); i++ {
+			if found {
+				return reflect.ValueOf(n.Elem().Index(i).Interface()).Type()
+			}
+		}
+
+		for i := 0; i < obj.Len(); i++ {
+			if found {
+				return obj.Index(i).Type()
+			}
+
+			if rc := FindOwnerType(obj.Index(i), name, depth+1, found); rc != nil {
+				return rc
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return nil
+}
+
+// FindKeyOwner traverses a structure to find the type that owns the field with the specified json tag name
+func FindKeyOwner(iface interface{}, name string, depth int) interface{} {
+	obj := reflect.ValueOf(iface)
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := FindKeyOwner(n.Elem().Interface(), name, depth+1); rc != nil {
+			return rc
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return obj.Type().Field(i).Type
+			}
+
+			if rc := FindKeyOwner(obj.Field(i).Interface(), name, depth+1); rc != nil {
+				return rc
+			}
+		}
+
+	case reflect.Slice:
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < n.Elem().Len(); i++ {
+			if rc := FindKeyOwner(n.Elem().Index(i).Interface(), name, depth+1); rc != nil {
+				return rc
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return nil
+}
+
+// GetAttributeValue traverses a structure to find the value of the attribute with the specified json tag name
+// FIXME: Need to figure out if GetAttributeValue and GetAttributeStructure can become one
+// Code is repeated in both, but outputs have a different purpose
+// Left as-is for now to get things working
+func GetAttributeValue(data interface{}, name string, depth int) (string, reflect.Value) {
+	var attribName string
+	var attribValue reflect.Value
+	obj := reflect.ValueOf(data)
+
+	if !obj.IsValid() {
+		return attribName, attribValue
+	}
+
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		if obj.IsNil() {
+			return attribName, attribValue
+		}
+
+		if attribName, attribValue = GetAttributeValue(obj.Elem().Interface(), name, depth+1); attribValue.IsValid() {
+			return attribName, attribValue
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return obj.Type().Field(i).Name, obj.Field(i)
+			}
+
+			if obj.Field(i).IsValid() {
+				if attribName, attribValue = GetAttributeValue(obj.Field(i).Interface(), name, depth+1); attribValue.IsValid() {
+					return attribName, attribValue
+				}
+			}
+		}
+
+	case reflect.Slice:
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < obj.Len(); i++ {
+			if attribName, attribValue = GetAttributeValue(obj.Index(i).Interface(), name, depth+1); attribValue.IsValid() {
+				return attribName, attribValue
+			}
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return attribName, attribValue
+
+}
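+
+// Editor's note: an illustrative sketch, not part of the vendored file. Given a
+// hypothetical struct with json tags, GetAttributeValue resolves a tag name to
+// the Go field name and its reflect.Value:
+//
+//	type device struct {
+//		Id   string `json:"id"`
+//		Root bool   `json:"root"`
+//	}
+//	name, value := GetAttributeValue(&device{Id: "olt-1"}, "id", 0)
+//	// name == "Id", value.String() == "olt-1"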
+
+// GetAttributeStructure traverses a structure to find the StructField of the named attribute
+// FIXME: See GetAttributeValue(...) comment
+func GetAttributeStructure(data interface{}, name string, depth int) reflect.StructField {
+	var result reflect.StructField
+	obj := reflect.ValueOf(data)
+
+	if !obj.IsValid() {
+		return result
+	}
+
+	k := obj.Kind()
+	switch k {
+	case reflect.Ptr:
+		t := obj.Type().Elem()
+		n := reflect.New(t)
+
+		if rc := GetAttributeStructure(n.Elem().Interface(), name, depth+1); rc.Name != "" {
+			return rc
+		}
+
+	case reflect.Struct:
+		for i := 0; i < obj.NumField(); i++ {
+			v := reflect.Indirect(obj)
+			json := strings.Split(obj.Type().Field(i).Tag.Get("json"), ",")
+
+			if json[0] == name {
+				return v.Type().Field(i)
+			}
+
+			if obj.Field(i).IsValid() {
+				if rc := GetAttributeStructure(obj.Field(i).Interface(), name, depth+1); rc.Name != "" {
+					return rc
+				}
+			}
+		}
+
+	case reflect.Slice:
+		s := reflect.MakeSlice(obj.Type(), 1, 1)
+		n := reflect.New(obj.Type())
+		n.Elem().Set(s)
+
+		for i := 0; i < obj.Len(); i++ {
+			if rc := GetAttributeStructure(obj.Index(i).Interface(), name, depth+1); rc.Name != "" {
+				return rc
+			}
+
+		}
+	default:
+		//log.Debugf("%s Unhandled <%+v> ... It's a %+v\n", prefix, obj, k)
+	}
+
+	return result
+
+}
+
+// clone2 deep-copies a gob-encodable value; a must be a non-nil pointer.
+func clone2(a interface{}) interface{} {
+	// Allocate a fresh value of the same underlying type to decode into;
+	// gob.Decode requires a pointer, which b.Elem().Interface() was not.
+	b := reflect.New(reflect.TypeOf(a).Elem())
+	buff := new(bytes.Buffer)
+	enc := gob.NewEncoder(buff)
+	dec := gob.NewDecoder(buff)
+	if err := enc.Encode(a); err != nil {
+		return nil
+	}
+	if err := dec.Decode(b.Interface()); err != nil {
+		return nil
+	}
+	return b.Interface()
+}
+
+// clone deep-copies a into b via a gob round-trip; b must be a pointer to a
+// gob-encodable type. Encode/Decode errors are ignored (best effort), so on
+// failure b may be returned unmodified or partially filled.
+func clone(a, b interface{}) interface{} {
+	buff := new(bytes.Buffer)
+	enc := gob.NewEncoder(buff)
+	dec := gob.NewDecoder(buff)
+	enc.Encode(a)
+	dec.Decode(b)
+	return b
+}
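+
+// Editor's note: an illustrative sketch, not part of the vendored file, reusing
+// the hypothetical device struct from the sketch above:
+//
+//	src := &device{Id: "olt-1", Root: true}
+//	dst := clone(src, &device{}).(*device)
+//	// dst is a deep copy: mutating dst afterwards does not affect src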
diff --git a/vendor/github.com/opencord/voltha-go/python/adapters/common/frameio/third_party/oftest/LICENSE b/vendor/github.com/opencord/voltha-go/python/adapters/common/frameio/third_party/oftest/LICENSE
deleted file mode 100644
index 3216042..0000000
--- a/vendor/github.com/opencord/voltha-go/python/adapters/common/frameio/third_party/oftest/LICENSE
+++ /dev/null
@@ -1,36 +0,0 @@
-OpenFlow Test Framework
-
-Copyright (c) 2010 The Board of Trustees of The Leland Stanford
-Junior University
-
-Except where otherwise noted, this software is distributed under
-the OpenFlow Software License.  See
-http://www.openflowswitch.org/wp/legal/ for current details.
-
-We are making the OpenFlow specification and associated documentation
-(Software) available for public use and benefit with the expectation
-that others will use, modify and enhance the Software and contribute
-those enhancements back to the community. However, since we would like
-to make the Software available for broadest use, with as few
-restrictions as possible permission is hereby granted, free of charge,
-to any person obtaining a copy of this Software to deal in the
-Software under the copyrights without restriction, including without
-limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-The name and trademarks of copyright holder(s) may NOT be used in
-advertising or publicity pertaining to the Software or any derivatives
-without specific, written prior permission.
diff --git a/vendor/github.com/opencord/voltha-go/python/protos/third_party/google/LICENSE b/vendor/github.com/opencord/voltha-go/python/protos/third_party/google/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/github.com/opencord/voltha-go/python/protos/third_party/google/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
new file mode 100644
index 0000000..ec191dc
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+Defines a DeviceManager interface, currently used only for unit testing of the
+flow decomposer.
+*/
+package coreIf
+
+import "github.com/opencord/voltha-protos/go/voltha"
+
+// DeviceManager represents a generic device manager
+type DeviceManager interface {
+	GetDevice(string) (*voltha.Device, error)
+	IsRootDevice(string) (bool, error)
+}
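+
+// Editor's note: an illustrative sketch, not part of the vendored file. Since
+// the interface exists for unit testing the flow decomposer, a minimal mock
+// could look like the following (assuming voltha.Device exposes its root flag
+// via GetRoot()):
+//
+//	type mockDeviceManager struct {
+//		devices map[string]*voltha.Device
+//	}
+//
+//	func (m *mockDeviceManager) GetDevice(id string) (*voltha.Device, error) {
+//		return m.devices[id], nil
+//	}
+//
+//	func (m *mockDeviceManager) IsRootDevice(id string) (bool, error) {
+//		return m.devices[id].GetRoot(), nil
+//	}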
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/logical_device_agent_if.go b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/logical_device_agent_if.go
new file mode 100644
index 0000000..8394fac
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/logical_device_agent_if.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+Defines a LogicalDeviceAgent interface, currently used only for unit testing of
+the flow decomposer.
+*/
+package coreIf
+
+import (
+	"github.com/opencord/voltha-go/rw_core/graph"
+	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+// LogicalDeviceAgent represents a generic logical device agent
+type LogicalDeviceAgent interface {
+	GetLogicalDevice() (*voltha.LogicalDevice, error)
+	GetDeviceGraph() *graph.DeviceGraph
+	GetAllDefaultRules() *utils.DeviceRules
+	GetWildcardInputPorts(excludePort ...uint32) []uint32
+	GetRoute(ingressPortNo uint32, egressPortNo uint32) []graph.RouteHop
+}
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/flow_decomposition/flow_decomposer.go b/vendor/github.com/opencord/voltha-go/rw_core/flow_decomposition/flow_decomposer.go
new file mode 100644
index 0000000..ec2904f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/flow_decomposition/flow_decomposer.go
@@ -0,0 +1,1273 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package flow_decomposition
+
+import (
+	"bytes"
+	"crypto/md5"
+	"fmt"
+	"math/big"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/rw_core/coreIf"
+	"github.com/opencord/voltha-go/rw_core/graph"
+	fu "github.com/opencord/voltha-go/rw_core/utils"
+	ofp "github.com/opencord/voltha-protos/go/openflow_13"
+	"github.com/opencord/voltha-protos/go/voltha"
+)
+
+func init() {
+	log.AddPackage(log.JSON, log.DebugLevel, nil)
+}
+
+var (
+	// Instructions shortcut
+	APPLY_ACTIONS = ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS
+
+	//OFPAT_* shortcuts
+	OUTPUT       = ofp.OfpActionType_OFPAT_OUTPUT
+	COPY_TTL_OUT = ofp.OfpActionType_OFPAT_COPY_TTL_OUT
+	COPY_TTL_IN  = ofp.OfpActionType_OFPAT_COPY_TTL_IN
+	SET_MPLS_TTL = ofp.OfpActionType_OFPAT_SET_MPLS_TTL
+	DEC_MPLS_TTL = ofp.OfpActionType_OFPAT_DEC_MPLS_TTL
+	PUSH_VLAN    = ofp.OfpActionType_OFPAT_PUSH_VLAN
+	POP_VLAN     = ofp.OfpActionType_OFPAT_POP_VLAN
+	PUSH_MPLS    = ofp.OfpActionType_OFPAT_PUSH_MPLS
+	POP_MPLS     = ofp.OfpActionType_OFPAT_POP_MPLS
+	SET_QUEUE    = ofp.OfpActionType_OFPAT_SET_QUEUE
+	GROUP        = ofp.OfpActionType_OFPAT_GROUP
+	SET_NW_TTL   = ofp.OfpActionType_OFPAT_SET_NW_TTL
+	NW_TTL       = ofp.OfpActionType_OFPAT_DEC_NW_TTL
+	SET_FIELD    = ofp.OfpActionType_OFPAT_SET_FIELD
+	PUSH_PBB     = ofp.OfpActionType_OFPAT_PUSH_PBB
+	POP_PBB      = ofp.OfpActionType_OFPAT_POP_PBB
+	EXPERIMENTER = ofp.OfpActionType_OFPAT_EXPERIMENTER
+
+	//OFPXMT_OFB_* shortcuts (incomplete)
+	IN_PORT         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+	IN_PHY_PORT     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT
+	METADATA        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_METADATA
+	ETH_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST
+	ETH_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC
+	ETH_TYPE        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE
+	VLAN_VID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID
+	VLAN_PCP        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP
+	IP_DSCP         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP
+	IP_ECN          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN
+	IP_PROTO        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO
+	IPV4_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC
+	IPV4_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST
+	TCP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC
+	TCP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST
+	UDP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC
+	UDP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST
+	SCTP_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC
+	SCTP_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST
+	ICMPV4_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE
+	ICMPV4_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE
+	ARP_OP          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP
+	ARP_SPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA
+	ARP_TPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA
+	ARP_SHA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA
+	ARP_THA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA
+	IPV6_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC
+	IPV6_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST
+	IPV6_FLABEL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL
+	ICMPV6_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE
+	ICMPV6_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE
+	IPV6_ND_TARGET  = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET
+	OFB_IPV6_ND_SLL = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL
+	IPV6_ND_TLL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL
+	MPLS_LABEL      = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL
+	MPLS_TC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC
+	MPLS_BOS        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS
+	PBB_ISID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID
+	TUNNEL_ID       = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID
+	IPV6_EXTHDR     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR
+)
+
+//ofp_action_* shortcuts
+
+func Output(port uint32, maxLen ...ofp.OfpControllerMaxLen) *ofp.OfpAction {
+	maxLength := ofp.OfpControllerMaxLen_OFPCML_MAX
+	if len(maxLen) > 0 {
+		maxLength = maxLen[0]
+	}
+	return &ofp.OfpAction{Type: OUTPUT, Action: &ofp.OfpAction_Output{Output: &ofp.OfpActionOutput{Port: port, MaxLen: uint32(maxLength)}}}
+}
+
+func MplsTtl(ttl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: SET_MPLS_TTL, Action: &ofp.OfpAction_MplsTtl{MplsTtl: &ofp.OfpActionMplsTtl{MplsTtl: ttl}}}
+}
+
+func PushVlan(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: PUSH_VLAN, Action: &ofp.OfpAction_Push{Push: &ofp.OfpActionPush{Ethertype: ethType}}}
+}
+
+func PopVlan() *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_VLAN}
+}
+
+func PopMpls(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_MPLS, Action: &ofp.OfpAction_PopMpls{PopMpls: &ofp.OfpActionPopMpls{Ethertype: ethType}}}
+}
+
+func Group(groupId uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: GROUP, Action: &ofp.OfpAction_Group{Group: &ofp.OfpActionGroup{GroupId: groupId}}}
+}
+
+func NwTtl(nwTtl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: NW_TTL, Action: &ofp.OfpAction_NwTtl{NwTtl: &ofp.OfpActionNwTtl{NwTtl: nwTtl}}}
+}
+
+func SetField(field *ofp.OfpOxmOfbField) *ofp.OfpAction {
+	actionSetField := &ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: field}}
+	return &ofp.OfpAction{Type: SET_FIELD, Action: &ofp.OfpAction_SetField{SetField: &ofp.OfpActionSetField{Field: actionSetField}}}
+}
+
+func Experimenter(experimenter uint32, data []byte) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: EXPERIMENTER, Action: &ofp.OfpAction_Experimenter{Experimenter: &ofp.OfpActionExperimenter{Experimenter: experimenter, Data: data}}}
+}
+
+//ofb_field generators (incomplete set)
+
+func InPort(inPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPort}}
+}
+
+func InPhyPort(inPhyPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PHY_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPhyPort}}
+}
+
+func Metadata_ofp(tableMetadata uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: METADATA, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: tableMetadata}}
+}
+
+// TODO: should Metadata_ofp be used here?
+func EthDst(ethDst uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_DST, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethDst}}
+}
+
+// TODO: should Metadata_ofp be used here?
+func EthSrc(ethSrc uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_SRC, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethSrc}}
+}
+
+func EthType(ethType uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_TYPE, Value: &ofp.OfpOxmOfbField_EthType{EthType: ethType}}
+}
+
+func VlanVid(vlanVid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_VID, Value: &ofp.OfpOxmOfbField_VlanVid{VlanVid: vlanVid}}
+}
+
+func VlanPcp(vlanPcp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_PCP, Value: &ofp.OfpOxmOfbField_VlanPcp{VlanPcp: vlanPcp}}
+}
+
+func IpDscp(ipDscp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_DSCP, Value: &ofp.OfpOxmOfbField_IpDscp{IpDscp: ipDscp}}
+}
+
+func IpEcn(ipEcn uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_ECN, Value: &ofp.OfpOxmOfbField_IpEcn{IpEcn: ipEcn}}
+}
+
+func IpProto(ipProto uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_PROTO, Value: &ofp.OfpOxmOfbField_IpProto{IpProto: ipProto}}
+}
+
+func Ipv4Src(ipv4Src uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_SRC, Value: &ofp.OfpOxmOfbField_Ipv4Src{Ipv4Src: ipv4Src}}
+}
+
+func Ipv4Dst(ipv4Dst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_DST, Value: &ofp.OfpOxmOfbField_Ipv4Dst{Ipv4Dst: ipv4Dst}}
+}
+
+func TcpSrc(tcpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_SRC, Value: &ofp.OfpOxmOfbField_TcpSrc{TcpSrc: tcpSrc}}
+}
+
+func TcpDst(tcpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_DST, Value: &ofp.OfpOxmOfbField_TcpDst{TcpDst: tcpDst}}
+}
+
+func UdpSrc(udpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_SRC, Value: &ofp.OfpOxmOfbField_UdpSrc{UdpSrc: udpSrc}}
+}
+
+func UdpDst(udpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_DST, Value: &ofp.OfpOxmOfbField_UdpDst{UdpDst: udpDst}}
+}
+
+func SctpSrc(sctpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_SRC, Value: &ofp.OfpOxmOfbField_SctpSrc{SctpSrc: sctpSrc}}
+}
+
+func SctpDst(sctpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_DST, Value: &ofp.OfpOxmOfbField_SctpDst{SctpDst: sctpDst}}
+}
+
+func Icmpv4Type(icmpv4Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv4Type{Icmpv4Type: icmpv4Type}}
+}
+
+func Icmpv4Code(icmpv4Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_CODE, Value: &ofp.OfpOxmOfbField_Icmpv4Code{Icmpv4Code: icmpv4Code}}
+}
+
+func ArpOp(arpOp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_OP, Value: &ofp.OfpOxmOfbField_ArpOp{ArpOp: arpOp}}
+}
+
+func ArpSpa(arpSpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SPA, Value: &ofp.OfpOxmOfbField_ArpSpa{ArpSpa: arpSpa}}
+}
+
+func ArpTpa(arpTpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_TPA, Value: &ofp.OfpOxmOfbField_ArpTpa{ArpTpa: arpTpa}}
+}
+
+func ArpSha(arpSha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SHA, Value: &ofp.OfpOxmOfbField_ArpSha{ArpSha: arpSha}}
+}
+
+func ArpTha(arpTha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_THA, Value: &ofp.OfpOxmOfbField_ArpTha{ArpTha: arpTha}}
+}
+
+func Ipv6Src(ipv6Src []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_SRC, Value: &ofp.OfpOxmOfbField_Ipv6Src{Ipv6Src: ipv6Src}}
+}
+
+func Ipv6Dst(ipv6Dst []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_DST, Value: &ofp.OfpOxmOfbField_Ipv6Dst{Ipv6Dst: ipv6Dst}}
+}
+
+func Ipv6Flabel(ipv6Flabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_FLABEL, Value: &ofp.OfpOxmOfbField_Ipv6Flabel{Ipv6Flabel: ipv6Flabel}}
+}
+
+func Icmpv6Type(icmpv6Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv6Type{Icmpv6Type: icmpv6Type}}
+}
+
+func Icmpv6Code(icmpv6Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_CODE, Value: &ofp.OfpOxmOfbField_Icmpv6Code{Icmpv6Code: icmpv6Code}}
+}
+
+func Ipv6NdTarget(ipv6NdTarget []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TARGET, Value: &ofp.OfpOxmOfbField_Ipv6NdTarget{Ipv6NdTarget: ipv6NdTarget}}
+}
+
+func OfbIpv6NdSll(ofbIpv6NdSll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: OFB_IPV6_ND_SLL, Value: &ofp.OfpOxmOfbField_Ipv6NdSsl{Ipv6NdSsl: ofbIpv6NdSll}}
+}
+
+func Ipv6NdTll(ipv6NdTll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TLL, Value: &ofp.OfpOxmOfbField_Ipv6NdTll{Ipv6NdTll: ipv6NdTll}}
+}
+
+func MplsLabel(mplsLabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_LABEL, Value: &ofp.OfpOxmOfbField_MplsLabel{MplsLabel: mplsLabel}}
+}
+
+func MplsTc(mplsTc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_TC, Value: &ofp.OfpOxmOfbField_MplsTc{MplsTc: mplsTc}}
+}
+
+func MplsBos(mplsBos uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_BOS, Value: &ofp.OfpOxmOfbField_MplsBos{MplsBos: mplsBos}}
+}
+
+func PbbIsid(pbbIsid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: PBB_ISID, Value: &ofp.OfpOxmOfbField_PbbIsid{PbbIsid: pbbIsid}}
+}
+
+func TunnelId(tunnelId uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TUNNEL_ID, Value: &ofp.OfpOxmOfbField_TunnelId{TunnelId: tunnelId}}
+}
+
+func Ipv6Exthdr(ipv6Exthdr uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_EXTHDR, Value: &ofp.OfpOxmOfbField_Ipv6Exthdr{Ipv6Exthdr: ipv6Exthdr}}
+}
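+
+// Editor's note: an illustrative sketch, not part of the vendored file. The
+// shortcuts above compose into match fields and actions; e.g. an EAPOL trap
+// rule on a hypothetical UNI port 16 could be assembled as:
+//
+//	fa := &fu.FlowArgs{
+//		KV: fu.OfpFlowModArgs{"priority": 1000},
+//		MatchFields: []*ofp.OfpOxmOfbField{
+//			InPort(16),
+//			EthType(0x888e),
+//		},
+//		Actions: []*ofp.OfpAction{
+//			Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+//		},
+//	}
+//	flow := MkFlowStat(fa) // MkFlowStat is defined further down in this file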
+
+//frequently used extractors
+
+func excludeAction(action *ofp.OfpAction, exclude ...ofp.OfpActionType) bool {
+	for _, actionToExclude := range exclude {
+		if action.Type == actionToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetActions(flow *ofp.OfpFlowStats, exclude ...ofp.OfpActionType) []*ofp.OfpAction {
+	if flow == nil {
+		return nil
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			if len(exclude) == 0 {
+				return instActions.Actions
+			} else {
+				filteredAction := make([]*ofp.OfpAction, 0)
+				for _, action := range instActions.Actions {
+					if !excludeAction(action, exclude...) {
+						filteredAction = append(filteredAction, action)
+					}
+				}
+				return filteredAction
+			}
+		}
+	}
+	return nil
+}
+
+func UpdateOutputPortByActionType(flow *ofp.OfpFlowStats, actionType uint32, toPort uint32) *ofp.OfpFlowStats {
+	if flow == nil {
+		return nil
+	}
+	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	nFlow.Instructions = nil
+	nInsts := make([]*ofp.OfpInstruction, 0)
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == actionType {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			nActions := make([]*ofp.OfpAction, 0)
+			for _, action := range instActions.Actions {
+				if action.GetOutput() != nil {
+					nActions = append(nActions, Output(toPort))
+				} else {
+					nActions = append(nActions, action)
+				}
+			}
+			instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: nActions}}
+			nInsts = append(nInsts, &ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction})
+		} else {
+			nInsts = append(nInsts, instruction)
+		}
+	}
+	nFlow.Instructions = nInsts
+	return nFlow
+}
+
+func excludeOxmOfbField(field *ofp.OfpOxmOfbField, exclude ...ofp.OxmOfbFieldTypes) bool {
+	for _, fieldToExclude := range exclude {
+		if field.Type == fieldToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetOfbFields(flow *ofp.OfpFlowStats, exclude ...ofp.OxmOfbFieldTypes) []*ofp.OfpOxmOfbField {
+	if flow == nil || flow.Match == nil || flow.Match.Type != ofp.OfpMatchType_OFPMT_OXM {
+		return nil
+	}
+	ofbFields := make([]*ofp.OfpOxmOfbField, 0)
+	for _, field := range flow.Match.OxmFields {
+		if field.OxmClass == ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+			ofbFields = append(ofbFields, field.GetOfbField())
+		}
+	}
+	if len(exclude) == 0 {
+		return ofbFields
+	} else {
+		filteredFields := make([]*ofp.OfpOxmOfbField, 0)
+		for _, ofbField := range ofbFields {
+			if !excludeOxmOfbField(ofbField, exclude...) {
+				filteredFields = append(filteredFields, ofbField)
+			}
+		}
+		return filteredFields
+	}
+}
+
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
+func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == OUTPUT {
+			out := action.GetOutput()
+			if out == nil {
+				return 0
+			}
+			return out.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetInPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == IN_PORT {
+			return field.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetGotoTableId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE) {
+			gotoTable := instruction.GetGotoTable()
+			if gotoTable == nil {
+				return 0
+			}
+			return gotoTable.GetTableId()
+		}
+	}
+	return 0
+}
+
+// GetMetaData is the legacy getter; it returns only the lower 32 bits of the metadata
+func GetMetaData(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return uint32(field.GetTableMetadata() & 0xffffffff)
+		}
+	}
+	return 0
+}
+
+func GetMetaData64Bit(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return field.GetTableMetadata()
+		}
+	}
+	return 0
+}
+
+// GetPortNumberFromMetadata retrieves the port number from the Metadata_ofp. The port number (UNI on ONU) is in the
+// lower 32-bits of Metadata_ofp and the inner_tag is in the upper 32-bits. This is set in the ONOS OltPipeline as
+// a Metadata_ofp field
+func GetPortNumberFromMetadata(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		log.Debugw("onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return md & 0xffffffff
+}
+
+// GetInnerTagFromMetaData retrieves the inner tag from the Metadata_ofp. The port number (UNI on ONU) is in the
+// lower 32-bits of Metadata_ofp and the inner_tag is in the upper 32-bits. This is set in the ONOS OltPipeline as
+// a Metadata_ofp field
+func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		log.Debugw("onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return (md >> 32) & 0xffffffff
+}
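+
+// Editor's note: an illustrative sketch, not part of the vendored file, showing
+// the 64-bit metadata encoding the two getters above unpack (values hypothetical):
+//
+//	md := uint64(257)<<32 | uint64(16) // inner_tag=257 (upper 32 bits), UNI port=16 (lower 32 bits)
+//	// GetPortNumberFromMetadata: md & 0xffffffff         == 16
+//	// GetInnerTagFromMetaData:   (md >> 32) & 0xffffffff == 257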
+
+func HasNextTable(flow *ofp.OfpFlowStats) bool {
+	if flow == nil {
+		return false
+	}
+	return GetGotoTableId(flow) != 0
+}
+
+func GetGroup(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == GROUP {
+			grp := action.GetGroup()
+			if grp == nil {
+				return 0
+			}
+			return grp.GetGroupId()
+		}
+	}
+	return 0
+}
+
+func HasGroup(flow *ofp.OfpFlowStats) bool {
+	return GetGroup(flow) != 0
+}
+
+// GetNextTableId returns the next table ID if the "table_id" is present in the map, otherwise return nil
+func GetNextTableId(kw fu.OfpFlowModArgs) *uint32 {
+	if val, exist := kw["table_id"]; exist {
+		ret := uint32(val)
+		return &ret
+	}
+	return nil
+}
+
+// hashFlowStats returns a 64-bit hash (md5-based, truncated; not guaranteed unique) for a flow, covering the
+// following attributes: 'table_id', 'priority', 'flags', 'cookie', 'match', '_instruction_string'
+func hashFlowStats(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil { // Should never happen
+		return 0
+	}
+	// Create string with the instructions field first
+	var instructionString bytes.Buffer
+	for _, instruction := range flow.Instructions {
+		instructionString.WriteString(instruction.String())
+	}
+	var flowString = fmt.Sprintf("%d%d%d%d%s%s", flow.TableId, flow.Priority, flow.Flags, flow.Cookie, flow.Match.String(), instructionString.String())
+	h := md5.New()
+	h.Write([]byte(flowString))
+	hash := big.NewInt(0)
+	hash.SetBytes(h.Sum(nil))
+	return hash.Uint64()
+}
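+
+// Editor's note: an illustrative sketch, not part of the vendored file. Because
+// the hash covers the instructions, any rewrite of an instruction (e.g. via
+// UpdateOutputPortByActionType) must be followed by re-hashing, as done in
+// updateOutputPortForControllerBoundFlowForParentDevide below:
+//
+//	f = UpdateOutputPortByActionType(f, uint32(APPLY_ACTIONS), uint32(ofp.OfpPortNo_OFPP_CONTROLLER))
+//	f.Id = hashFlowStats(f) // the id changes whenever the instructions change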
+
+// FlowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
+func FlowStatsEntryFromFlowModMessage(mod *ofp.OfpFlowMod) *ofp.OfpFlowStats {
+	flow := &ofp.OfpFlowStats{}
+	if mod == nil {
+		return flow
+	}
+	flow.TableId = mod.TableId
+	flow.Priority = mod.Priority
+	flow.IdleTimeout = mod.IdleTimeout
+	flow.HardTimeout = mod.HardTimeout
+	flow.Flags = mod.Flags
+	flow.Cookie = mod.Cookie
+	flow.Match = mod.Match
+	flow.Instructions = mod.Instructions
+	flow.Id = hashFlowStats(flow)
+	return flow
+}
+
+func GroupEntryFromGroupMod(mod *ofp.OfpGroupMod) *ofp.OfpGroupEntry {
+	group := &ofp.OfpGroupEntry{}
+	if mod == nil {
+		return group
+	}
+	group.Desc = &ofp.OfpGroupDesc{Type: mod.Type, GroupId: mod.GroupId, Buckets: mod.Buckets}
+	group.Stats = &ofp.OfpGroupStats{GroupId: mod.GroupId}
+	//TODO do we need to instantiate bucket bins?
+	return group
+}
+
+func MkOxmFields(matchFields []ofp.OfpOxmField) []*ofp.OfpOxmField {
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	return oxmFields
+}
+
+func MkInstructionsFromActions(actions []*ofp.OfpAction) []*ofp.OfpInstruction {
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+	return instructions
+}
+
+// MkSimpleFlowMod is a convenience function to generate an ofp_flow_mod message with an OXM BASIC match composed
+// from the match_fields, and a single APPLY_ACTIONS instruction with a list of ofp_action objects.
+func MkSimpleFlowMod(matchFields []*ofp.OfpOxmField, actions []*ofp.OfpAction, command *ofp.OfpFlowModCommand, kw fu.OfpFlowModArgs) *ofp.OfpFlowMod {
+
+	// Process actions instructions
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+
+	// Process next table
+	if tableId := GetNextTableId(kw); tableId != nil {
+		var instGotoTable ofp.OfpInstruction_GotoTable
+		instGotoTable.GotoTable = &ofp.OfpInstructionGotoTable{TableId: *tableId}
+		inst := ofp.OfpInstruction{Type: uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE), Data: &instGotoTable}
+		instructions = append(instructions, &inst)
+	}
+
+	// Process match fields
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	var match ofp.OfpMatch
+	match.Type = ofp.OfpMatchType_OFPMT_OXM
+	match.OxmFields = oxmFields
+
+	// Create ofp_flow_message
+	msg := &ofp.OfpFlowMod{}
+	if command == nil {
+		msg.Command = ofp.OfpFlowModCommand_OFPFC_ADD
+	} else {
+		msg.Command = *command
+	}
+	msg.Instructions = instructions
+	msg.Match = &match
+
+	// Set the variadic argument values
+	msg = setVariadicModAttributes(msg, kw)
+
+	return msg
+}
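+
+// Editor's note: an illustrative sketch, not part of the vendored file. A
+// hypothetical goto-table flow mod built with the helper:
+//
+//	mod := MkSimpleFlowMod(
+//		MkOxmFields([]ofp.OfpOxmField{{Field: &ofp.OfpOxmField_OfbField{OfbField: InPort(1)}}}),
+//		[]*ofp.OfpAction{Output(2)},
+//		nil, // nil command defaults to OFPFC_ADD
+//		fu.OfpFlowModArgs{"priority": 500, "table_id": 1},
+//	)
+//	// mod carries an APPLY_ACTIONS instruction plus a GOTO_TABLE(1) instruction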
+
+func MkMulticastGroupMod(groupId uint32, buckets []*ofp.OfpBucket, command *ofp.OfpGroupModCommand) *ofp.OfpGroupMod {
+	group := &ofp.OfpGroupMod{}
+	if command == nil {
+		group.Command = ofp.OfpGroupModCommand_OFPGC_ADD
+	} else {
+		group.Command = *command
+	}
+	group.Type = ofp.OfpGroupType_OFPGT_ALL
+	group.GroupId = groupId
+	group.Buckets = buckets
+	return group
+}
+
+// setVariadicModAttributes sets only the uint64 or uint32 fields of the ofp_flow_mod message
+func setVariadicModAttributes(mod *ofp.OfpFlowMod, args fu.OfpFlowModArgs) *ofp.OfpFlowMod {
+	if args == nil {
+		return mod
+	}
+	for key, val := range args {
+		switch key {
+		case "cookie":
+			mod.Cookie = val
+		case "cookie_mask":
+			mod.CookieMask = val
+		case "table_id":
+			mod.TableId = uint32(val)
+		case "idle_timeout":
+			mod.IdleTimeout = uint32(val)
+		case "hard_timeout":
+			mod.HardTimeout = uint32(val)
+		case "priority":
+			mod.Priority = uint32(val)
+		case "buffer_id":
+			mod.BufferId = uint32(val)
+		case "out_port":
+			mod.OutPort = uint32(val)
+		case "out_group":
+			mod.OutGroup = uint32(val)
+		case "flags":
+			mod.Flags = uint32(val)
+		}
+	}
+	return mod
+}
+
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
+// MkFlowStat is a helper method to build flows
+func MkFlowStat(fa *fu.FlowArgs) *ofp.OfpFlowStats {
+	//Build the matchfields
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range fa.MatchFields {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return FlowStatsEntryFromFlowModMessage(MkSimpleFlowMod(matchFields, fa.Actions, fa.Command, fa.KV))
+}
+
+func MkGroupStat(ga *fu.GroupArgs) *ofp.OfpGroupEntry {
+	return GroupEntryFromGroupMod(MkMulticastGroupMod(ga.GroupId, ga.Buckets, ga.Command))
+}
+
+type FlowDecomposer struct {
+	deviceMgr coreIf.DeviceManager
+}
+
+func NewFlowDecomposer(deviceMgr coreIf.DeviceManager) *FlowDecomposer {
+	var decomposer FlowDecomposer
+	decomposer.deviceMgr = deviceMgr
+	return &decomposer
+}
+
+//DecomposeRules decomposes per-device flows and flow-groups from the flows and groups defined on a logical device
+func (fd *FlowDecomposer) DecomposeRules(agent coreIf.LogicalDeviceAgent, flows ofp.Flows, groups ofp.FlowGroups) *fu.DeviceRules {
+	rules := agent.GetAllDefaultRules()
+	deviceRules := rules.Copy()
+
+	groupMap := make(map[uint32]*ofp.OfpGroupEntry)
+	for _, groupEntry := range groups.Items {
+		groupMap[groupEntry.Desc.GroupId] = groupEntry
+	}
+
+	var decomposedRules *fu.DeviceRules
+	for _, flow := range flows.Items {
+		decomposedRules = fd.decomposeFlow(agent, flow, groupMap)
+		for deviceId, flowAndGroups := range decomposedRules.Rules {
+			deviceRules.CreateEntryIfNotExist(deviceId)
+			deviceRules.Rules[deviceId].AddFrom(flowAndGroups)
+		}
+	}
+	return deviceRules
+}
+
+// updateOutputPortForControllerBoundFlowForParentDevide handles the special case of any controller-bound flow
+// destined for a parent device
+func (fd *FlowDecomposer) updateOutputPortForControllerBoundFlowForParentDevide(flow *ofp.OfpFlowStats,
+	dr *fu.DeviceRules) *fu.DeviceRules {
+	EAPOL := EthType(0x888e)
+	IGMP := IpProto(2)
+	UDP := IpProto(17)
+
+	newDeviceRules := dr.Copy()
+	//	Check whether we are dealing with a parent device
+	for deviceId, fg := range dr.GetRules() {
+		if root, _ := fd.deviceMgr.IsRootDevice(deviceId); root {
+			newDeviceRules.ClearFlows(deviceId)
+			for i := 0; i < fg.Flows.Len(); i++ {
+				f := fg.GetFlow(i)
+				UpdateOutPortNo := false
+				for _, field := range GetOfbFields(f) {
+					UpdateOutPortNo = (field.String() == EAPOL.String())
+					UpdateOutPortNo = UpdateOutPortNo || (field.String() == IGMP.String())
+					UpdateOutPortNo = UpdateOutPortNo || (field.String() == UDP.String())
+					if UpdateOutPortNo {
+						break
+					}
+				}
+				if UpdateOutPortNo {
+					f = UpdateOutputPortByActionType(f, uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS),
+						uint32(ofp.OfpPortNo_OFPP_CONTROLLER))
+				}
+				// Update flow Id as a change in the instruction field will result in a new flow ID
+				f.Id = hashFlowStats(f)
+				newDeviceRules.AddFlow(deviceId, (proto.Clone(f)).(*ofp.OfpFlowStats))
+			}
+		}
+	}
+	return newDeviceRules
+}
+
+//processControllerBoundFlow decomposes trap flows
+func (fd *FlowDecomposer) processControllerBoundFlow(agent coreIf.LogicalDeviceAgent, route []graph.RouteHop,
+	inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats) *fu.DeviceRules {
+
+	log.Debugw("trap-flow", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo, "flow": flow})
+	deviceRules := fu.NewDeviceRules()
+
+	egressHop := route[1]
+
+	fg := fu.NewFlowsAndGroups()
+	if agent.GetDeviceGraph().IsRootPort(inPortNo) {
+		log.Debug("trap-nni")
+		// no decomposition required - it is already an OLT flow from NNI
+		fg.AddFlow(flow)
+	} else {
+		// Trap flow for UNI port
+		log.Debug("trap-uni")
+
+		// inPortNo is 0 in the wildcard-input case; do not include the upstream port in the inputs of the VLAN-4000 flow
+		var inPorts []uint32
+		if inPortNo == 0 {
+			inPorts = agent.GetWildcardInputPorts(egressHop.Egress) // exclude egress_hop.egress_port.port_no
+		} else {
+			inPorts = []uint32{inPortNo}
+		}
+		for _, inputPort := range inPorts {
+			var fa *fu.FlowArgs
+			// Upstream flow
+			fa = &fu.FlowArgs{
+				KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+				MatchFields: []*ofp.OfpOxmOfbField{
+					InPort(egressHop.Ingress),
+					VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | inputPort),
+				},
+				Actions: []*ofp.OfpAction{
+					PushVlan(0x8100),
+					SetField(VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4000)),
+					Output(egressHop.Egress),
+				},
+			}
+			// Augment the matchfields with the ofpfields from the flow
+			fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT, VLAN_VID)...)
+			fg.AddFlow(MkFlowStat(fa))
+
+			// Downstream flow
+			fa = &fu.FlowArgs{
+				KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority)},
+				MatchFields: []*ofp.OfpOxmOfbField{
+					InPort(egressHop.Egress),
+					VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4000),
+					VlanPcp(0),
+					Metadata_ofp(uint64(inputPort)),
+				},
+				Actions: []*ofp.OfpAction{
+					PopVlan(),
+					Output(egressHop.Ingress),
+				},
+			}
+			fg.AddFlow(MkFlowStat(fa))
+		}
+	}
+	deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+	return deviceRules
+}
+
+// processUpstreamNonControllerBoundFlow processes non-controller bound flow. We assume that anything that is
+// upstream needs to get Q-in-Q treatment and that this is expressed via two flow rules, the first using the
+// goto-statement. We also assume that the inner tag is applied at the ONU, while the outer tag is
+// applied at the OLT
+func (fd *FlowDecomposer) processUpstreamNonControllerBoundFlow(agent coreIf.LogicalDeviceAgent,
+	route []graph.RouteHop, inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats) *fu.DeviceRules {
+
+	log.Debugw("upstream-non-controller-bound-flow", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo})
+	deviceRules := fu.NewDeviceRules()
+
+	ingressHop := route[0]
+	egressHop := route[1]
+
+	if HasNextTable(flow) {
+		log.Debugw("has-next-table", log.Fields{"table_id": flow.TableId})
+		if outPortNo != 0 {
+			log.Warnw("outPort-should-not-be-specified", log.Fields{"outPortNo": outPortNo})
+		}
+		var fa *fu.FlowArgs
+		fa = &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(ingressHop.Ingress),
+			},
+			Actions: GetActions(flow),
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		// Augment the Actions
+		fa.Actions = append(fa.Actions, Output(ingressHop.Egress))
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+	} else {
+		var actions []ofp.OfpActionType
+		var isOutputTypeInActions bool
+		for _, action := range GetActions(flow) {
+			actions = append(actions, action.Type)
+			if !isOutputTypeInActions && action.Type == OUTPUT {
+				isOutputTypeInActions = true
+			}
+		}
+		if len(actions) == 1 && isOutputTypeInActions {
+			var fa *fu.FlowArgs
+			// child device flow
+			fa = &fu.FlowArgs{
+				KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+				MatchFields: []*ofp.OfpOxmOfbField{
+					InPort(ingressHop.Ingress),
+				},
+				Actions: []*ofp.OfpAction{
+					Output(ingressHop.Egress),
+				},
+			}
+			// Augment the matchfields with the ofpfields from the flow
+			fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+			fg := fu.NewFlowsAndGroups()
+			fg.AddFlow(MkFlowStat(fa))
+			deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+
+			// parent device flow
+			fa = &fu.FlowArgs{
+				KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+				MatchFields: []*ofp.OfpOxmOfbField{
+					InPort(egressHop.Ingress), //egress_hop.ingress_port.port_no
+				},
+				Actions: []*ofp.OfpAction{
+					Output(egressHop.Egress),
+				},
+			}
+			// Augment the matchfields with the ofpfields from the flow
+			fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+			fg = fu.NewFlowsAndGroups()
+			fg.AddFlow(MkFlowStat(fa))
+			deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+		} else {
+			if outPortNo == 0 {
+				log.Warnw("outPort-should-be-specified", log.Fields{"outPortNo": outPortNo})
+			}
+			fa := &fu.FlowArgs{
+				KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+				MatchFields: []*ofp.OfpOxmOfbField{
+					InPort(egressHop.Ingress),
+				},
+			}
+			// Augment the matchfields with the ofpfields from the flow
+			fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+			//Augment the actions
+			filteredAction := GetActions(flow, OUTPUT)
+			filteredAction = append(filteredAction, Output(egressHop.Egress))
+			fa.Actions = filteredAction
+
+			fg := fu.NewFlowsAndGroups()
+			fg.AddFlow(MkFlowStat(fa))
+			deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+		}
+	}
+	return deviceRules
+}
+
+// processDownstreamFlowWithNextTable decomposes downstream flows that contain a next-table (goto-table) instruction
+func (fd *FlowDecomposer) processDownstreamFlowWithNextTable(agent coreIf.LogicalDeviceAgent, route []graph.RouteHop,
+	inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats) *fu.DeviceRules {
+
+	log.Debugw("downstream-flow-with-next-table", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo})
+	deviceRules := fu.NewDeviceRules()
+
+	if outPortNo != 0 {
+		log.Warnw("outPort-should-not-be-specified", log.Fields{"outPortNo": outPortNo})
+	}
+	ingressHop := route[0]
+	egressHop := route[1]
+
+	if GetMetaData(flow) != 0 {
+		log.Debugw("creating-metadata-flow", log.Fields{"flow": flow})
+		portNumber := uint32(GetPortNumberFromMetadata(flow))
+		if portNumber != 0 {
+			recalculatedRoute := agent.GetRoute(inPortNo, portNumber)
+			switch len(recalculatedRoute) {
+			case 0:
+				log.Errorw("no-route-double-tag", log.Fields{"inPortNo": inPortNo, "outPortNo": portNumber, "comment": "deleting-flow", "metadata": GetMetaData64Bit(flow)})
+				//	TODO: Delete flow
+				return deviceRules
+			case 2:
+				log.Debugw("route-found", log.Fields{"ingressHop": ingressHop, "egressHop": egressHop})
+			default:
+				log.Errorw("invalid-route-length", log.Fields{"routeLen": len(recalculatedRoute)})
+				return deviceRules
+			}
+			ingressHop = recalculatedRoute[0]
+		}
+		innerTag := GetInnerTagFromMetaData(flow)
+		if innerTag == 0 {
+			log.Errorw("no-inner-route-double-tag", log.Fields{"inPortNo": inPortNo, "outPortNo": portNumber, "comment": "deleting-flow", "metadata": GetMetaData64Bit(flow)})
+			//	TODO: Delete flow
+			return deviceRules
+		}
+		fa := &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(ingressHop.Ingress),
+				Metadata_ofp(innerTag),
+			},
+			Actions: GetActions(flow),
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT, METADATA)...)
+
+		// Augment the Actions
+		fa.Actions = append(fa.Actions, Output(ingressHop.Egress))
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+	} else { // Create standard flow
+		log.Debugw("creating-standard-flow", log.Fields{"flow": flow})
+		fa := &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(ingressHop.Ingress),
+			},
+			Actions: GetActions(flow),
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		// Augment the Actions
+		fa.Actions = append(fa.Actions, Output(ingressHop.Egress))
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+	}
+	return deviceRules
+}
+
+// processUnicastFlow decomposes unicast flows
+func (fd *FlowDecomposer) processUnicastFlow(agent coreIf.LogicalDeviceAgent, route []graph.RouteHop,
+	inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats) *fu.DeviceRules {
+
+	log.Debugw("unicast-flow", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo})
+	deviceRules := fu.NewDeviceRules()
+
+	ingressHop := route[0]
+	egressHop := route[1]
+
+	var actions []ofp.OfpActionType
+	var isOutputTypeInActions bool
+	for _, action := range GetActions(flow) {
+		actions = append(actions, action.Type)
+		if !isOutputTypeInActions && action.Type == OUTPUT {
+			isOutputTypeInActions = true
+		}
+	}
+	if len(actions) == 1 && isOutputTypeInActions {
+		// Parent device flow
+		fa := &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(ingressHop.Ingress),
+			},
+			Actions: []*ofp.OfpAction{
+				Output(ingressHop.Egress),
+			},
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+
+		// Child device flow
+		fa = &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(egressHop.Ingress),
+			},
+			Actions: []*ofp.OfpAction{
+				Output(egressHop.Egress),
+			},
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		fg = fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+	} else {
+		fa := &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(egressHop.Ingress),
+			},
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		// Augment the Actions
+		filteredAction := GetActions(flow, OUTPUT)
+		filteredAction = append(filteredAction, Output(egressHop.Egress))
+		fa.Actions = filteredAction
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+	}
+	return deviceRules
+}
+
+// processMulticastFlow decomposes multicast flows by expanding each bucket of the
+// referenced group into a parent and a child device flow
+func (fd *FlowDecomposer) processMulticastFlow(agent coreIf.LogicalDeviceAgent, route []graph.RouteHop,
+	inPortNo uint32, outPortNo uint32, flow *ofp.OfpFlowStats, grpId uint32,
+	groupMap map[uint32]*ofp.OfpGroupEntry) *fu.DeviceRules {
+
+	log.Debugw("multicast-flow", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo})
+	deviceRules := fu.NewDeviceRules()
+
+	//having no Group yet is the same as having a Group with no buckets
+	var grp *ofp.OfpGroupEntry
+	var ok bool
+	if grp, ok = groupMap[grpId]; !ok {
+		log.Warnw("Group-id-not-present-in-map", log.Fields{"grpId": grpId, "groupMap": groupMap})
+		return deviceRules
+	}
+	if grp == nil || grp.Desc == nil {
+		log.Warnw("Group-or-desc-nil", log.Fields{"grpId": grpId, "grp": grp})
+		return deviceRules
+	}
+	for _, bucket := range grp.Desc.Buckets {
+		otherActions := make([]*ofp.OfpAction, 0)
+		for _, action := range bucket.Actions {
+			if action.Type == OUTPUT {
+				outPortNo = action.GetOutput().Port
+			} else if action.Type != POP_VLAN {
+				otherActions = append(otherActions, action)
+			}
+		}
+
+		route2 := agent.GetRoute(inPortNo, outPortNo)
+		switch len(route2) {
+		case 0:
+			log.Errorw("mc-no-route", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo, "comment": "deleting flow"})
+			//	TODO: Delete flow
+			return deviceRules
+		case 2:
+			log.Debugw("route-found", log.Fields{"ingressHop": route2[0], "egressHop": route2[1]})
+		default:
+			log.Errorw("invalid-route-length", log.Fields{"routeLen": len(route2)})
+			return deviceRules
+		}
+
+		ingressHop := route[0]
+		ingressHop2 := route2[0]
+		egressHop := route2[1]
+
+		if ingressHop.Ingress != ingressHop2.Ingress {
+			log.Errorw("mc-ingress-hop-hop2-mismatch", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo, "comment": "ignoring flow"})
+			return deviceRules
+		}
+		// Set the parent device flow
+		fa := &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(ingressHop.Ingress),
+			},
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT)...)
+
+		// Augment the Actions
+		filteredAction := GetActions(flow, GROUP)
+		filteredAction = append(filteredAction, PopVlan())
+		filteredAction = append(filteredAction, Output(route2[1].Ingress))
+		fa.Actions = filteredAction
+
+		fg := fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(ingressHop.DeviceID, fg)
+
+		// Set the child device flow
+		fa = &fu.FlowArgs{
+			KV: fu.OfpFlowModArgs{"priority": uint64(flow.Priority), "cookie": flow.Cookie},
+			MatchFields: []*ofp.OfpOxmOfbField{
+				InPort(egressHop.Ingress),
+			},
+		}
+		// Augment the matchfields with the ofpfields from the flow
+		fa.MatchFields = append(fa.MatchFields, GetOfbFields(flow, IN_PORT, VLAN_VID, VLAN_PCP)...)
+
+		// Augment the Actions
+		otherActions = append(otherActions, Output(egressHop.Egress))
+		fa.Actions = otherActions
+
+		fg = fu.NewFlowsAndGroups()
+		fg.AddFlow(MkFlowStat(fa))
+		deviceRules.AddFlowsAndGroup(egressHop.DeviceID, fg)
+	}
+	return deviceRules
+}
+
+// decomposeFlow decomposes a flow for a logical device into flows for each physical device
+func (fd *FlowDecomposer) decomposeFlow(agent coreIf.LogicalDeviceAgent, flow *ofp.OfpFlowStats,
+	groupMap map[uint32]*ofp.OfpGroupEntry) *fu.DeviceRules {
+
+	inPortNo := GetInPort(flow)
+	outPortNo := GetOutPort(flow)
+
+	deviceRules := fu.NewDeviceRules()
+
+	route := agent.GetRoute(inPortNo, outPortNo)
+	switch len(route) {
+	case 0:
+		log.Errorw("no-route", log.Fields{"inPortNo": inPortNo, "outPortNo": outPortNo, "comment": "deleting-flow"})
+		//	TODO: Delete flow
+		return deviceRules
+	case 2:
+		log.Debugw("route-found", log.Fields{"ingressHop": route[0], "egressHop": route[1]})
+	default:
+		log.Errorw("invalid-route-length", log.Fields{"routeLen": len(route)})
+		return deviceRules
+	}
+
+	// Process controller bound flow
+	if outPortNo != 0 && (outPortNo&0x7fffffff) == uint32(ofp.OfpPortNo_OFPP_CONTROLLER) {
+		deviceRules = fd.processControllerBoundFlow(agent, route, inPortNo, outPortNo, flow)
+	} else {
+		var ingressDevice *voltha.Device
+		var err error
+		if ingressDevice, err = fd.deviceMgr.GetDevice(route[0].DeviceID); err != nil {
+			log.Errorw("ingress-device-not-found", log.Fields{"deviceId": route[0].DeviceID, "flow": flow})
+			return deviceRules
+		}
+		isUpstream := !ingressDevice.Root
+		if isUpstream {
+			deviceRules = fd.processUpstreamNonControllerBoundFlow(agent, route, inPortNo, outPortNo, flow)
+		} else if HasNextTable(flow) {
+			deviceRules = fd.processDownstreamFlowWithNextTable(agent, route, inPortNo, outPortNo, flow)
+		} else if outPortNo != 0 { // Unicast
+			deviceRules = fd.processUnicastFlow(agent, route, inPortNo, outPortNo, flow)
+		} else if grpId := GetGroup(flow); grpId != 0 { //Multicast
+			deviceRules = fd.processMulticastFlow(agent, route, inPortNo, outPortNo, flow, grpId, groupMap)
+		}
+	}
+	deviceRules = fd.updateOutputPortForControllerBoundFlowForParentDevide(flow, deviceRules)
+	return deviceRules
+}
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/graph/device_graph.go b/vendor/github.com/opencord/voltha-go/rw_core/graph/device_graph.go
new file mode 100644
index 0000000..376df16
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/graph/device_graph.go
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package graph
+
+import (
+	"fmt"
+	"github.com/gyuho/goraph"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-protos/go/voltha"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
+type RouteHop struct {
+	DeviceID string
+	Ingress  uint32
+	Egress   uint32
+}
+
+type OFPortLink struct {
+	Ingress uint32
+	Egress  uint32
+}
+
+type ofPortLinkToPath struct {
+	link OFPortLink
+	path []RouteHop
+}
+
+type GetDeviceFunc func(id string) (*voltha.Device, error)
+
+type DeviceGraph struct {
+	logicalDeviceId    string
+	GGraph             goraph.Graph
+	getDeviceFromModel GetDeviceFunc
+	logicalPorts       []*voltha.LogicalPort
+	rootPortsString    map[string]uint32
+	nonRootPortsString map[string]uint32
+	RootPorts          map[uint32]uint32
+	rootPortsLock      sync.RWMutex
+	Routes             map[OFPortLink][]RouteHop
+	graphBuildLock     sync.RWMutex
+	boundaryPorts      map[string]uint32
+	boundaryPortsLock  sync.RWMutex
+	cachedDevices      map[string]*voltha.Device
+	cachedDevicesLock  sync.RWMutex
+	devicesAdded       map[string]string
+	portsAdded         map[string]string
+}
+
+func NewDeviceGraph(logicalDeviceId string, getDevice GetDeviceFunc) *DeviceGraph {
+	var dg DeviceGraph
+	dg.logicalDeviceId = logicalDeviceId
+	dg.GGraph = goraph.NewGraph()
+	dg.getDeviceFromModel = getDevice
+	dg.graphBuildLock = sync.RWMutex{}
+	dg.cachedDevicesLock = sync.RWMutex{}
+	dg.rootPortsLock = sync.RWMutex{}
+	dg.devicesAdded = make(map[string]string)
+	dg.portsAdded = make(map[string]string)
+	dg.rootPortsString = make(map[string]uint32)
+	dg.nonRootPortsString = make(map[string]uint32)
+	dg.RootPorts = make(map[uint32]uint32)
+	dg.boundaryPorts = make(map[string]uint32)
+	dg.Routes = make(map[OFPortLink][]RouteHop)
+	dg.cachedDevices = make(map[string]*voltha.Device)
+	log.Debug("new device graph created ...")
+	return &dg
+}
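+
+// A minimal usage sketch (assuming getDevice is a caller-supplied GetDeviceFunc and
+// logicalPorts holds the logical device's ports):
+//
+//	dg := NewDeviceGraph("logical-device-0", getDevice)
+//	dg.ComputeRoutes(logicalPorts)
+//	route := dg.Routes[OFPortLink{Ingress: 10, Egress: 20}]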
+
+//IsRootPort returns true if the port is a root port on a logical device
+func (dg *DeviceGraph) IsRootPort(port uint32) bool {
+	dg.rootPortsLock.RLock()
+	defer dg.rootPortsLock.RUnlock()
+	_, exist := dg.RootPorts[port]
+	return exist
+}
+
+//GetDeviceNodeIds retrieves all the nodes in the device graph
+func (dg *DeviceGraph) GetDeviceNodeIds() map[string]string {
+	dg.graphBuildLock.RLock()
+	defer dg.graphBuildLock.RUnlock()
+	nodeIds := make(map[string]string)
+	nodesMap := dg.GGraph.GetNodes()
+	for id, node := range nodesMap {
+		if len(strings.Split(node.String(), ":")) != 2 { // not port node
+			nodeIds[id.String()] = id.String()
+		}
+	}
+	return nodeIds
+}
+
+//ComputeRoutes creates a device graph from the logical ports and then calculates all the routes
+//between the logical ports.  Any existing graph and routes are cleared first.
+func (dg *DeviceGraph) ComputeRoutes(lps []*voltha.LogicalPort) {
+	if dg == nil {
+		return
+	}
+	dg.graphBuildLock.Lock()
+	defer dg.graphBuildLock.Unlock()
+
+	// Clear the graph
+	dg.reset()
+
+	dg.logicalPorts = lps
+
+	// Set the root, non-root ports and boundary ports
+	for _, lp := range lps {
+		portId := concatDeviceIdPortId(lp.DeviceId, lp.DevicePortNo)
+		if lp.RootPort {
+			dg.rootPortsString[portId] = lp.OfpPort.PortNo
+			dg.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
+		} else {
+			dg.nonRootPortsString[portId] = lp.OfpPort.PortNo
+		}
+		dg.boundaryPorts[portId] = lp.OfpPort.PortNo
+	}
+
+	// Build the graph
+	var device *voltha.Device
+	for _, logicalPort := range dg.logicalPorts {
+		device, _ = dg.getDevice(logicalPort.DeviceId)
+		dg.GGraph = dg.addDevice(device, dg.GGraph, &dg.devicesAdded, &dg.portsAdded, dg.boundaryPorts)
+	}
+
+	dg.Routes = dg.buildRoutes()
+}
+
+// AddPort adds a port to the graph.  If the graph is empty it simply invokes ComputeRoutes
+func (dg *DeviceGraph) AddPort(lp *voltha.LogicalPort) {
+	//  If the graph does not exist invoke ComputeRoutes.
+	if len(dg.boundaryPorts) == 0 {
+		dg.ComputeRoutes([]*voltha.LogicalPort{lp})
+		return
+	}
+
+	dg.graphBuildLock.Lock()
+	defer dg.graphBuildLock.Unlock()
+
+	portId := concatDeviceIdPortId(lp.DeviceId, lp.DevicePortNo)
+
+	//	If the port is already part of the boundary ports, do nothing
+	if dg.portExist(portId) {
+		log.Debugw("port-already-exists", log.Fields{"portId": portId})
+		return
+	}
+	// Add the device where this port is located to the device graph. If the device is already added then
+	// only the missing port will be added
+	device, _ := dg.getDevice(lp.DeviceId)
+	dg.GGraph = dg.addDevice(device, dg.GGraph, &dg.devicesAdded, &dg.portsAdded, dg.boundaryPorts)
+
+	if lp.RootPort {
+		// Compute the route from this root port to all non-root ports
+		dg.rootPortsString[portId] = lp.OfpPort.PortNo
+		dg.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
+		dg.Routes = dg.buildPathsToAllNonRootPorts(lp)
+	} else {
+		// Compute the route from this port to all root ports
+		dg.nonRootPortsString[portId] = lp.OfpPort.PortNo
+		dg.Routes = dg.buildPathsToAllRootPorts(lp)
+	}
+
+	dg.Print()
+}
+
+func (dg *DeviceGraph) Print() error {
+	if level, err := log.GetPackageLogLevel(); err == nil && level == log.DebugLevel {
+		output := ""
+		routeNumber := 1
+		for k, v := range dg.Routes {
+			key := fmt.Sprintf("LP:%d->LP:%d", k.Ingress, k.Egress)
+			val := ""
+			for _, i := range v {
+				val += fmt.Sprintf("{%d->%s->%d},", i.Ingress, i.DeviceID, i.Egress)
+			}
+			val = val[:len(val)-1]
+			output += fmt.Sprintf("%d:{%s=>%s}   ", routeNumber, key, fmt.Sprintf("[%s]", val))
+			routeNumber += 1
+		}
+		log.Debugw("graph_routes", log.Fields{"lDeviceId": dg.logicalDeviceId, "Routes": output})
+	}
+	return nil
+}
+
+//getDevice returns the device either from the local cache (default) or from the model.
+//TODO: Set a cache timeout such that we do not use invalid data.  The full device lifecycle should also
+//be taken into consideration
+func (dg *DeviceGraph) getDevice(id string) (*voltha.Device, error) {
+	dg.cachedDevicesLock.RLock()
+	if d, exist := dg.cachedDevices[id]; exist {
+		dg.cachedDevicesLock.RUnlock()
+		//log.Debugw("getDevice - returned from cache", log.Fields{"deviceId": id})
+		return d, nil
+	}
+	dg.cachedDevicesLock.RUnlock()
+	//	Not cached
+	if d, err := dg.getDeviceFromModel(id); err != nil {
+		log.Errorw("device-not-found", log.Fields{"deviceId": id, "error": err})
+		return nil, err
+	} else { // cache it
+		dg.cachedDevicesLock.Lock()
+		dg.cachedDevices[id] = d
+		dg.cachedDevicesLock.Unlock()
+		//log.Debugw("getDevice - returned from model", log.Fields{"deviceId": id})
+		return d, nil
+	}
+}
+
+// addDevice adds a device to a device graph and sets up edges that represent the device's connections to its peers
+func (dg *DeviceGraph) addDevice(device *voltha.Device, g goraph.Graph, devicesAdded *map[string]string, portsAdded *map[string]string,
+	boundaryPorts map[string]uint32) goraph.Graph {
+
+	if device == nil {
+		return g
+	}
+
+	if _, exist := (*devicesAdded)[device.Id]; !exist {
+		g.AddNode(goraph.NewNode(device.Id))
+		(*devicesAdded)[device.Id] = device.Id
+	}
+
+	var portId string
+	var peerPortId string
+	for _, port := range device.Ports {
+		portId = concatDeviceIdPortId(device.Id, port.PortNo)
+		if _, exist := (*portsAdded)[portId]; !exist {
+			(*portsAdded)[portId] = portId
+			g.AddNode(goraph.NewNode(portId))
+			g.AddEdge(goraph.StringID(device.Id), goraph.StringID(portId), 1)
+			g.AddEdge(goraph.StringID(portId), goraph.StringID(device.Id), 1)
+		}
+		for _, peer := range port.Peers {
+			if _, exist := (*devicesAdded)[peer.DeviceId]; !exist {
+				d, _ := dg.getDevice(peer.DeviceId)
+				g = dg.addDevice(d, g, devicesAdded, portsAdded, boundaryPorts)
+			} else {
+				peerPortId = concatDeviceIdPortId(peer.DeviceId, peer.PortNo)
+				g.AddEdge(goraph.StringID(portId), goraph.StringID(peerPortId), 1)
+				g.AddEdge(goraph.StringID(peerPortId), goraph.StringID(portId), 1)
+			}
+		}
+	}
+	return g
+}
+
+//portExist returns true if the port ID is already part of the boundary ports map.
+func (dg *DeviceGraph) portExist(id string) bool {
+	dg.boundaryPortsLock.RLock()
+	defer dg.boundaryPortsLock.RUnlock()
+	_, exist := dg.boundaryPorts[id]
+	return exist
+}
+
+// buildPathsToAllRootPorts builds all the paths from the non-root logical port to all root ports
+// on the logical device
+func (dg *DeviceGraph) buildPathsToAllRootPorts(lp *voltha.LogicalPort) map[OFPortLink][]RouteHop {
+	paths := dg.Routes
+	source := concatDeviceIdPortId(lp.DeviceId, lp.DevicePortNo)
+	sourcePort := lp.OfpPort.PortNo
+	ch := make(chan *ofPortLinkToPath)
+	numBuildRequest := 0
+	for target, targetPort := range dg.rootPortsString {
+		go dg.buildRoute(source, target, sourcePort, targetPort, ch)
+		numBuildRequest += 1
+	}
+	responseReceived := 0
+forloop:
+	for {
+		if responseReceived == numBuildRequest {
+			break
+		}
+		select {
+		case res, ok := <-ch:
+			if !ok {
+				log.Debug("channel closed")
+				break forloop
+			}
+			if res != nil && len(res.path) > 0 {
+				paths[res.link] = res.path
+				paths[OFPortLink{Ingress: res.link.Egress, Egress: res.link.Ingress}] = getReverseRoute(res.path)
+			}
+		}
+		responseReceived += 1
+	}
+	return paths
+}
+
+// buildPathsToAllNonRootPorts builds all the paths from the root logical port to all non-root ports
+// on the logical device
+func (dg *DeviceGraph) buildPathsToAllNonRootPorts(lp *voltha.LogicalPort) map[OFPortLink][]RouteHop {
+	paths := dg.Routes
+	source := concatDeviceIdPortId(lp.DeviceId, lp.DevicePortNo)
+	sourcePort := lp.OfpPort.PortNo
+	ch := make(chan *ofPortLinkToPath)
+	numBuildRequest := 0
+	for target, targetPort := range dg.nonRootPortsString {
+		go dg.buildRoute(source, target, sourcePort, targetPort, ch)
+		numBuildRequest += 1
+	}
+	responseReceived := 0
+forloop:
+	for {
+		if responseReceived == numBuildRequest {
+			break
+		}
+		select {
+		case res, ok := <-ch:
+			if !ok {
+				log.Debug("channel closed")
+				break forloop
+			}
+			if res != nil && len(res.path) > 0 {
+				paths[res.link] = res.path
+				paths[OFPortLink{Ingress: res.link.Egress, Egress: res.link.Ingress}] = getReverseRoute(res.path)
+			}
+		}
+		responseReceived += 1
+	}
+	return paths
+}
+
+//buildRoute builds a route between a source and a target logical port
+func (dg *DeviceGraph) buildRoute(sourceId, targetId string, sourcePort, targetPort uint32, ch chan *ofPortLinkToPath) {
+	var pathIds []goraph.ID
+	path := make([]RouteHop, 0)
+	var err error
+	var hop RouteHop
+	var result *ofPortLinkToPath
+
+	if sourceId == targetId {
+		ch <- result
+		return
+	}
+	//Ignore Root - Root Routes
+	if dg.IsRootPort(sourcePort) && dg.IsRootPort(targetPort) {
+		ch <- result
+		return
+	}
+
+	//Ignore non-Root - non-Root Routes
+	if !dg.IsRootPort(sourcePort) && !dg.IsRootPort(targetPort) {
+		ch <- result
+		return
+	}
+
+	if pathIds, _, err = goraph.Dijkstra(dg.GGraph, goraph.StringID(sourceId), goraph.StringID(targetId)); err != nil {
+		log.Errorw("no-path", log.Fields{"sourceId": sourceId, "targetId": targetId, "error": err})
+		ch <- result
+		return
+	}
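+	// goraph.Dijkstra returns the visited nodes as repeating
+	// (ingress-port, device, egress-port) triplets, so a valid path length is a multiple of 3.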
+	if len(pathIds)%3 != 0 {
+		ch <- result
+		return
+	}
+	var deviceId string
+	var ingressPort uint32
+	var egressPort uint32
+	for i := 0; i < len(pathIds); i = i + 3 {
+		if deviceId, ingressPort, err = splitIntoDeviceIdPortId(pathIds[i].String()); err != nil {
+			log.Errorw("id-error", log.Fields{"sourceId": sourceId, "targetId": targetId, "error": err})
+			break
+		}
+		if _, egressPort, err = splitIntoDeviceIdPortId(pathIds[i+2].String()); err != nil {
+			log.Errorw("id-error", log.Fields{"sourceId": sourceId, "targetId": targetId, "error": err})
+			break
+		}
+		hop = RouteHop{Ingress: ingressPort, DeviceID: deviceId, Egress: egressPort}
+		path = append(path, hop)
+	}
+	result = &ofPortLinkToPath{link: OFPortLink{Ingress: sourcePort, Egress: targetPort}, path: path}
+	ch <- result
+}
+
+//buildRoutes builds all routes between all the ports on the logical device
+func (dg *DeviceGraph) buildRoutes() map[OFPortLink][]RouteHop {
+	paths := make(map[OFPortLink][]RouteHop)
+	ch := make(chan *ofPortLinkToPath)
+	numBuildRequest := 0
+	for source, sourcePort := range dg.boundaryPorts {
+		for target, targetPort := range dg.boundaryPorts {
+			go dg.buildRoute(source, target, sourcePort, targetPort, ch)
+			numBuildRequest += 1
+		}
+	}
+	responseReceived := 0
+forloop:
+	for {
+		if responseReceived == numBuildRequest {
+			break
+		}
+		select {
+		case res, ok := <-ch:
+			if !ok {
+				log.Debug("channel closed")
+				break forloop
+			}
+			if res != nil && len(res.path) > 0 {
+				paths[res.link] = res.path
+			}
+		}
+		responseReceived += 1
+	}
+	return paths
+}
+
+// reset cleans up the device graph
+func (dg *DeviceGraph) reset() {
+	dg.devicesAdded = make(map[string]string)
+	dg.portsAdded = make(map[string]string)
+	dg.rootPortsString = make(map[string]uint32)
+	dg.nonRootPortsString = make(map[string]uint32)
+	dg.RootPorts = make(map[uint32]uint32)
+	dg.boundaryPorts = make(map[string]uint32)
+	dg.Routes = make(map[OFPortLink][]RouteHop)
+	dg.cachedDevices = make(map[string]*voltha.Device)
+}
+
+//concatDeviceIdPortId formats a port ID using the device ID and the port number
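+//For example, concatDeviceIdPortId("onu-1", 32) yields "onu-1:32", which
+//splitIntoDeviceIdPortId parses back into ("onu-1", 32, nil).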
+func concatDeviceIdPortId(deviceId string, portNo uint32) string {
+	return fmt.Sprintf("%s:%d", deviceId, portNo)
+}
+
+// splitIntoDeviceIdPortId extracts the device id and port number from the portId
+func splitIntoDeviceIdPortId(id string) (string, uint32, error) {
+	result := strings.Split(id, ":")
+	if len(result) != 2 {
+		return "", 0, fmt.Errorf("invalid-id-%s", id)
+	}
+	temp, err := strconv.ParseInt(result[1], 10, 32)
+	if err != nil {
+		return "", 0, fmt.Errorf("invalid-id-%s-%s", id, err.Error())
+	}
+	return result[0], uint32(temp), nil
+}
+
+//getReverseRoute returns the reverse of the route in param
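+//Note that only the order of the hops is reversed; the Ingress and Egress ports
+//within each RouteHop are left untouched.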
+func getReverseRoute(route []RouteHop) []RouteHop {
+	reverse := make([]RouteHop, len(route))
+	// Use i <= j so the middle hop of an odd-length route is copied as well.
+	for i, j := 0, len(route)-1; i <= j; i, j = i+1, j-1 {
+		reverse[i], reverse[j] = route[j], route[i]
+	}
+	return reverse
+}
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/utils/core_utils.go b/vendor/github.com/opencord/voltha-go/rw_core/utils/core_utils.go
new file mode 100644
index 0000000..1e1ed9f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/utils/core_utils.go
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package utils
+
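+// DeviceID wraps the identifier of a device.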
+type DeviceID struct {
+	Id string
+}
+
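+// LogicalDeviceID wraps the identifier of a logical device.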
+type LogicalDeviceID struct {
+	Id string
+}
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
new file mode 100644
index 0000000..10be81a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package utils
+
+import (
+	"bytes"
+	"github.com/cevaris/ordered_map"
+	"github.com/gogo/protobuf/proto"
+	ofp "github.com/opencord/voltha-protos/go/openflow_13"
+)
+
+type OfpFlowModArgs map[string]uint64
+
+type FlowArgs struct {
+	MatchFields []*ofp.OfpOxmOfbField
+	Actions     []*ofp.OfpAction
+	Command     *ofp.OfpFlowModCommand
+	Priority    uint32
+	KV          OfpFlowModArgs
+}
+
+type GroupArgs struct {
+	GroupId uint32
+	Buckets []*ofp.OfpBucket
+	Command *ofp.OfpGroupModCommand
+}
+
+type FlowsAndGroups struct {
+	Flows  *ordered_map.OrderedMap
+	Groups *ordered_map.OrderedMap
+}
+
+func NewFlowsAndGroups() *FlowsAndGroups {
+	var fg FlowsAndGroups
+	fg.Flows = ordered_map.NewOrderedMap()
+	fg.Groups = ordered_map.NewOrderedMap()
+	return &fg
+}
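+
+// A typical usage pattern, as in the flow decomposer above, is:
+//
+//	fg := NewFlowsAndGroups()
+//	fg.AddFlow(flowStats)
+//	deviceRules.AddFlowsAndGroup(deviceID, fg)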
+
+func (fg *FlowsAndGroups) Copy() *FlowsAndGroups {
+	copyFG := NewFlowsAndGroups()
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			copyFG.Flows.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			copyFG.Groups.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	return copyFG
+}
+
+func (fg *FlowsAndGroups) GetFlow(index int) *ofp.OfpFlowStats {
+	iter := fg.Flows.IterFunc()
+	pos := 0
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if pos == index {
+			if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+				return protoMsg
+			}
+			return nil
+		}
+		pos += 1
+	}
+	return nil
+}
+
+func (fg *FlowsAndGroups) ListFlows() []*ofp.OfpFlowStats {
+	flows := make([]*ofp.OfpFlowStats, 0)
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			flows = append(flows, protoMsg)
+		}
+	}
+	return flows
+}
+
+func (fg *FlowsAndGroups) ListGroups() []*ofp.OfpGroupEntry {
+	groups := make([]*ofp.OfpGroupEntry, 0)
+	iter := fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			groups = append(groups, protoMsg)
+		}
+	}
+	return groups
+}
+
+func (fg *FlowsAndGroups) String() string {
+	var buffer bytes.Buffer
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			buffer.WriteString("\nFlow:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			buffer.WriteString("\nGroup:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	return buffer.String()
+}
+
+func (fg *FlowsAndGroups) AddFlow(flow *ofp.OfpFlowStats) {
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add flow only if absent
+	if _, exist := fg.Flows.Get(flow.Id); !exist {
+		fg.Flows.Set(flow.Id, flow)
+	}
+}
+
+//AddFrom adds flows and groups from the argument into this structure only if they do not already exist
+func (fg *FlowsAndGroups) AddFrom(from *FlowsAndGroups) {
+	iter := from.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			if _, exist := fg.Flows.Get(protoMsg.Id); !exist {
+				fg.Flows.Set(protoMsg.Id, protoMsg)
+			}
+		}
+	}
+	iter = from.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			if _, exist := fg.Groups.Get(protoMsg.Stats.GroupId); !exist {
+				fg.Groups.Set(protoMsg.Stats.GroupId, protoMsg)
+			}
+		}
+	}
+}
+
+type DeviceRules struct {
+	Rules map[string]*FlowsAndGroups
+}
+
+func NewDeviceRules() *DeviceRules {
+	var dr DeviceRules
+	dr.Rules = make(map[string]*FlowsAndGroups)
+	return &dr
+}
+
+func (dr *DeviceRules) Copy() *DeviceRules {
+	copyDR := NewDeviceRules()
+	for key, val := range dr.Rules {
+		copyDR.Rules[key] = val.Copy()
+	}
+	return copyDR
+}
+
+func (dr *DeviceRules) ClearFlows(deviceId string) {
+	if _, exist := dr.Rules[deviceId]; exist {
+		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
+	}
+}
+
+func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	if _, exist := dr.Rules[deviceId]; !exist {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId].AddFlow(flow)
+}
+
+func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	return dr.Rules
+}
+
+func (dr *DeviceRules) String() string {
+	var buffer bytes.Buffer
+	for key, value := range dr.Rules {
+		buffer.WriteString("DeviceId:")
+		buffer.WriteString(key)
+		buffer.WriteString(value.String())
+		buffer.WriteString("\n\n")
+	}
+	return buffer.String()
+}
+
+// AddFlowsAndGroup sets the flows and groups for the given device, replacing any existing entry.
+func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	dr.Rules[deviceId] = fg
+}
+
+// CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
+// empty FlowsAndGroups to it.  Otherwise, it does nothing.
+func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+}
+
+/*
+ *  Common flow routines
+ */
+
+//FindOverlappingFlows returns a list of overlapping flow(s) where mod is the flow request
+func FindOverlappingFlows(flows []*ofp.OfpFlowStats, mod *ofp.OfpFlowMod) []*ofp.OfpFlowStats {
+	return nil //TODO - complete implementation
+}
+
+// FindFlowById returns the index of the flow in the flows array if present. Otherwise, it returns -1
+func FindFlowById(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if flow.Id == f.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+// FindFlows returns the index of the first flow in flows that matches the given flow (see FlowMatch).  Otherwise, it returns -1
+func FindFlows(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if FlowMatch(f, flow) {
+			return idx
+		}
+	}
+	return -1
+}
+
+//FlowMatch returns true if two flows match on the following flow attributes:
+//TableId, Priority, Flags, Cookie, Match
+func FlowMatch(f1 *ofp.OfpFlowStats, f2 *ofp.OfpFlowStats) bool {
+	return f1.TableId == f2.TableId &&
+		f1.Priority == f2.Priority &&
+		f1.Flags == f2.Flags &&
+		f1.Cookie == f2.Cookie &&
+		f1.Match.String() == f2.Match.String()
+}
+
+//FlowMatchesMod returns true if the given flow is "covered" by the wildcard flow_mod, taking into
+//consideration both exact matches as well as mask-based match fields, if any. Otherwise it returns false
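+//For example, with mod.Cookie = 0x0A and mod.CookieMask = 0xFF, a flow with Cookie = 0x0A passes
+//the cookie check below while one with Cookie = 0x0B does not.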
+func FlowMatchesMod(flow *ofp.OfpFlowStats, mod *ofp.OfpFlowMod) bool {
+	//Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
+	if (flow.Cookie & mod.CookieMask) != (mod.Cookie & mod.CookieMask) {
+		return false
+	}
+
+	//Check if flow.table_id is covered by flow_mod.table_id
+	if mod.TableId != uint32(ofp.OfpTable_OFPTT_ALL) && flow.TableId != mod.TableId {
+		return false
+	}
+
+	//Check out_port
+	if (mod.OutPort&0x7fffffff) != uint32(ofp.OfpPortNo_OFPP_ANY) && !FlowHasOutPort(flow, mod.OutPort) {
+		return false
+	}
+
+	//	Check out_group
+	if (mod.OutGroup&0x7fffffff) != uint32(ofp.OfpGroup_OFPG_ANY) && !FlowHasOutGroup(flow, mod.OutGroup) {
+		return false
+	}
+
+	//Priority is ignored
+
+	//Check match condition
+	//If the flow_mod match field is empty, that is a special case and indicates the flow entry matches
+	if (mod.Match == nil) || (mod.Match.OxmFields == nil) {
+		//If we got this far and the match is empty in the flow spec, then the flow matches
+		return true
+	} // TODO : implement the flow match analysis
+	return false
+
+}
+
+//FlowHasOutPort returns true if the flow has an output action with the given out_port
+func FlowHasOutPort(flow *ofp.OfpFlowStats, outPort uint32) bool {
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+					if (action.GetOutput() != nil) && (action.GetOutput().Port == outPort) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FlowHasOutGroup returns true if the flow has a group action with the given out_group
+func FlowHasOutGroup(flow *ofp.OfpFlowStats, groupID uint32) bool {
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_GROUP {
+					if (action.GetGroup() != nil) && (action.GetGroup().GroupId == groupID) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FindGroup returns index of group if found, else returns -1
+func FindGroup(groups []*ofp.OfpGroupEntry, groupId uint32) int {
+	for idx, group := range groups {
+		if group.Desc.GroupId == groupId {
+			return idx
+		}
+	}
+	return -1
+}
+
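+//FlowsDeleteByGroupId removes from flows every flow that has a group action pointing at groupId.
+//It returns whether anything was removed, along with the flows that remain; e.g. given
+//flows [f1 (group 5), f2 (group 7)], FlowsDeleteByGroupId(flows, 5) returns (true, [f2]).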
+func FlowsDeleteByGroupId(flows []*ofp.OfpFlowStats, groupId uint32) (bool, []*ofp.OfpFlowStats) {
+	toKeep := make([]*ofp.OfpFlowStats, 0)
+
+	for _, f := range flows {
+		if !FlowHasOutGroup(f, groupId) {
+			toKeep = append(toKeep, f)
+		}
+	}
+	return len(toKeep) < len(flows), toKeep
+}