Updated Adapter to support handling of DHCP trap on NNI, packet-in/out, and bug fixes.
Tested EAPOL/DHCP/HSIA functionality E2E with EdgeCore OLT and TWSH ONU KIT.

patch: PON port is derived from platform and sent to core, plus bug fixes

Retested EAPOL/DHCP/HSIA use case end to end with EdgeCore OLT and TWSH ONU KIT

Change-Id: I99df82fd7a1385c10878f6fe09ce0d30c48d8e99
diff --git a/vendor/github.com/mdlayher/ethernet/.travis.yml b/vendor/github.com/mdlayher/ethernet/.travis.yml
new file mode 100644
index 0000000..cc21599
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+  - 1.x
+os:
+  - linux
+before_install:
+  - go get golang.org/x/lint/golint
+  - go get honnef.co/go/tools/cmd/staticcheck
+  - go get -d ./...
+script:
+  - go build -tags=gofuzz ./...
+  - go vet ./...
+  - staticcheck ./...
+  - golint -set_exit_status ./...
+  - go test -v -race ./...
diff --git a/vendor/github.com/mdlayher/ethernet/LICENSE.md b/vendor/github.com/mdlayher/ethernet/LICENSE.md
new file mode 100644
index 0000000..75ed9de
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/LICENSE.md
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (C) 2015 Matt Layher
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mdlayher/ethernet/README.md b/vendor/github.com/mdlayher/ethernet/README.md
new file mode 100644
index 0000000..ec6f4fe
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/README.md
@@ -0,0 +1,8 @@
+ethernet [![Build Status](https://travis-ci.org/mdlayher/ethernet.svg?branch=master)](https://travis-ci.org/mdlayher/ethernet) [![GoDoc](https://godoc.org/github.com/mdlayher/ethernet?status.svg)](https://godoc.org/github.com/mdlayher/ethernet) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/ethernet)](https://goreportcard.com/report/github.com/mdlayher/ethernet)
+========
+
+Package `ethernet` implements marshaling and unmarshaling of IEEE 802.3
+Ethernet II frames and IEEE 802.1Q VLAN tags.  MIT Licensed.
+
+For more information about using Ethernet frames in Go, check out my blog
+post: [Network Protocol Breakdown: Ethernet and Go](https://medium.com/@mdlayher/network-protocol-breakdown-ethernet-and-go-de985d726cc1).
\ No newline at end of file
diff --git a/vendor/github.com/mdlayher/ethernet/ethernet.go b/vendor/github.com/mdlayher/ethernet/ethernet.go
new file mode 100644
index 0000000..d150d8e
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/ethernet.go
@@ -0,0 +1,301 @@
+// Package ethernet implements marshaling and unmarshaling of IEEE 802.3
+// Ethernet II frames and IEEE 802.1Q VLAN tags.
+package ethernet
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"net"
+)
+
+//go:generate stringer -output=string.go -type=EtherType
+
+const (
+	// minPayload is the minimum payload size for an Ethernet frame, assuming
+	// that no 802.1Q VLAN tags are present.
+	minPayload = 46
+)
+
+var (
+	// Broadcast is a special hardware address which indicates a Frame should
+	// be sent to every device on a given LAN segment.
+	Broadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+)
+
+var (
+	// ErrInvalidFCS is returned when Frame.UnmarshalFCS detects an incorrect
+	// Ethernet frame check sequence in a byte slice for a Frame.
+	ErrInvalidFCS = errors.New("invalid frame check sequence")
+)
+
+// An EtherType is a value used to identify an upper layer protocol
+// encapsulated in a Frame.
+//
+// A list of IANA-assigned EtherType values may be found here:
+// http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml.
+type EtherType uint16
+
+// Common EtherType values frequently used in a Frame.
+const (
+	EtherTypeIPv4 EtherType = 0x0800
+	EtherTypeARP  EtherType = 0x0806
+	EtherTypeIPv6 EtherType = 0x86DD
+
+	// EtherTypeVLAN and EtherTypeServiceVLAN are used as 802.1Q Tag Protocol
+	// Identifiers (TPIDs).
+	EtherTypeVLAN        EtherType = 0x8100
+	EtherTypeServiceVLAN EtherType = 0x88a8
+)
+
+// A Frame is an IEEE 802.3 Ethernet II frame.  A Frame contains information
+// such as source and destination hardware addresses, zero or more optional
+// 802.1Q VLAN tags, an EtherType, and payload data.
+type Frame struct {
+	// Destination specifies the destination hardware address for this Frame.
+	//
+	// If this address is set to Broadcast, the Frame will be sent to every
+	// device on a given LAN segment.
+	Destination net.HardwareAddr
+
+	// Source specifies the source hardware address for this Frame.
+	//
+	// Typically, this is the hardware address of the network interface used to
+	// send this Frame.
+	Source net.HardwareAddr
+
+	// ServiceVLAN specifies an optional 802.1Q service VLAN tag, for use with
+	// 802.1ad double tagging, or "Q-in-Q". If ServiceVLAN is not nil, VLAN must
+	// not be nil as well.
+	//
+	// Most users should leave this field set to nil and use VLAN instead.
+	ServiceVLAN *VLAN
+
+	// VLAN specifies an optional 802.1Q customer VLAN tag, which may or may
+	// not be present in a Frame.  It is important to note that the operating
+	// system may automatically strip VLAN tags before they can be parsed.
+	VLAN *VLAN
+
+	// EtherType is a value used to identify an upper layer protocol
+	// encapsulated in this Frame.
+	EtherType EtherType
+
+	// Payload is a variable length data payload encapsulated by this Frame.
+	Payload []byte
+}
+
+// MarshalBinary allocates a byte slice and marshals a Frame into binary form.
+func (f *Frame) MarshalBinary() ([]byte, error) {
+	b := make([]byte, f.length())
+	_, err := f.read(b)
+	return b, err
+}
+
+// MarshalFCS allocates a byte slice, marshals a Frame into binary form, and
+// finally calculates and places a 4-byte IEEE CRC32 frame check sequence at
+// the end of the slice.
+//
+// Most users should use MarshalBinary instead.  MarshalFCS is provided as a
+// convenience for rare occasions when the operating system cannot
+// automatically generate a frame check sequence for an Ethernet frame.
+func (f *Frame) MarshalFCS() ([]byte, error) {
+	// Frame length with 4 extra bytes for frame check sequence
+	b := make([]byte, f.length()+4)
+	if _, err := f.read(b); err != nil {
+		return nil, err
+	}
+
+	// Compute IEEE CRC32 checksum of frame bytes and place it directly
+	// in the last four bytes of the slice
+	binary.BigEndian.PutUint32(b[len(b)-4:], crc32.ChecksumIEEE(b[0:len(b)-4]))
+	return b, nil
+}
+
+// read reads data from a Frame into b.  read is used to marshal a Frame
+// into binary form, but does not allocate on its own.
+func (f *Frame) read(b []byte) (int, error) {
+	// S-VLAN must also have accompanying C-VLAN.
+	if f.ServiceVLAN != nil && f.VLAN == nil {
+		return 0, ErrInvalidVLAN
+	}
+
+	copy(b[0:6], f.Destination)
+	copy(b[6:12], f.Source)
+
+	// Marshal each non-nil VLAN tag into bytes, inserting the appropriate
+	// EtherType/TPID before each, so devices know that one or more VLANs
+	// are present.
+	vlans := []struct {
+		vlan *VLAN
+		tpid EtherType
+	}{
+		{vlan: f.ServiceVLAN, tpid: EtherTypeServiceVLAN},
+		{vlan: f.VLAN, tpid: EtherTypeVLAN},
+	}
+
+	n := 12
+	for _, vt := range vlans {
+		if vt.vlan == nil {
+			continue
+		}
+
+		// Add VLAN EtherType and VLAN bytes.
+		binary.BigEndian.PutUint16(b[n:n+2], uint16(vt.tpid))
+		if _, err := vt.vlan.read(b[n+2 : n+4]); err != nil {
+			return 0, err
+		}
+		n += 4
+	}
+
+	// Marshal actual EtherType after any VLANs, copy payload into
+	// output bytes.
+	binary.BigEndian.PutUint16(b[n:n+2], uint16(f.EtherType))
+	copy(b[n+2:], f.Payload)
+
+	return len(b), nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a Frame.
+func (f *Frame) UnmarshalBinary(b []byte) error {
+	// Verify that both hardware addresses and a single EtherType are present
+	if len(b) < 14 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Track offset in packet for reading data
+	n := 14
+
+	// Continue looping and parsing VLAN tags until no more VLAN EtherType
+	// values are detected
+	et := EtherType(binary.BigEndian.Uint16(b[n-2 : n]))
+	switch et {
+	case EtherTypeServiceVLAN, EtherTypeVLAN:
+		// VLAN type is hinted for further parsing.  An index is returned which
+		// indicates how many bytes were consumed by VLAN tags.
+		nn, err := f.unmarshalVLANs(et, b[n:])
+		if err != nil {
+			return err
+		}
+
+		n += nn
+	default:
+		// No VLANs detected.
+		f.EtherType = et
+	}
+
+	// Allocate single byte slice to store destination and source hardware
+	// addresses, and payload
+	bb := make([]byte, 6+6+len(b[n:]))
+	copy(bb[0:6], b[0:6])
+	f.Destination = bb[0:6]
+	copy(bb[6:12], b[6:12])
+	f.Source = bb[6:12]
+
+	// There used to be a minimum payload length restriction here, but as
+	// long as two hardware addresses and an EtherType are present, it
+	// doesn't really matter what is contained in the payload.  We will
+	// follow the "robustness principle".
+	copy(bb[12:], b[n:])
+	f.Payload = bb[12:]
+
+	return nil
+}
+
+// UnmarshalFCS computes the IEEE CRC32 frame check sequence of a Frame,
+// verifies it against the checksum present in the byte slice, and finally,
+// unmarshals a byte slice into a Frame.
+//
+// Most users should use UnmarshalBinary instead.  UnmarshalFCS is provided as
+// a convenience for rare occasions when the operating system cannot
+// automatically verify a frame check sequence for an Ethernet frame.
+func (f *Frame) UnmarshalFCS(b []byte) error {
+	// Must contain enough data for FCS, to avoid panics
+	if len(b) < 4 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Verify checksum in slice versus newly computed checksum
+	want := binary.BigEndian.Uint32(b[len(b)-4:])
+	got := crc32.ChecksumIEEE(b[0 : len(b)-4])
+	if want != got {
+		return ErrInvalidFCS
+	}
+
+	return f.UnmarshalBinary(b[0 : len(b)-4])
+}
+
+// length calculates the number of bytes required to store a Frame.
+func (f *Frame) length() int {
+	// If payload is less than the required minimum length, we zero-pad up to
+	// the required minimum length
+	pl := len(f.Payload)
+	if pl < minPayload {
+		pl = minPayload
+	}
+
+	// Add additional length if VLAN tags are needed.
+	var vlanLen int
+	switch {
+	case f.ServiceVLAN != nil && f.VLAN != nil:
+		vlanLen = 8
+	case f.VLAN != nil:
+		vlanLen = 4
+	}
+
+	// 6 bytes: destination hardware address
+	// 6 bytes: source hardware address
+	// N bytes: VLAN tags (if present)
+	// 2 bytes: EtherType
+	// N bytes: payload length (may be padded)
+	return 6 + 6 + vlanLen + 2 + pl
+}
+
+// unmarshalVLANs unmarshals S/C-VLAN tags.  It is assumed that tpid
+// is a valid S/C-VLAN TPID.
+func (f *Frame) unmarshalVLANs(tpid EtherType, b []byte) (int, error) {
+	// 4 or more bytes must remain for valid S/C-VLAN tag and EtherType.
+	if len(b) < 4 {
+		return 0, io.ErrUnexpectedEOF
+	}
+
+	// Track how many bytes are consumed by VLAN tags.
+	var n int
+
+	switch tpid {
+	case EtherTypeServiceVLAN:
+		vlan := new(VLAN)
+		if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+			return 0, err
+		}
+		f.ServiceVLAN = vlan
+
+		// Assume that a C-VLAN immediately trails an S-VLAN.
+		if EtherType(binary.BigEndian.Uint16(b[n+2:n+4])) != EtherTypeVLAN {
+			return 0, ErrInvalidVLAN
+		}
+
+		// 4 or more bytes must remain for valid C-VLAN tag and EtherType.
+		n += 4
+		if len(b[n:]) < 4 {
+			return 0, io.ErrUnexpectedEOF
+		}
+
+		// Continue to parse the C-VLAN.
+		fallthrough
+	case EtherTypeVLAN:
+		vlan := new(VLAN)
+		if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+			return 0, err
+		}
+
+		f.VLAN = vlan
+		f.EtherType = EtherType(binary.BigEndian.Uint16(b[n+2 : n+4]))
+		n += 4
+	default:
+		panic(fmt.Sprintf("unknown VLAN TPID: %04x", tpid))
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/mdlayher/ethernet/fuzz.go b/vendor/github.com/mdlayher/ethernet/fuzz.go
new file mode 100644
index 0000000..5d22532
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/fuzz.go
@@ -0,0 +1,24 @@
+// +build gofuzz
+
+package ethernet
+
+func Fuzz(data []byte) int {
+	f := new(Frame)
+	if err := f.UnmarshalBinary(data); err != nil {
+		return 0
+	}
+
+	if _, err := f.MarshalBinary(); err != nil {
+		panic(err)
+	}
+
+	if err := f.UnmarshalFCS(data); err != nil {
+		return 0
+	}
+
+	if _, err := f.MarshalFCS(); err != nil {
+		panic(err)
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/mdlayher/ethernet/go.mod b/vendor/github.com/mdlayher/ethernet/go.mod
new file mode 100644
index 0000000..4ac5e77
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/go.mod
@@ -0,0 +1,5 @@
+module github.com/mdlayher/ethernet
+
+go 1.12
+
+require github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d
diff --git a/vendor/github.com/mdlayher/ethernet/go.sum b/vendor/github.com/mdlayher/ethernet/go.sum
new file mode 100644
index 0000000..9400186
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/go.sum
@@ -0,0 +1,11 @@
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d h1:rjAS0af7FIYCScTtEU5KjIldC6qVaEScUJhABHC+ccM=
+github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d/go.mod h1:r1fbeITl2xL/zLbVnNHFyOzQJTgr/3fpf1lJX/cjzR8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977 h1:actzWV6iWn3GLqN8dZjzsB+CLt+gaV2+wsxroxiQI8I=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313 h1:pczuHS43Cp2ktBEEmLwScxgjWsBSzdaQiKzUyf3DTTc=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/mdlayher/ethernet/string.go b/vendor/github.com/mdlayher/ethernet/string.go
new file mode 100644
index 0000000..89a3e01
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/string.go
@@ -0,0 +1,38 @@
+// Code generated by "stringer -output=string.go -type=EtherType"; DO NOT EDIT.
+
+package ethernet
+
+import "fmt"
+
+const (
+	_EtherType_name_0 = "EtherTypeIPv4"
+	_EtherType_name_1 = "EtherTypeARP"
+	_EtherType_name_2 = "EtherTypeVLAN"
+	_EtherType_name_3 = "EtherTypeIPv6"
+	_EtherType_name_4 = "EtherTypeServiceVLAN"
+)
+
+var (
+	_EtherType_index_0 = [...]uint8{0, 13}
+	_EtherType_index_1 = [...]uint8{0, 12}
+	_EtherType_index_2 = [...]uint8{0, 13}
+	_EtherType_index_3 = [...]uint8{0, 13}
+	_EtherType_index_4 = [...]uint8{0, 20}
+)
+
+func (i EtherType) String() string {
+	switch {
+	case i == 2048:
+		return _EtherType_name_0
+	case i == 2054:
+		return _EtherType_name_1
+	case i == 33024:
+		return _EtherType_name_2
+	case i == 34525:
+		return _EtherType_name_3
+	case i == 34984:
+		return _EtherType_name_4
+	default:
+		return fmt.Sprintf("EtherType(%d)", i)
+	}
+}
diff --git a/vendor/github.com/mdlayher/ethernet/vlan.go b/vendor/github.com/mdlayher/ethernet/vlan.go
new file mode 100644
index 0000000..6ada6a9
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/vlan.go
@@ -0,0 +1,129 @@
+package ethernet
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+const (
+	// VLANNone is a special VLAN ID which indicates that no VLAN is being
+	// used in a Frame.  In this case, the VLAN's other fields may be used
+	// to indicate a Frame's priority.
+	VLANNone = 0x000
+
+	// VLANMax is a reserved VLAN ID which may indicate a wildcard in some
+	// management systems, but may not be configured or transmitted in a
+	// VLAN tag.
+	VLANMax = 0xfff
+)
+
+var (
+	// ErrInvalidVLAN is returned when a VLAN tag is invalid due to one of the
+	// following reasons:
+	//   - Priority of greater than 7 is detected
+	//   - ID of greater than 4094 (0xffe) is detected
+	//   - A customer VLAN does not follow a service VLAN (when using Q-in-Q)
+	ErrInvalidVLAN = errors.New("invalid VLAN")
+)
+
+// Priority is an IEEE P802.1p priority level.  Priority can be any value from
+// 0 to 7.
+//
+// It is important to note that priority 1 (PriorityBackground) actually has
+// a lower priority than 0 (PriorityBestEffort).  All other Priority constants
+// indicate higher priority as the integer values increase.
+type Priority uint8
+
+// IEEE P802.1p recommended priority levels.  Note that PriorityBackground has
+// a lower priority than PriorityBestEffort.
+const (
+	PriorityBackground           Priority = 1
+	PriorityBestEffort           Priority = 0
+	PriorityExcellentEffort      Priority = 2
+	PriorityCriticalApplications Priority = 3
+	PriorityVideo                Priority = 4
+	PriorityVoice                Priority = 5
+	PriorityInternetworkControl  Priority = 6
+	PriorityNetworkControl       Priority = 7
+)
+
+// A VLAN is an IEEE 802.1Q Virtual LAN (VLAN) tag.  A VLAN contains
+// information regarding traffic priority and a VLAN identifier for
+// a given Frame.
+type VLAN struct {
+	// Priority specifies a IEEE P802.1p priority level.  Priority can be any
+	// value from 0 to 7.
+	Priority Priority
+
+	// DropEligible indicates if a Frame is eligible to be dropped in the
+	// presence of network congestion.
+	DropEligible bool
+
+	// ID specifies the VLAN ID for a Frame.  ID can be any value from 0 to
+	// 4094 (0x000 to 0xffe), allowing up to 4094 VLANs.
+	//
+	// If ID is 0 (0x000, VLANNone), no VLAN is specified, and the other fields
+	// simply indicate a Frame's priority.
+	ID uint16
+}
+
+// MarshalBinary allocates a byte slice and marshals a VLAN into binary form.
+func (v *VLAN) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 2)
+	_, err := v.read(b)
+	return b, err
+}
+
+// read reads data from a VLAN into b.  read is used to marshal a VLAN into
+// binary form, but does not allocate on its own.
+func (v *VLAN) read(b []byte) (int, error) {
+	// Check for VLAN priority in valid range
+	if v.Priority > PriorityNetworkControl {
+		return 0, ErrInvalidVLAN
+	}
+
+	// Check for VLAN ID in valid range
+	if v.ID >= VLANMax {
+		return 0, ErrInvalidVLAN
+	}
+
+	// 3 bits: priority
+	ub := uint16(v.Priority) << 13
+
+	// 1 bit: drop eligible
+	var drop uint16
+	if v.DropEligible {
+		drop = 1
+	}
+	ub |= drop << 12
+
+	// 12 bits: VLAN ID
+	ub |= v.ID
+
+	binary.BigEndian.PutUint16(b, ub)
+	return 2, nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a VLAN.
+func (v *VLAN) UnmarshalBinary(b []byte) error {
+	// VLAN tag is always 2 bytes
+	if len(b) != 2 {
+		return io.ErrUnexpectedEOF
+	}
+
+	//  3 bits: priority
+	//  1 bit : drop eligible
+	// 12 bits: VLAN ID
+	ub := binary.BigEndian.Uint16(b[0:2])
+	v.Priority = Priority(uint8(ub >> 13))
+	v.DropEligible = ub&0x1000 != 0
+	v.ID = ub & 0x0fff
+
+	// Check for VLAN ID in valid range
+	if v.ID >= VLANMax {
+		return ErrInvalidVLAN
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
index 5bbd176..e2d79fc 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
@@ -178,6 +178,27 @@
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	log.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	log.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_ConnectStatus, operStatus voltha.OperStatus_OperStatus) error {
 	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
@@ -211,7 +232,7 @@
 
 func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
 	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) error {
-	log.Debugw("ChildDeviceDetected", log.Fields{"pPeviceId": parentDeviceId, "channelId": channelId})
+	log.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
 	rpc := "ChildDeviceDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -261,6 +282,46 @@
 
 }
 
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	log.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	log.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
 func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
 	log.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
 	rpc := "GetDevice"
@@ -356,3 +417,32 @@
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
 }
+
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	log.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	log.Debugw("SendPacketIn-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
index 8b582b8..3ac5c4f 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
@@ -191,6 +191,40 @@
 }
 
 func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Delete_device API on the adapter
+	if err := rhp.adapter.Delete_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
 	return new(empty.Empty), nil
 }
 
@@ -250,6 +284,45 @@
 }
 
 func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
+	log.Debugw("Receive_packet_out", log.Fields{"args": args})
+	if len(args) < 3 {
+		log.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &ic.StrType{}
+	egressPort := &ic.IntType{}
+	packet := &openflow_13.OfpPacketOut{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				log.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				return nil, err
+			}
+		case "outPort":
+			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
+				log.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				log.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	//Invoke the Receive_packet_out API on the adapter
+	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
 	return new(empty.Empty), nil
 }
 
diff --git a/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
index a1dfa16..05df234 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
@@ -33,12 +33,12 @@
 	Reenable_device(device *voltha.Device) error
 	Reboot_device(device *voltha.Device) error
 	Self_test_device(device *voltha.Device) error
-	Gelete_device(device *voltha.Device) error
+	Delete_device(device *voltha.Device) error
 	Get_device_details(device *voltha.Device) error
 	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups) error
 	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges) error
 	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(device *voltha.Device, egress_port_no int, msg openflow_13.PacketOut) error
+	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
 	Suppress_alarm(filter *voltha.AlarmFilter) error
 	Unsuppress_alarm(filter *voltha.AlarmFilter) error
 	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
diff --git a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
index c37307b..2873dbc 100755
--- a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
@@ -133,6 +133,7 @@
 	SharedResourceMgrs map[string]*PONResourceManager
 	SharedIdxByType    map[string]string
 	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
 }
 
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
@@ -254,10 +255,14 @@
 	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
-		PONRMgr.PonResourceRanges[StartIDx] = StartID
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
 	}
 	if EndID != 0 {
-		PONRMgr.PonResourceRanges[EndIDx] = EndID
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
 	}
 	//if SharedPoolID != 0 {
 	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
@@ -567,6 +572,7 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
 	}
+	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
 	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
index 9f7bebf..2879e99 100644
--- a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
@@ -178,12 +178,23 @@
 }
 
 type iScheduler struct {
-	AllocID   uint32    `json:"alloc_id"`
-	Scheduler Scheduler `json:"scheduler"`
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
 }
 type iGemPortAttribute struct {
-	GemportID    uint32           `json:"gem_port_id"`
-	GemAttribute GemPortAttribute `json:"gem_attribute"`
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    int           `json:"priority_q"`
+	Weight           int           `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
 }
 
 type TechProfileMgr struct {
@@ -207,7 +218,7 @@
 	ProfileType                    string              `json:"profile_type"`
 	Version                        int                 `json:"version"`
 	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	NumTconts                      uint32              `json:"num_tconts"`
+	NumTconts                      uint32              `json:"num_of_tconts"`
 	InstanceCtrl                   InstanceControl     `json:"instance_control"`
 	UsScheduler                    iScheduler          `json:"us_scheduler"`
 	DsScheduler                    iScheduler          `json:"ds_scheduler"`
@@ -371,7 +382,7 @@
 		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
 		return nil
 	}
-	fmt.Println("Num GEM ports in TP:", tp.NumGemPorts)
+	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
 		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
@@ -380,10 +391,24 @@
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.UpstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.DownstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
 	}
 	return &TechProfile{
 		SubscriberIdentifier: uniPortName,
@@ -394,11 +419,19 @@
 		NumTconts:            numOfTconts,
 		InstanceCtrl:         tp.InstanceCtrl,
 		UsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.UsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
 		DsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.DsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
 		UpstreamGemPortAttributeList:   usGemPortAttributeList,
 		DownstreamGemPortAttributeList: dsGemPortAttributeList}
 }
@@ -498,17 +531,17 @@
 }
 
 func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for upstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
 		return nil
@@ -516,24 +549,24 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
 func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
 
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for downstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
 		return nil
@@ -542,8 +575,8 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
@@ -561,7 +594,6 @@
 		}
 	}
 	tconts := []*openolt_pb.Tcont{}
-	// TODO: Fix me , UPSTREAM direction is not proper
 	// upstream scheduler
 	tcont_us := &openolt_pb.Tcont{
 		Direction: usSched.Direction,
diff --git a/vendor/github.com/opencord/voltha-go/db/model/branch.go b/vendor/github.com/opencord/voltha-go/db/model/branch.go
index ca89df0..5502e63 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/branch.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/branch.go
@@ -93,18 +93,21 @@
 
 		// Go through list of children names in current revision and new revision
 		// and then compare the resulting outputs to ensure that we have not lost any entries.
-		var previousNames, latestNames, missingNames []string
 
-		if previousNames = b.retrieveChildrenNames(b.Latest); len(previousNames) > 0 {
-			log.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
-		}
+		if level, _ := log.GetPackageLogLevel(); level == log.DebugLevel {
+			var previousNames, latestNames, missingNames []string
 
-		if latestNames = b.retrieveChildrenNames(b.Latest); len(latestNames) > 0 {
-			log.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
-		}
+			if previousNames = b.retrieveChildrenNames(b.Latest); len(previousNames) > 0 {
+				log.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
+			}
 
-		if missingNames = b.findMissingChildrenNames(previousNames, latestNames); len(missingNames) > 0 {
-			log.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
+			if latestNames = b.retrieveChildrenNames(latest); len(latestNames) > 0 {
+				log.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
+			}
+
+			if missingNames = b.findMissingChildrenNames(previousNames, latestNames); len(missingNames) > 0 {
+				log.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
+			}
 		}
 
 	} else {
diff --git a/vendor/github.com/opencord/voltha-go/db/model/node.go b/vendor/github.com/opencord/voltha-go/db/model/node.go
index 7bfdca0..1621b6f 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/node.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/node.go
@@ -24,7 +24,6 @@
 	"github.com/golang/protobuf/proto"
 	"github.com/opencord/voltha-go/common/log"
 	"reflect"
-	"runtime/debug"
 	"strings"
 	"sync"
 )
@@ -127,6 +126,9 @@
 
 	// If anything is new, then set the revision as the latest
 	if branch.GetLatest() == nil || revision.GetHash() != branch.GetLatest().GetHash() {
+		if revision.GetName() != "" {
+			GetRevCache().Cache.Store(revision.GetName(), revision)
+		}
 		branch.SetLatest(revision)
 	}
 
@@ -275,7 +277,7 @@
 
 	var result interface{}
 	var prList []interface{}
-	if pr := rev.LoadFromPersistence(path, txid); pr != nil {
+	if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil {
 		for _, revEntry := range pr {
 			prList = append(prList, revEntry.GetData())
 		}
@@ -288,6 +290,7 @@
 // Get retrieves the data from a node tree that resides at the specified path
 func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
 	log.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
+
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
 	}
@@ -307,9 +310,15 @@
 
 	var result interface{}
 
-	// If there is not request to reconcile, try to get it from memory
+	// If there is no request to reconcile, try to get it from memory
 	if !reconcile {
-		if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+		// Try to find an entry matching the path value from one of these sources
+		// 1.  Start with the cache which stores revisions by watch names
+		// 2.  Then look in the revision tree, especially if it's a sub-path such as /devices/1234/flows
+		// 3.  As a last effort, move on to the KV store
+		if entry, exists := GetRevCache().Cache.Load(path); exists && entry.(Revision) != nil {
+			return proto.Clone(entry.(Revision).GetData().(proto.Message))
+		} else if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
 			return result
 		}
 	}
@@ -317,14 +326,14 @@
 	// If we got to this point, we are either trying to reconcile with the db or
 	// or we simply failed at getting information from memory
 	if n.Root.KvStore != nil {
-		var prList []interface{}
-		if pr := rev.LoadFromPersistence(path, txid); pr != nil && len(pr) > 0 {
+		if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil && len(pr) > 0 {
 			// Did we receive a single or multiple revisions?
 			if len(pr) > 1 {
+				var revs []interface{}
 				for _, revEntry := range pr {
-					prList = append(prList, revEntry.GetData())
+					revs = append(revs, revEntry.GetData())
 				}
-				result = prList
+				result = revs
 			} else {
 				result = pr[0].GetData()
 			}
@@ -334,7 +343,7 @@
 	return result
 }
 
-// getPath traverses the specified path and retrieves the data associated to it
+//getPath traverses the specified path and retrieves the data associated to it
 func (n *node) getPath(rev Revision, path string, depth int) interface{} {
 	if path == "" {
 		return n.getData(rev, depth)
@@ -472,6 +481,7 @@
 			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
 
 			if childRev == nil {
+				log.Debugw("child-revision-is-nil", log.Fields{"key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -490,6 +500,7 @@
 					log.Debug("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
 					newChildRev.ClearHash()
 				}
+				log.Debugw("child-revisions-have-matching-hash", log.Fields{"hash": childRev.GetHash(), "key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -505,15 +516,15 @@
 			// Prefix the hash value with the data type (e.g. devices, logical_devices, adapters)
 			newChildRev.SetName(name + "/" + _keyValueType)
 
+			branch.LatestLock.Lock()
+			defer branch.LatestLock.Unlock()
+
 			if idx >= 0 {
 				children[idx] = newChildRev
 			} else {
 				children = append(children, newChildRev)
 			}
 
-			branch.LatestLock.Lock()
-			defer branch.LatestLock.Unlock()
-
 			updatedRev := rev.UpdateChildren(name, children, branch)
 
 			n.makeLatest(branch, updatedRev, nil)
@@ -544,13 +555,11 @@
 }
 
 func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
-	log.Debugf("Comparing types - expected: %+v, actual: %+v &&&&&& %s", reflect.ValueOf(n.Type).Type(),
-		reflect.TypeOf(data),
-		string(debug.Stack()))
+	log.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
 
 	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
 		// TODO raise error
-		log.Errorf("data does not match type: %+v", n.Type)
+		log.Errorw("types-do-not-match", log.Fields{"actual": reflect.TypeOf(data), "expected": n.Type})
 		return nil
 	}
 
@@ -675,7 +684,10 @@
 			newChildRev := childNode.Add(path, data, txid, makeBranch)
 
 			// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
-			childRev.SetName(name + "/" + keyValue.(string))
+			newChildRev.SetName(name + "/" + keyValue.(string))
+
+			branch.LatestLock.Lock()
+			defer branch.LatestLock.Unlock()
 
 			if idx >= 0 {
 				children[idx] = newChildRev
@@ -683,9 +695,6 @@
 				children = append(children, newChildRev)
 			}
 
-			branch.LatestLock.Lock()
-			defer branch.LatestLock.Unlock()
-
 			updatedRev := rev.UpdateChildren(name, children, branch)
 			n.makeLatest(branch, updatedRev, nil)
 
@@ -758,15 +767,15 @@
 					}
 					newChildRev := childNode.Remove(path, txid, makeBranch)
 
+					branch.LatestLock.Lock()
+					defer branch.LatestLock.Unlock()
+
 					if idx >= 0 {
 						children[idx] = newChildRev
 					} else {
 						children = append(children, newChildRev)
 					}
 
-					branch.LatestLock.Lock()
-					defer branch.LatestLock.Unlock()
-
 					rev.SetChildren(name, children)
 					branch.GetLatest().Drop(txid, false)
 					n.makeLatest(branch, rev, nil)
@@ -784,6 +793,7 @@
 				}
 
 				childRev.StorageDrop(txid, true)
+				GetRevCache().Cache.Delete(childRev.GetName())
 
 				branch.LatestLock.Lock()
 				defer branch.LatestLock.Unlock()
diff --git a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
index 0ccc58e..d7b0b58 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
@@ -21,30 +21,28 @@
 	"fmt"
 	"github.com/golang/protobuf/proto"
 	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
 	"reflect"
-	"runtime/debug"
 	"sort"
 	"sync"
 )
 
 // TODO: Cache logic will have to be revisited to cleanup unused entries in memory (disabled for now)
 //
-//type revCacheSingleton struct {
-//	sync.RWMutex
-//	//Cache map[string]interface{}
-//	Cache sync.Map
-//}
-//
-//var revCacheInstance *revCacheSingleton
-//var revCacheOnce sync.Once
-//
-//func GetRevCache() *revCacheSingleton {
-//	revCacheOnce.Do(func() {
-//		//revCacheInstance = &revCacheSingleton{Cache: make(map[string]interface{})}
-//		revCacheInstance = &revCacheSingleton{Cache: sync.Map{}}
-//	})
-//	return revCacheInstance
-//}
+type revCacheSingleton struct {
+	sync.RWMutex
+	Cache sync.Map
+}
+
+var revCacheInstance *revCacheSingleton
+var revCacheOnce sync.Once
+
+func GetRevCache() *revCacheSingleton {
+	revCacheOnce.Do(func() {
+		revCacheInstance = &revCacheSingleton{Cache: sync.Map{}}
+	})
+	return revCacheInstance
+}
 
 type NonPersistedRevision struct {
 	mutex        sync.RWMutex
@@ -409,7 +407,7 @@
 
 // Drop is used to indicate when a revision is no longer required
 func (npr *NonPersistedRevision) Drop(txid string, includeConfig bool) {
-	log.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "stack": string(debug.Stack())})
+	log.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "name": npr.GetName()})
 	npr.discarded = true
 }
 
@@ -428,7 +426,7 @@
 	}
 }
 
-func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	// stub... required by interface
 	return nil
 }
diff --git a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
index a56b776..ea99cf7 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
@@ -23,7 +23,6 @@
 	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/kvstore"
 	"reflect"
-	"runtime/debug"
 	"strings"
 	"sync"
 )
@@ -72,10 +71,7 @@
 		return
 	}
 
-	if pair, _ := pr.kvStore.Get(pr.GetName()); pair != nil && skipOnExist {
-		log.Debugw("revision-config-already-exists", log.Fields{"hash": pr.GetHash(), "name": pr.GetName()})
-		return
-	}
+	log.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 
 	if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
 		// TODO report error
@@ -89,10 +85,9 @@
 		}
 
 		if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
-			log.Warnw("problem-storing-revision-config",
-				log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 		} else {
-			log.Debugw("storing-revision-config", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 			pr.isStored = true
 		}
 	}
@@ -100,7 +95,7 @@
 
 func (pr *PersistedRevision) SetupWatch(key string) {
 	if key == "" {
-		log.Debugw("ignoring-watch", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("ignoring-watch", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 		return
 	}
 
@@ -111,7 +106,7 @@
 	if pr.events == nil {
 		pr.events = make(chan *kvstore.Event)
 
-		log.Debugw("setting-watch-channel", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("setting-watch-channel", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 
 		pr.SetName(key)
 		pr.events = pr.kvStore.CreateWatch(key)
@@ -120,7 +115,7 @@
 	if !pr.isWatched {
 		pr.isWatched = true
 
-		log.Debugw("setting-watch-routine", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("setting-watch-routine", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 
 		// Start watching
 		go pr.startWatching()
@@ -128,7 +123,7 @@
 }
 
 func (pr *PersistedRevision) startWatching() {
-	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash(), "stack": string(debug.Stack())})
+	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 
 StopWatchLoop:
 	for {
@@ -154,17 +149,106 @@
 			case kvstore.PUT:
 				log.Debugw("update-in-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 
-				if dataPair, err := pr.kvStore.Get(pr.GetName()); err != nil || dataPair == nil {
-					log.Errorw("update-in-memory--key-retrieval-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
-				} else {
-					data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())
+				data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())
 
-					if err := proto.Unmarshal(dataPair.Value.([]byte), data.Interface().(proto.Message)); err != nil {
-						log.Errorw("update-in-memory--unmarshal-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
-					} else {
-						if pr.GetNode().GetProxy() != nil {
-							pr.LoadFromPersistence(pr.GetNode().GetProxy().getFullPath(), "")
+				if err := proto.Unmarshal(event.Value.([]byte), data.Interface().(proto.Message)); err != nil {
+					log.Errorw("failed-to-unmarshal-watch-data", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
+				} else {
+					var pathLock string
+					var pac *proxyAccessControl
+					var blobs map[string]*kvstore.KVPair
+
+					// The watch reported new persistence data.
+					// Construct an object that will be used to update the memory
+					blobs = make(map[string]*kvstore.KVPair)
+					key, _ := kvstore.ToString(event.Key)
+					blobs[key] = &kvstore.KVPair{
+						Key:     key,
+						Value:   event.Value,
+						Session: "",
+						Lease:   0,
+					}
+
+					if pr.GetNode().GetProxy() != nil {
+						//
+						// If a proxy exists for this revision, use it to lock access to the path
+						// and prevent simultaneous updates to the object in memory
+						//
+						pathLock, _ = pr.GetNode().GetProxy().parseForControlledPath(pr.GetNode().GetProxy().getFullPath())
+
+						//If the proxy already has a request in progress, then there is no need to process the watch
+						log.Debugw("checking-if-path-is-locked", log.Fields{"key": pr.GetHash(), "pathLock": pathLock})
+						if PAC().IsReserved(pathLock) {
+							log.Debugw("operation-in-progress", log.Fields{
+								"key":       pr.GetHash(),
+								"path":      pr.GetNode().GetProxy().getFullPath(),
+								"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							})
+
+							continue
+
+							// TODO/FIXME: keep logic for now in case we need to control based on running operation
+							//
+							// The code below seems to revert the in-memory/persistence value (occasionally) with
+							// the one received from the watch event.
+							//
+							// The same problem may occur, in the scenario where the core owning a device
+							// receives a watch event for an update made by another core, and when the owning core is
+							// also processing an update.  Need to investigate...
+							//
+							//switch pr.GetNode().GetRoot().GetProxy().Operation {
+							//case PROXY_UPDATE:
+							//	// We will need to reload once the update operation completes.
+							//	// Therefore, the data of the current event is most likely out-dated
+							//	// and should be ignored
+							//	log.Debugw("reload-required", log.Fields{
+							//		"key":       pr.GetHash(),
+							//		"path":      pr.GetNode().GetProxy().getFullPath(),
+							//		"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							//	})
+							//
+							//	// Eliminate the object constructed earlier
+							//	blobs = nil
+							//
+							//case PROXY_ADD:
+							//	fallthrough
+							//
+							//case PROXY_REMOVE:
+							//	fallthrough
+							//
+							//case PROXY_GET:
+							//	fallthrough
+							//
+							//default:
+							//	// No need to process the event ... move on
+							//	log.Debugw("", log.Fields{
+							//		"key":       pr.GetHash(),
+							//		"path":      pr.GetNode().GetProxy().getFullPath(),
+							//		"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							//	})
+							//
+							//	continue
+							//}
 						}
+
+						// Reserve the path to prevent others to modify while we reload from persistence
+						log.Debugw("reserve-and-lock-path", log.Fields{"key": pr.GetHash(), "path": pathLock})
+						pac = PAC().ReservePath(pr.GetNode().GetProxy().getFullPath(), pr.GetNode().GetProxy(), pathLock)
+						pac.SetProxy(pr.GetNode().GetProxy())
+						pac.lock()
+
+						// Load changes and apply to memory
+						pr.LoadFromPersistence(pr.GetName(), "", blobs)
+
+						log.Debugw("release-and-unlock-path", log.Fields{"key": pr.GetHash(), "path": pathLock})
+						pac.unlock()
+						PAC().ReleasePath(pathLock)
+
+					} else {
+						log.Debugw("revision-with-no-proxy", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+
+						// Load changes and apply to memory
+						pr.LoadFromPersistence(pr.GetName(), "", blobs)
 					}
 				}
 
@@ -176,7 +260,7 @@
 
 	Watches().Cache.Delete(pr.GetName() + "-" + pr.GetHash())
 
-	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "stack": string(debug.Stack())})
+	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 }
 
 // UpdateData modifies the information in the data model and saves it in the persistent storage
@@ -196,7 +280,7 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
+		pr.Drop(branch.Txid, false)
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -206,7 +290,8 @@
 }
 
 // UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateChildren(name string, children []Revision,
+	branch *Branch) Revision {
 	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
 
 	newNPR := pr.Revision.UpdateChildren(name, children, branch)
@@ -222,7 +307,6 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -248,7 +332,6 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -267,7 +350,7 @@
 // and its associated config when required
 func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
 	log.Debugw("dropping-revision",
-		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "stack": string(debug.Stack())})
+		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
 
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
@@ -297,25 +380,28 @@
 
 // verifyPersistedEntry validates if the provided data is available or not in memory and applies updates as required
 func (pr *PersistedRevision) verifyPersistedEntry(data interface{}, typeName string, keyName string, keyValue string, txid string) (response Revision) {
-	rev := pr
+	//rev := pr
 
-	children := make([]Revision, len(rev.GetBranch().GetLatest().GetChildren(typeName)))
-	copy(children, rev.GetBranch().GetLatest().GetChildren(typeName))
+	children := make([]Revision, len(pr.GetBranch().GetLatest().GetChildren(typeName)))
+	copy(children, pr.GetBranch().GetLatest().GetChildren(typeName))
 
 	// Verify if the revision contains a child that matches that key
-	if childIdx, childRev := rev.GetNode().findRevByKey(rev.GetBranch().GetLatest().GetChildren(typeName), keyName, keyValue); childRev != nil {
+	if childIdx, childRev := pr.GetNode().findRevByKey(pr.GetBranch().GetLatest().GetChildren(typeName), keyName,
+		keyValue); childRev != nil {
 		// A child matching the provided key exists in memory
 		// Verify if the data differs to what was retrieved from persistence
 		if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() {
 			log.Debugw("verify-persisted-entry--data-is-different", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 
 			// Data has changed; replace the child entry and update the parent revision
-			updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
-			updatedChildRev.SetupWatch(updatedChildRev.GetName())
 			childRev.Drop(txid, false)
+			updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
+			updatedChildRev.GetNode().SetProxy(childRev.GetNode().GetProxy())
+			updatedChildRev.SetupWatch(updatedChildRev.GetName())
 
 			if childIdx >= 0 {
 				children[childIdx] = updatedChildRev
@@ -323,18 +409,19 @@
 				children = append(children, updatedChildRev)
 			}
 
-			rev.GetBranch().LatestLock.Lock()
-			updatedRev := rev.UpdateChildren(typeName, children, rev.GetBranch())
-			rev.GetBranch().Node.makeLatest(rev.GetBranch(), updatedRev, nil)
-			rev.GetBranch().LatestLock.Unlock()
+			pr.GetBranch().LatestLock.Lock()
+			updatedRev := pr.GetBranch().Node.Latest().UpdateChildren(typeName, children, pr.GetBranch())
+			pr.GetBranch().Node.makeLatest(pr.GetBranch(), updatedRev, nil)
+			pr.GetBranch().LatestLock.Unlock()
 
 			// Drop the previous child revision
-			rev.GetBranch().Node.Latest().ChildDrop(typeName, childRev.GetHash())
+			pr.GetBranch().Node.Latest().ChildDrop(typeName, childRev.GetHash())
 
 			if updatedChildRev != nil {
 				log.Debugw("verify-persisted-entry--adding-child", log.Fields{
 					"key":  updatedChildRev.GetHash(),
 					"name": updatedChildRev.GetName(),
+					"data": updatedChildRev.GetData(),
 				})
 				response = updatedChildRev
 			}
@@ -343,11 +430,13 @@
 			log.Debugw("verify-persisted-entry--same-data", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 			if childRev != nil {
 				log.Debugw("verify-persisted-entry--keeping-child", log.Fields{
 					"key":  childRev.GetHash(),
 					"name": childRev.GetName(),
+					"data": childRev.GetData(),
 				})
 				response = childRev
 			}
@@ -358,29 +447,32 @@
 		log.Debugw("verify-persisted-entry--no-such-entry", log.Fields{
 			"key":  keyValue,
 			"name": typeName,
+			"data": data,
 		})
 
 		// Construct a new child node with the retrieved persistence data
-		childRev = rev.GetBranch().Node.MakeNode(data, txid).Latest(txid)
+		childRev = pr.GetBranch().Node.MakeNode(data, txid).Latest(txid)
 
 		// We need to start watching this entry for future changes
 		childRev.SetName(typeName + "/" + keyValue)
 
 		// Add the child to the parent revision
-		rev.GetBranch().LatestLock.Lock()
+		pr.GetBranch().LatestLock.Lock()
 		children = append(children, childRev)
-		updatedRev := rev.GetBranch().Node.Latest().UpdateChildren(typeName, children, rev.GetBranch())
+		updatedRev := pr.GetBranch().Node.Latest().UpdateChildren(typeName, children, pr.GetBranch())
+		updatedRev.GetNode().SetProxy(pr.GetNode().GetProxy())
 		childRev.SetupWatch(childRev.GetName())
 
 		//rev.GetBranch().Node.Latest().Drop(txid, false)
-		rev.GetBranch().Node.makeLatest(rev.GetBranch(), updatedRev, nil)
-		rev.GetBranch().LatestLock.Unlock()
+		pr.GetBranch().Node.makeLatest(pr.GetBranch(), updatedRev, nil)
+		pr.GetBranch().LatestLock.Unlock()
 
 		// Child entry is valid and can be included in the response object
 		if childRev != nil {
 			log.Debugw("verify-persisted-entry--adding-child", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 			response = childRev
 		}
@@ -391,39 +483,46 @@
 
 // LoadFromPersistence retrieves data from kv store at the specified location and refreshes the memory
 // by adding missing entries, updating changed entries and ignoring unchanged ones
-func (pr *PersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+func (pr *PersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
 
 	log.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})
 
 	var response []Revision
-	var rev Revision
 
-	rev = pr
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
 
 	if pr.kvStore != nil && path != "" {
-		blobMap, _ := pr.kvStore.List(path)
+		if len(blobs) == 0 {
+			log.Debugw("retrieve-from-kv", log.Fields{"path": path, "txid": txid})
+			blobs, _ = pr.kvStore.List(path)
+		}
 
 		partition := strings.SplitN(path, "/", 2)
 		name := partition[0]
 
+		var nodeType interface{}
 		if len(partition) < 2 {
 			path = ""
+			nodeType = pr.GetBranch().Node.Type
 		} else {
 			path = partition[1]
+			nodeType = pr.GetBranch().Node.Root.Type
 		}
 
-		field := ChildrenFields(rev.GetBranch().Node.Type)[name]
+		field := ChildrenFields(nodeType)[name]
 
 		if field != nil && field.IsContainer {
 			log.Debugw("load-from-persistence--start-blobs", log.Fields{
 				"path": path,
 				"name": name,
-				"size": len(blobMap),
+				"size": len(blobs),
 			})
 
-			for _, blob := range blobMap {
+			for _, blob := range blobs {
 				output := blob.Value.([]byte)
 
 				data := reflect.New(field.ClassType.Elem())
@@ -440,7 +539,8 @@
 						// based on the field's key attribute
 						_, key := GetAttributeValue(data.Interface(), field.Key, 0)
 
-						if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(), txid); entry != nil {
+						if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(),
+							txid); entry != nil {
 							response = append(response, entry)
 						}
 					}
@@ -456,7 +556,8 @@
 					}
 					keyValue := field.KeyFromStr(key)
 
-					if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string), txid); entry != nil {
+					if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string),
+						txid); entry != nil {
 						response = append(response, entry)
 					}
 				}
@@ -465,7 +566,7 @@
 			log.Debugw("load-from-persistence--end-blobs", log.Fields{"path": path, "name": name})
 		} else {
 			log.Debugw("load-from-persistence--cannot-process-field", log.Fields{
-				"type": rev.GetBranch().Node.Type,
+				"type": pr.GetBranch().Node.Type,
 				"name": name,
 			})
 		}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy.go b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
index b45fb1d..2933464 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/proxy.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
@@ -186,11 +186,20 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+	log.Debugw("proxy-list", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
+	p.Operation = PROXY_LIST
 	pac.SetProxy(p)
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	rv := pac.List(path, depth, deep, txid, controlled)
 
@@ -208,10 +217,16 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+	log.Debugw("proxy-get", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
+	p.Operation = PROXY_GET
 	pac.SetProxy(p)
 
 	rv := pac.Get(path, depth, deep, txid, controlled)
@@ -237,7 +252,13 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s, Controlled: %b", path, effectivePath, fullPath, pathLock, controlled)
+	log.Debugw("proxy-update", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
@@ -247,7 +268,6 @@
 	defer func(op ProxyOperation) {
 		pac.getProxy().Operation = op
 	}(PROXY_GET)
-
 	log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
 
 	return pac.Update(fullPath, data, strict, txid, controlled)
@@ -273,15 +293,21 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-add-with-id", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_ADD
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	pac.SetProxy(p)
 
@@ -308,16 +334,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-add", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_ADD
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
 
@@ -342,16 +374,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-remove", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_REMOVE
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
 
@@ -377,16 +415,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-create", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_CREATE
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--create-proxy", log.Fields{"operation": p.Operation})
 
diff --git a/vendor/github.com/opencord/voltha-go/db/model/revision.go b/vendor/github.com/opencord/voltha-go/db/model/revision.go
index 79620e1..74ae3f7 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/revision.go
@@ -15,6 +15,10 @@
  */
 package model
 
+import (
+	"github.com/opencord/voltha-go/db/kvstore"
+)
+
 type Revision interface {
 	Finalize(bool)
 	IsDiscarded() bool
@@ -38,7 +42,7 @@
 	Get(int) interface{}
 	GetData() interface{}
 	GetNode() *node
-	LoadFromPersistence(path string, txid string) []Revision
+	LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
 	UpdateData(data interface{}, branch *Branch) Revision
 	UpdateChildren(name string, children []Revision, branch *Branch) Revision
 	UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
index ec191dc..367f442 100644
--- a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
+++ b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
@@ -25,4 +25,13 @@
 type DeviceManager interface {
 	GetDevice(string) (*voltha.Device, error)
 	IsRootDevice(string) (bool, error)
+	NotifyInvalidTransition(*voltha.Device) error
+	SetAdminStateToEnable(*voltha.Device) error
+	CreateLogicalDevice(*voltha.Device) error
+	SetupUNILogicalPorts(*voltha.Device) error
+	DisableAllChildDevices(cDevice *voltha.Device) error
+	DeleteLogicalDevice(cDevice *voltha.Device) error
+	DeleteLogicalPorts(cDevice *voltha.Device) error
+	DeleteAllChildDevices(cDevice *voltha.Device) error
+	RunPostDeviceDelete(cDevice *voltha.Device) error
 }
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
index 0c485bb..c2c9287 100644
--- a/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
@@ -171,9 +171,11 @@
 
 func (dr *DeviceRules) Copy() *DeviceRules {
 	copyDR := NewDeviceRules()
-	for key, val := range dr.Rules {
-		if val != nil {
-			copyDR.Rules[key] = val.Copy()
+	if dr != nil {
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
 		}
 	}
 	return copyDR