Updated adapter to handle DHCP trap on NNI and packet-in/out; includes bug fixes.
Tested EAPOL/DHCP/HSIA functionality E2E with EdgeCore OLT and TWSH ONU KIT.
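
This change also pulls in github.com/mdlayher/ethernet for packet-out handling.
Below is a minimal, illustrative sketch of the double-tag stripping it enables,
mirroring DeviceHandler.PacketOut in this patch (the frame bytes are made up
for the example):

    package main

    import (
    	"fmt"

    	"github.com/mdlayher/ethernet"
    )

    func stripOuterVLAN(data []byte) ([]byte, error) {
    	var frame ethernet.Frame
    	if err := (&frame).UnmarshalBinary(data); err != nil {
    		return nil, err
    	}
    	// Double tagged if the inner TPID (bytes 16-17 of the original frame) is 0x8100.
    	if frame.VLAN != nil && len(data) >= 18 && (uint16(data[16])<<8|uint16(data[17])) == 0x8100 {
    		frame.VLAN = nil // drop the outer tag; the inner tag stays in the payload
    		return frame.MarshalBinary()
    	}
    	return data, nil
    }

    func main() {
    	// dst, src, outer tag (vid 2), inner tag (vid 20), EtherType IPv4, zero payload
    	pkt := append([]byte{
    		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    		0x81, 0x00, 0x00, 0x02, 0x81, 0x00, 0x00, 0x14, 0x08, 0x00},
    		make([]byte, 46)...)
    	out, err := stripOuterVLAN(pkt)
    	fmt.Println(len(pkt), len(out), err) // the outer 4-byte tag is gone in out
    }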

patch: PON port number is now derived from the platform helpers and sent to the core, plus bug fixes
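
For context, a rough standalone sketch of the platform port-number encoding this
relies on; only the NNI bit-16 marker is confirmed by the olt_platform.go hunk in
this patch, the PON and UNI layouts below are assumptions used for illustration:

    package main

    import "fmt"

    // NNI logical port numbers carry bit 16 (see olt_platform.go in this patch).
    func nniPortNo(intfID uint32) uint32 { return (1 << 16) | intfID }

    // Assumed PON port encoding, for illustration only.
    func ponPortNo(intfID uint32) uint32 { return (2 << 28) | intfID }

    // Assumed UNI port layout (intf/onu/uni packed into one number), for illustration only.
    func uniPortNo(intfID, onuID, uniID uint32) uint32 { return intfID<<11 | onuID<<4 | uniID }

    func main() {
    	fmt.Println(nniPortNo(0), ponPortNo(1), uniPortNo(0, 1, 0))
    }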

Retested the EAPOL/DHCP/HSIA use cases end to end with EdgeCore OLT and TWSH ONU KIT.

Change-Id: I99df82fd7a1385c10878f6fe09ce0d30c48d8e99
diff --git a/Gopkg.lock b/Gopkg.lock
index a5d667f..f7a6095 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -199,6 +199,14 @@
   version = "v0.8.2"
 
 [[projects]]
+  branch = "master"
+  digest = "1:399231b1d0c4fc8204517cfe50e3f959b2404a9a8640983634dd258d7bf2821f"
+  name = "github.com/mdlayher/ethernet"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "5b5fc417d966b71d3781b0d5860413e9fae947c1"
+
+[[projects]]
   digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
   name = "github.com/mitchellh/go-homedir"
   packages = ["."]
@@ -216,7 +224,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:89203eb51605633cffa2a11f70f3547c2ac040fcf1b8e45e6a16124312caf7b4"
+  digest = "1:b442c37d803107ffcd43329b8e6792c360cd3f05fb228a5439cdbffd510caef2"
   name = "github.com/opencord/voltha-go"
   packages = [
     "adapters",
@@ -233,7 +241,7 @@
     "rw_core/utils",
   ]
   pruneopts = "UT"
-  revision = "3ab34888e669e50c0ff7e412eba61adaefff48ed"
+  revision = "f6516ddf375c4e1b3b46b07d9da8af15b4c3deba"
 
 [[projects]]
   branch = "master"
@@ -436,6 +444,7 @@
   input-imports = [
     "github.com/gogo/protobuf/proto",
     "github.com/golang/protobuf/ptypes",
+    "github.com/mdlayher/ethernet",
     "github.com/opencord/voltha-go/adapters",
     "github.com/opencord/voltha-go/adapters/common",
     "github.com/opencord/voltha-go/common/log",
diff --git a/Gopkg.toml b/Gopkg.toml
index 413989e..7f6e5e6 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -45,6 +45,10 @@
   name = "google.golang.org/grpc"
   version = "1.19.1"
 
+[[constraint]]
+  branch = "master"
+  name = "github.com/mdlayher/ethernet"
+
 [[override]]
   branch = "master"
   name = "go.etcd.io/etcd"
diff --git a/adaptercore/device_handler.go b/adaptercore/device_handler.go
index 3bb34db..866f815 100644
--- a/adaptercore/device_handler.go
+++ b/adaptercore/device_handler.go
@@ -27,6 +27,7 @@
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
+	"github.com/mdlayher/ethernet"
 	com "github.com/opencord/voltha-go/adapters/common"
 	"github.com/opencord/voltha-go/common/log"
 	rsrcMgr "github.com/opencord/voltha-openolt-adapter/adaptercore/resourcemanager"
@@ -128,12 +129,7 @@
 	} else {
 		operStatus = voltha.OperStatus_DISCOVERED
 	}
-	// portNum := IntfIdToPortNo(intfId,portType)
-	portNum := intfId
-	if portType == voltha.Port_ETHERNET_NNI {
-		portNum = IntfIdToPortNo(intfId, portType)
-	}
-	// portNum := IntfIdToPortNo(intfId,portType)
+	portNum := IntfIdToPortNo(intfId, portType)
 	label := GetportLabel(portNum, portType)
 	if len(label) == 0 {
 		log.Errorw("Invalid-port-label", log.Fields{"portNum": portNum, "portType": portType})
@@ -251,6 +247,7 @@
 		case *oop.Indication_PktInd:
 			pktInd := indication.GetPktInd()
 			log.Infow("Received pakcet indication ", log.Fields{"PktInd": pktInd})
+			go dh.handlePacketIndication(pktInd)
 		case *oop.Indication_PortStats:
 			portStats := indication.GetPortStats()
 			log.Infow("Received port stats indication", log.Fields{"PortStats": portStats})
@@ -311,7 +308,7 @@
 		log.Errorw("Device info is nil", log.Fields{})
 		return errors.New("Failed to get device info from OLT")
 	}
-
+	log.Debugw("Fetched device info", log.Fields{"deviceInfo": deviceInfo})
 	dh.device.Root = true
 	dh.device.Vendor = deviceInfo.Vendor
 	dh.device.Model = deviceInfo.Model
@@ -319,7 +316,7 @@
 	dh.device.SerialNumber = deviceInfo.DeviceSerialNumber
 	dh.device.HardwareVersion = deviceInfo.HardwareVersion
 	dh.device.FirmwareVersion = deviceInfo.FirmwareVersion
-	// TODO : Check whether this MAC address is learnt from SDPON or need to send from device
+	// FIXME: Remove hardcoded MAC address
 	dh.device.MacAddress = "0a:0b:0c:0d:0e:0f"
 
 	// Synchronous call to update device - this method is run in its own go routine
@@ -394,10 +391,10 @@
 func (dh *DeviceHandler) omciIndication(omciInd *oop.OmciIndication) error {
 	log.Debugw("omci indication", log.Fields{"intfId": omciInd.IntfId, "onuId": omciInd.OnuId})
 
-	//        ponPort := IntfIdToPortNo(omciInd.GetIntfId(),voltha.Port_PON_OLT)
+	ponPort := IntfIdToPortNo(omciInd.GetIntfId(), voltha.Port_PON_OLT)
 	kwargs := make(map[string]interface{})
 	kwargs["onu_id"] = omciInd.OnuId
-	kwargs["parent_port_no"] = omciInd.GetIntfId()
+	kwargs["parent_port_no"] = ponPort
 
 	if onuDevice, err := dh.coreProxy.GetChildDevice(nil, dh.device.Id, kwargs); err != nil {
 		log.Errorw("onu not found", log.Fields{"intfId": omciInd.IntfId, "onuId": omciInd.OnuId})
@@ -416,7 +413,6 @@
 
 // Process_inter_adapter_message process inter adater message
 func (dh *DeviceHandler) Process_inter_adapter_message(msg *ic.InterAdapterMessage) error {
-	// TODO
 	log.Debugw("Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
 	if msg.Header.Type == ic.InterAdapterMessageType_OMCI_REQUEST {
 		msgId := msg.Header.Id
@@ -462,6 +458,7 @@
 
 func (dh *DeviceHandler) activateONU(intfId uint32, onuId int64, serialNum *oop.SerialNumber, serialNumber string) {
 	log.Debugw("activate-onu", log.Fields{"intfId": intfId, "onuId": onuId, "serialNum": serialNum, "serialNumber": serialNumber})
+	dh.flowMgr.UpdateOnuInfo(intfId, uint32(onuId), serialNumber)
 	// TODO: need resource manager
 	var pir uint32 = 1000000
 	Onu := oop.Onu{IntfId: intfId, OnuId: uint32(onuId), SerialNumber: serialNum, Pir: pir}
@@ -473,10 +470,8 @@
 }
 
 func (dh *DeviceHandler) onuDiscIndication(onuDiscInd *oop.OnuDiscIndication, onuId uint32, sn string) error {
-	//channelId := MkUniPortNum(onuDiscInd.GetIntfId(), onuId, uint32(0))
-	//parentPortNo := IntfIdToPortNo(onuDiscInd.GetIntfId(),voltha.Port_PON_OLT)
 	channelId := onuDiscInd.GetIntfId()
-	parentPortNo := onuDiscInd.GetIntfId()
+	parentPortNo := IntfIdToPortNo(onuDiscInd.GetIntfId(), voltha.Port_PON_OLT)
 	if err := dh.coreProxy.ChildDeviceDetected(nil, dh.device.Id, int(parentPortNo), "brcm_openomci_onu", int(channelId), string(onuDiscInd.SerialNumber.GetVendorId()), sn, int64(onuId)); err != nil {
 		log.Errorw("Create onu error", log.Fields{"parent_id": dh.device.Id, "ponPort": onuDiscInd.GetIntfId(), "onuId": onuId, "sn": sn, "error": err})
 		return err
@@ -484,7 +479,7 @@
 
 	kwargs := make(map[string]interface{})
 	kwargs["onu_id"] = onuId
-	kwargs["parent_port_no"] = onuDiscInd.GetIntfId()
+	kwargs["parent_port_no"] = parentPortNo
 
 	for i := 0; i < 10; i++ {
 		if onuDevice, _ := dh.coreProxy.GetChildDevice(nil, dh.device.Id, kwargs); onuDevice != nil {
@@ -503,19 +498,18 @@
 	serialNumber := dh.stringifySerialNumber(onuInd.SerialNumber)
 
 	kwargs := make(map[string]interface{})
-	//        ponPort := IntfIdToPortNo(onuInd.GetIntfId(),voltha.Port_PON_OLT)
+	ponPort := IntfIdToPortNo(onuInd.GetIntfId(), voltha.Port_PON_OLT)
 
 	if serialNumber != "" {
 		kwargs["serial_number"] = serialNumber
 	} else {
 		kwargs["onu_id"] = onuInd.OnuId
-		kwargs["parent_port_no"] = onuInd.GetIntfId()
+		kwargs["parent_port_no"] = ponPort
 	}
 	if onuDevice, _ := dh.coreProxy.GetChildDevice(nil, dh.device.Id, kwargs); onuDevice != nil {
-		//if intfIdFromPortNo(onuDevice.ParentPortNo) != onuInd.GetIntfId() {
-		if onuDevice.ParentPortNo != onuInd.GetIntfId() {
+		if onuDevice.ParentPortNo != ponPort {
 			//log.Warnw("ONU-is-on-a-different-intf-id-now", log.Fields{"previousIntfId": intfIdFromPortNo(onuDevice.ParentPortNo), "currentIntfId": onuInd.GetIntfId()})
-			log.Warnw("ONU-is-on-a-different-intf-id-now", log.Fields{"previousIntfId": onuDevice.ParentPortNo, "currentIntfId": onuInd.GetIntfId()})
+			log.Warnw("ONU-is-on-a-different-intf-id-now", log.Fields{"previousIntfId": onuDevice.ParentPortNo, "currentIntfId": ponPort})
 		}
 
 		if onuDevice.ProxyAddress.OnuId != onuInd.OnuId {
@@ -608,6 +602,15 @@
 	return onuDevice
 }
 
+func (dh *DeviceHandler) SendPacketInToCore(logicalPort uint32, packetPayload []byte) {
+	log.Debugw("SendPacketInToCore", log.Fields{"port": logicalPort, "packetPayload": packetPayload})
+	if err := dh.coreProxy.SendPacketIn(nil, dh.device.Id, logicalPort, packetPayload); err != nil {
+		log.Errorw("Error sending packetin to core", log.Fields{"error": err})
+		return
+	}
+	log.Debug("Sent packet-in to core successfully")
+}
+
 func (dh *DeviceHandler) UpdateFlowsIncrementally(device *voltha.Device, flows *of.FlowChanges, groups *of.FlowGroupChanges) error {
 	log.Debugw("In UpdateFlowsIncrementally", log.Fields{"deviceId": device.Id, "flows": flows, "groups": groups})
 	if flows != nil {
@@ -683,3 +686,73 @@
 
 	return nil
 }
+
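+// handlePacketIndication resolves the logical port for an upstream packet indication and forwards the packet to the core as a packet-in.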
+func (dh *DeviceHandler) handlePacketIndication(packetIn *oop.PacketIndication) {
+	log.Debugw("Received packet-in", log.Fields{"packet-indication": *packetIn})
+	logicalPortNum, err := dh.flowMgr.GetLogicalPortFromPacketIn(packetIn)
+	if err != nil {
+		log.Errorw("Error getting logical port from packet-in", log.Fields{"error": err})
+		return
+	}
+	log.Debugw("sending packet-in to core", log.Fields{"logicalPortNum": logicalPortNum, "packet": *packetIn})
+	if err := dh.coreProxy.SendPacketIn(nil, dh.device.Id, logicalPortNum, packetIn.Pkt); err != nil {
+		log.Errorw("Error sending packet-in to core", log.Fields{"error": err})
+		return
+	}
+	log.Debug("Success sending packet-in to core!")
+}
+
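+// PacketOut sends a packet-out received from the core towards the given egress port; for UNI egress the outer VLAN tag of a double-tagged frame is stripped, for NNI egress the packet is sent out on the uplink.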
+func (dh *DeviceHandler) PacketOut(egress_port_no int, packet *of.OfpPacketOut) error {
+	log.Debugw("PacketOut", log.Fields{"deviceId": dh.deviceId, "egress_port_no": egress_port_no, "pkt-length": len(packet.Data)})
+	var etherFrame ethernet.Frame
+	err := (&etherFrame).UnmarshalBinary(packet.Data)
+	if err != nil {
+		log.Errorw("Failed to unmarshal into ethernet frame", log.Fields{"err": err, "pkt-length": len(packet.Data)})
+		return err
+	}
+	log.Debugw("Ethernet Frame", log.Fields{"Frame": etherFrame})
+	egressPortType := IntfIdToPortTypeName(uint32(egress_port_no))
+	if egressPortType == voltha.Port_ETHERNET_UNI {
+		if etherFrame.VLAN != nil { // If double tag, remove the outer tag
+			nextEthType := (uint16(packet.Data[16]) << 8) | uint16(packet.Data[17])
+			if nextEthType == 0x8100 {
+				etherFrame.VLAN = nil
+				packet.Data, err = etherFrame.MarshalBinary()
+				if err != nil {
+					log.Errorw("failed-to-marshal-frame", log.Fields{"error": err})
+					return err
+				}
+				if err := (&etherFrame).UnmarshalBinary(packet.Data); err != nil {
+					log.Errorw("failed-to-unmarshal-frame", log.Fields{"error": err})
+					return err
+				}
+				log.Debugw("Double-tagged packet, removed outer vlan", log.Fields{"New frame": etherFrame})
+			}
+		}
+		intfId := IntfIdFromUniPortNum(uint32(egress_port_no))
+		onuId := OnuIdFromPortNum(uint32(egress_port_no))
+		uniId := UniIdFromPortNum(uint32(egress_port_no))
+		/*gemPortId, err := dh.flowMgr.GetPacketOutGemPortId(intfId, onuId, uint32(egress_port_no))
+		  if err != nil{
+		      log.Errorw("Error while getting gemport to packet-out",log.Fields{"error": err})
+		      return err
+		  }*/
+		onuPkt := oop.OnuPacket{IntfId: intfId, OnuId: onuId, PortNo: uint32(egress_port_no), Pkt: packet.Data}
+		log.Debugw("sending-packet-to-ONU", log.Fields{"egress_port_no": egress_port_no, "IntfId": intfId, "onuId": onuId,
+			"uniId": uniId, "packet": packet.Data})
+		if _, err := dh.Client.OnuPacketOut(context.Background(), &onuPkt); err != nil {
+			log.Errorw("Error while sending packet-out to ONU", log.Fields{"error": err})
+			return err
+		}
+	} else if egressPortType == voltha.Port_ETHERNET_NNI {
+		uplinkPkt := oop.UplinkPacket{IntfId: IntfIdFromNniPortNum(uint32(egress_port_no)), Pkt: packet.Data}
+		log.Debugw("sending-packet-to-uplink", log.Fields{"uplink_pkt": uplinkPkt})
+		if _, err := dh.Client.UplinkPacketOut(context.Background(), &uplinkPkt); err != nil {
+			log.Errorw("Error while sending packet-out to uplink", log.Fields{"error": err})
+			return err
+		}
+	} else {
+		log.Warnw("Packet-out-to-this-interface-type-not-implemented", log.Fields{"egress_port_no": egress_port_no, "egressPortType": egressPortType})
+	}
+	return nil
+}
diff --git a/adaptercore/olt_platform.go b/adaptercore/olt_platform.go
index 131234a..4cc4765 100644
--- a/adaptercore/olt_platform.go
+++ b/adaptercore/olt_platform.go
@@ -129,7 +129,7 @@
 		if (intfId & (1 << 16)) == (1 << 16) {
 			return voltha.Port_ETHERNET_NNI
 		} else {
-			return voltha.Port_UNKNOWN
+			return voltha.Port_ETHERNET_UNI
 		}
 	}
 }
diff --git a/adaptercore/openolt.go b/adaptercore/openolt.go
index 2e5174f..594ceb3 100644
--- a/adaptercore/openolt.go
+++ b/adaptercore/openolt.go
@@ -210,7 +210,7 @@
 	return errors.New("UnImplemented")
 }
 
-func (oo *OpenOLT) Gelete_device(device *voltha.Device) error {
+func (oo *OpenOLT) Delete_device(device *voltha.Device) error {
 	return errors.New("UnImplemented")
 }
 
@@ -235,8 +235,13 @@
 	return errors.New("UnImplemented")
 }
 
-func (oo *OpenOLT) Receive_packet_out(device *voltha.Device, egress_port_no int, msg openflow_13.PacketOut) error {
-	return errors.New("UnImplemented")
+func (oo *OpenOLT) Receive_packet_out(deviceId string, egress_port_no int, packet *openflow_13.OfpPacketOut) error {
+	log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId, "egress_port_no": egress_port_no, "pkt": packet})
+	if handler := oo.getDeviceHandler(deviceId); handler != nil {
+		return handler.PacketOut(egress_port_no, packet)
+	}
+	log.Errorw("Receive_packet_out failed-device-handler-not-set", log.Fields{"deviceId": deviceId, "egressport": egress_port_no, "packet": packet})
+	return errors.New("device-handler-not-set")
 }
 
 func (oo *OpenOLT) Suppress_alarm(filter *voltha.AlarmFilter) error {
diff --git a/adaptercore/openolt_flowmgr.go b/adaptercore/openolt_flowmgr.go
index 0b8120b..8d9361e 100644
--- a/adaptercore/openolt_flowmgr.go
+++ b/adaptercore/openolt_flowmgr.go
@@ -80,10 +80,36 @@
 	TRAP_TO_HOST = "trap_to_host"
 )
 
+type onuInfo struct {
+	intfId       uint32
+	onuId        uint32
+	serialNumber string
+}
+
+type onuIdKey struct {
+	intfId uint32
+	onuId  uint32
+}
+
+type gemPortKey struct {
+	intfId  uint32
+	gemPort uint32
+}
+
+type packetInInfoKey struct {
+	intfId      uint32
+	onuId       uint32
+	logicalPort uint32
+}
+
 type OpenOltFlowMgr struct {
-	techprofile   []*tp.TechProfileMgr
-	deviceHandler *DeviceHandler
-	resourceMgr   *rsrcMgr.OpenOltResourceMgr
+	techprofile      []*tp.TechProfileMgr
+	deviceHandler    *DeviceHandler
+	resourceMgr      *rsrcMgr.OpenOltResourceMgr
+	onuIds           map[onuIdKey]onuInfo       //OnuId -> OnuInfo
+	onuSerialNumbers map[string]onuInfo         //onu serial_number (string) -> OnuInfo
+	onuGemPortIds    map[gemPortKey]onuInfo     //GemPortId -> OnuInfo
+	packetInGemPort  map[packetInInfoKey]uint32 //packet in gem port
 }
 
 func NewFlowManager(dh *DeviceHandler, rsrcMgr *rsrcMgr.OpenOltResourceMgr) *OpenOltFlowMgr {
@@ -95,6 +121,10 @@
 		log.Error("Error while populating tech profile mgr\n")
 		return nil
 	}
+	flowMgr.onuIds = make(map[onuIdKey]onuInfo)
+	flowMgr.onuSerialNumbers = make(map[string]onuInfo)
+	flowMgr.onuGemPortIds = make(map[gemPortKey]onuInfo)
+	flowMgr.packetInGemPort = make(map[packetInInfoKey]uint32)
 	log.Info("Initialization of  flow manager success!!")
 	return &flowMgr
 }
@@ -238,6 +268,9 @@
 		log.Error("Errow while uploading gemtopon map to KV store")
 	}
 	log.Debug("Stored tconts and GEM into KV store successfully")
+	for _, gemPort := range gemPortIDs {
+		f.addGemPortToOnuInfoMap(intfId, onuId, gemPort)
+	}
 }
 
 func (f *OpenOltFlowMgr) populateTechProfilePerPonPort() error {
@@ -357,8 +390,8 @@
 	}
 
 	action[TRAP_TO_HOST] = true
-	classifier[UDP_SRC] = 68
-	classifier[UDP_DST] = 67
+	classifier[UDP_SRC] = uint32(68)
+	classifier[UDP_DST] = uint32(67)
 	classifier[PACKET_TAG_TYPE] = SINGLE_TAG
 	delete(classifier, VLAN_VID)
 
@@ -557,7 +590,7 @@
 		classifier.IpProto = ipProto.(uint32)
 	}
 	if vlanId, ok := classifierInfo[VLAN_VID]; ok {
-		classifier.OVid = vlanId.(uint32)
+		classifier.OVid = (vlanId.(uint32)) & 0xFFF
 	}
 	if metadata, ok := classifierInfo[METADATA]; ok { // TODO: Revisit
 		classifier.IVid = uint32(metadata.(uint64))
@@ -647,8 +680,17 @@
 
 func (f *OpenOltFlowMgr) getUpdatedFlowInfo(flow *openolt_pb2.Flow, flowStoreCookie uint64, flowCategory string) *[]rsrcMgr.FlowInfo {
 	var flows []rsrcMgr.FlowInfo = []rsrcMgr.FlowInfo{rsrcMgr.FlowInfo{Flow: flow, FlowCategory: flowCategory, FlowStoreCookie: flowStoreCookie}}
-	// Get existing flow for flowid for given subscriber from KV store
-	existingFlows := f.resourceMgr.GetFlowIDInfo(uint32(flow.AccessIntfId), uint32(flow.OnuId), uint32(flow.UniId), flow.FlowId)
+	var intfId uint32
+	/* For flows which trap out of the NNI, the AccessIntfId is invalid
+	   (set to -1). In such cases, we need to refer to the NetworkIntfId.
+	*/
+	if flow.AccessIntfId != -1 {
+		intfId = uint32(flow.AccessIntfId)
+	} else {
+		intfId = uint32(flow.NetworkIntfId)
+	}
+	// Get existing flows matching flowid for given subscriber from KV store
+	existingFlows := f.resourceMgr.GetFlowIDInfo(intfId, uint32(flow.OnuId), uint32(flow.UniId), flow.FlowId)
 	if existingFlows != nil {
 		log.Debugw("Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
 		for _, f := range *existingFlows {
@@ -709,8 +751,7 @@
 
 func (f *OpenOltFlowMgr) getOnuChildDevice(intfId uint32, onuId uint32) (*voltha.Device, error) {
 	log.Debugw("GetChildDevice", log.Fields{"pon port": intfId, "onuId": onuId})
-	//parentPortNo := IntfIdToPortNo(intfId, voltha.Port_PON_OLT)
-	parentPortNo := intfId
+	parentPortNo := IntfIdToPortNo(intfId, voltha.Port_PON_OLT)
 	onuDevice := f.deviceHandler.GetChildDevice(parentPortNo, onuId)
 	if onuDevice == nil {
 		log.Errorw("onu not found", log.Fields{"intfId": parentPortNo, "onuId": onuId})
@@ -867,6 +908,17 @@
 	}
 	log.Infow("Flow ports", log.Fields{"classifierInfo_inport": classifierInfo[IN_PORT], "action_output": actionInfo[OUTPUT]})
 	portNo, intfId, onuId, uniId := ExtractAccessFromFlow(classifierInfo[IN_PORT].(uint32), actionInfo[OUTPUT].(uint32))
+	if ipProto, ok := classifierInfo[IP_PROTO]; ok {
+		if ipProto.(uint32) == IP_PROTO_DHCP {
+			if udpSrc, ok := classifierInfo[UDP_SRC]; ok {
+				if udpSrc.(uint32) == uint32(67) {
+					log.Debug("trap-dhcp-from-nni-flow")
+					f.addDHCPTrapFlowOnNNI(flow, classifierInfo, portNo)
+					return
+				}
+			}
+		}
+	}
 	f.divideAndAddFlow(intfId, onuId, uniId, portNo, classifierInfo, actionInfo, flow)
 }
 
@@ -898,3 +950,153 @@
 	log.Debugw("success Sending Load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"msg": tpDownloadMsg})
 	return nil
 }
+
+// UpdateOnuInfo adds the ONU info to the local cache
+func (f *OpenOltFlowMgr) UpdateOnuInfo(intfID uint32, onuID uint32, serialNum string) {
+	onu := onuInfo{intfId: intfID, onuId: onuID, serialNumber: serialNum}
+	onuIDkey := onuIdKey{intfId: intfID, onuId: onuID}
+	f.onuIds[onuIDkey] = onu
+	log.Debugw("Updated onuinfo", log.Fields{"intfID": intfID, "onuID": onuID, "serialNum": serialNum})
+}
+
+// addGemPortToOnuInfoMap adds the GEM port to the ONU info map
+func (f *OpenOltFlowMgr) addGemPortToOnuInfoMap(intfId uint32, onuId uint32, gemPort uint32) {
+	onuIDkey := onuIdKey{intfId: intfId, onuId: onuId}
+	if val, ok := f.onuIds[onuIDkey]; ok {
+		onuInfo := val
+		gemPortKey := gemPortKey{intfId: intfId, gemPort: gemPort}
+		f.onuGemPortIds[gemPortKey] = onuInfo
+		log.Debugw("Cached Gemport to Onuinfo map", log.Fields{"GemPort": gemPort, "intfId": onuInfo.intfId, "onuId": onuInfo.onuId})
+		return
+	}
+	log.Errorw("OnuInfo not found", log.Fields{"intfId": intfId, "onuId": onuId, "gemPort": gemPort})
+}
+
+// getOnuIdfromGemPortMap looks up the ONU ID by serialNumber or by (intfId, gemPort).
+// Returns (onuId, nil) if found, or (0, error) if no ONU ID is found for the given serialNumber or (intfId, gemPort).
+func (f *OpenOltFlowMgr) getOnuIdfromGemPortMap(serialNumber string, intfId uint32, gemPortId uint32) (uint32, error) {
+	log.Debugw("Getting ONU ID from GEM port and PON port", log.Fields{"serialNumber": serialNumber, "intfId": intfId, "gemPortId": gemPortId})
+	if serialNumber != "" {
+		if onuInfo, ok := f.onuSerialNumbers[serialNumber]; ok {
+			return onuInfo.onuId, nil
+		}
+	} else {
+		gemPortKey := gemPortKey{intfId: intfId, gemPort: gemPortId}
+		if onuInfo, ok := f.onuGemPortIds[gemPortKey]; ok {
+			log.Debugw("Retrieved ONU info from the GEM port map", log.Fields{"intfId": intfId, "gemPortId": gemPortId, "onuId": onuInfo.onuId})
+			return onuInfo.onuId, nil
+		}
+	}
+	log.Errorw("ONU ID is not found", log.Fields{"serialNumber": serialNumber, "intfId": intfId, "gemPort": gemPortId})
+	return uint32(0), errors.New("key error, ONU ID not found") // ONU ID 0 is not a valid one
+}
+
+// GetLogicalPortFromPacketIn computes the logical UNI/NNI port number from the packet-in indication and returns it
+func (f *OpenOltFlowMgr) GetLogicalPortFromPacketIn(packetIn *openolt_pb2.PacketIndication) (uint32, error) {
+	var logicalPortNum uint32
+	var onuId uint32
+	var err error
+
+	if packetIn.IntfType == "pon" {
+		// the packet indication does not carry the serial number, so pass an empty string
+		if onuId, err = f.getOnuIdfromGemPortMap("", packetIn.IntfId, packetIn.GemportId); err != nil {
+			log.Errorw("Unable to get ONU ID from GEM/PON port", log.Fields{"pon port": packetIn.IntfId, "gemport": packetIn.GemportId})
+			return logicalPortNum, err
+		}
+		if packetIn.PortNo != 0 {
+			logicalPortNum = packetIn.PortNo
+		} else {
+			uniId := uint32(0) //  FIXME - multi-uni support
+			logicalPortNum = MkUniPortNum(packetIn.IntfId, onuId, uniId)
+		}
+		// Store the gem port through which the packet_in came. Use the same gem port for packet_out
+		pktInkey := packetInInfoKey{intfId: packetIn.IntfId, onuId: onuId, logicalPort: logicalPortNum}
+		f.packetInGemPort[pktInkey] = packetIn.GemportId
+	} else if packetIn.IntfType == "nni" {
+		logicalPortNum = IntfIdToPortNo(packetIn.IntfId, voltha.Port_ETHERNET_NNI)
+	}
+	log.Debugw("Retrieved logical port from packet-in", log.Fields{"logicalPortNum": logicalPortNum, "IntfType": packetIn.IntfType})
+	return logicalPortNum, nil
+}
+
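+// GetPacketOutGemPortId returns the GEM port on which the corresponding packet-in was received, so that the packet-out uses the same GEM port.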
+func (f *OpenOltFlowMgr) GetPacketOutGemPortId(intfId uint32, onuId uint32, portNum uint32) (uint32, error) {
+	var gemPortId uint32
+	var err error
+	key := packetInInfoKey{intfId: intfId, onuId: onuId, logicalPort: portNum}
+	if val, ok := f.packetInGemPort[key]; ok {
+		gemPortId = val
+	} else {
+		log.Errorw("Key-Error while fetching packet-out GEM port", log.Fields{"key": key})
+		err = errors.New("Key-Error while fetching packet-out GEM port")
+	}
+	return gemPortId, err
+}
+
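+// addDHCPTrapFlowOnNNI installs a downstream trap-to-host flow on the NNI for DHCP packets coming from the upstream DHCP server.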
+func (f *OpenOltFlowMgr) addDHCPTrapFlowOnNNI(logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) {
+	log.Debug("Adding trap-dhcp-on-nni-flow")
+	action := make(map[string]interface{})
+	classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
+	action[TRAP_TO_HOST] = true
+	/* We manage the flowId resource pool on a per-PON-port basis.
+	   Since this situation is tricky, as a hack we pass the NNI port
+	   index (network_intf_id) as the PON port index for the flowId resource
+	   pool. Also, there is no ONU ID available when trapping DHCP packets
+	   on the NNI port, so onu_id is set to -1 (invalid).
+	   ****************** CAVEAT *******************
+	   This logic works if the NNI Port Id falls within the same valid
+	   range of PON Port Ids. If this doesn't work for some OLT Vendor
+	   we need to have a re-look at this.
+	   *********************************************
+	*/
+	onuId := -1
+	uniId := -1
+	gemPortId := -1
+	allocId := -1
+	networkInterfaceId := DEFAULT_NETWORK_INTERFACE_ID
+	flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(uint32(networkInterfaceId), uint32(onuId), uint32(uniId), flowStoreCookie); present {
+		log.Debug("Flow-exists--not-re-adding")
+		return
+	}
+	flowId, err := f.resourceMgr.GetFlowID(uint32(networkInterfaceId), uint32(onuId), uint32(uniId), flowStoreCookie, "")
+	if err != nil {
+		log.Errorw("Flow id unavailable for DHCP trap-on-NNI flow", log.Fields{"error": err})
+		return
+	}
+	var classifierProto *openolt_pb2.Classifier
+	var actionProto *openolt_pb2.Action
+	if classifierProto = makeOpenOltClassifierField(classifier); classifierProto == nil {
+		log.Error("Error in making classifier protobuf for dhcp trap on nni flow")
+		return
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	if actionProto = makeOpenOltActionField(action); actionProto == nil {
+		log.Error("Error in making action protobuf for dhcp trap on nni flow")
+		return
+	}
+	log.Debugw("Created action proto", log.Fields{"action": *actionProto})
+	downstreamflow := openolt_pb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
+		OnuId:         int32(onuId), // OnuId not required
+		UniId:         int32(uniId), // UniId not used
+		FlowId:        flowId,
+		FlowType:      DOWNSTREAM,
+		AllocId:       int32(allocId),               // AllocId not used
+		NetworkIntfId: DEFAULT_NETWORK_INTERFACE_ID, // one NNI port is supported now
+		GemportId:     int32(gemPortId),             // GemportId not used
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+	if ok := f.addFlowToDevice(&downstreamflow); ok {
+		log.Debug("DHCP trap on NNI flow added to device successfully")
+		flowsToKVStore := f.getUpdatedFlowInfo(&downstreamflow, flowStoreCookie, "")
+		if err := f.updateFlowInfoToKVStore(int32(networkInterfaceId),
+			int32(onuId),
+			int32(uniId),
+			flowId, flowsToKVStore); err != nil {
+			log.Errorw("Error uploading DHCP DL flow into KV store", log.Fields{"flow": downstreamflow, "error": err})
+		}
+	}
+	return
+}
diff --git a/adaptercore/resourcemanager/resourcemanager.go b/adaptercore/resourcemanager/resourcemanager.go
index 2bd84a7..f87ffd1 100755
--- a/adaptercore/resourcemanager/resourcemanager.go
+++ b/adaptercore/resourcemanager/resourcemanager.go
@@ -170,7 +170,7 @@
 		if GlobalPONRsrcMgr == nil {
 			GlobalPONRsrcMgr = RsrcMgrsByTech[technology]
 		}
-		for IntfId := range TechRange.IntfIds {
+		for _, IntfId := range TechRange.IntfIds {
 			ResourceMgr.ResourceMgrs[uint32(IntfId)] = RsrcMgrsByTech[technology]
 		}
 		//self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
@@ -222,17 +222,24 @@
 	FlowIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
 	FlowIDSharedPoolID := uint32(0)
 
-	var GlobalPoolID uint32
 	var FirstIntfPoolID uint32
 	var SharedPoolID uint32
 
+	/*
+	 * A zero check is made against SharedPoolID to determine whether the resources are shared across all intfs;
+	 * if resources are shared across interfaces, SharedPoolID is given a positive number.
+	 */
 	for _, FirstIntfPoolID = range TechRange.IntfIds {
+		// skip the intf id 0
+		if FirstIntfPoolID == 0 {
+			continue
+		}
 		break
 	}
 
 	for _, RangePool := range TechRange.Pools {
 		if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
-			SharedPoolID = GlobalPoolID
+			SharedPoolID = FirstIntfPoolID
 		} else if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_SAME_TECH {
 			SharedPoolID = FirstIntfPoolID
 		} else {
@@ -406,7 +413,6 @@
 		for _, flowId := range FlowIDs {
 			FlowInfo := RsrcMgr.GetFlowIDInfo(PONIntfID, ONUID, UNIID, uint32(flowId))
 			if FlowInfo != nil {
-				log.Debugw("Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowId})
 				for _, Info := range *FlowInfo {
 					if FlowCategory != "" && Info.FlowCategory == FlowCategory {
 						log.Debug("Found flow matching with flow catagory", log.Fields{"flowId": flowId, "FlowCategory": FlowCategory})
@@ -426,7 +432,7 @@
 	if err != nil {
 		log.Errorf("Failed to get resource for interface %d for type %s",
 			PONIntfID, ponrmgr.FLOW_ID)
-		return FlowIDs[0], err
+		return uint32(0), err
 	}
 	if FlowIDs != nil {
 		RsrcMgr.ResourceMgrs[PONIntfID].UpdateFlowIDForOnu(FlowPath, FlowIDs[0], true)
@@ -672,25 +678,25 @@
 	}
 }
 
-/* TODO once the flow id info structure is known
-def is_flow_cookie_on_kv_store(self, intf_id, onu_id, uni_id, flow_store_cookie):
-  '''
-  Note: For flows which trap from the NNI and not really associated with any particular
-  ONU (like LLDP), the onu_id and uni_id is set as -1. The intf_id is the NNI intf_id.
-  '''
-  intf_onu_id = (intf_id, onu_id, uni_id)
-  try:
-      flow_ids = self.resource_mgrs[intf_id]. \
-          get_current_flow_ids_for_onu(intf_onu_id)
-      if flow_ids is not None:
-          for flow_id in flow_ids:
-              flows = self.get_flow_id_info(intf_id, onu_id, uni_id, flow_id)
-              assert (isinstance(flows, list))
-              for flow in flows:
-                  if flow['flow_store_cookie'] == flow_store_cookie:
-                      return True
-  except Exception as e:
-      self.log.error("error-retrieving-flow-info", e=e)
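+// IsFlowCookieOnKVStore returns true if a flow with the given flow-store cookie already exists in the KV store for the given PON interface, ONU and UNI.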
+func (RsrcMgr *OpenOltResourceMgr) IsFlowCookieOnKVStore(PONIntfID uint32, ONUID uint32, UNIID uint32,
+	FlowStoreCookie uint64) bool {
 
-  return False
-*/
+	FlowPath := fmt.Sprintf("%d,%d,%d", PONIntfID, ONUID, UNIID)
+	FlowIDs := RsrcMgr.ResourceMgrs[PONIntfID].GetCurrentFlowIDsForOnu(FlowPath)
+	if FlowIDs != nil {
+		log.Debugw("Found flowId(s) for this ONU", log.Fields{"pon": PONIntfID, "ONUID": ONUID, "UNIID": UNIID, "KVpath": FlowPath})
+		for _, flowId := range FlowIDs {
+			FlowInfo := RsrcMgr.GetFlowIDInfo(PONIntfID, ONUID, UNIID, uint32(flowId))
+			if FlowInfo != nil {
+				log.Debugw("Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowId})
+				for _, Info := range *FlowInfo {
+					if Info.FlowStoreCookie == FlowStoreCookie {
+						log.Debugw("Found flow matching the flow store cookie", log.Fields{"flowId": flowId, "FlowStoreCookie": FlowStoreCookie})
+						return true
+					}
+				}
+			}
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/mdlayher/ethernet/.travis.yml b/vendor/github.com/mdlayher/ethernet/.travis.yml
new file mode 100644
index 0000000..cc21599
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+  - 1.x
+os:
+  - linux
+before_install:
+  - go get golang.org/x/lint/golint
+  - go get honnef.co/go/tools/cmd/staticcheck
+  - go get -d ./...
+script:
+  - go build -tags=gofuzz ./...
+  - go vet ./...
+  - staticcheck ./...
+  - golint -set_exit_status ./...
+  - go test -v -race ./...
diff --git a/vendor/github.com/mdlayher/ethernet/LICENSE.md b/vendor/github.com/mdlayher/ethernet/LICENSE.md
new file mode 100644
index 0000000..75ed9de
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/LICENSE.md
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (C) 2015 Matt Layher
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mdlayher/ethernet/README.md b/vendor/github.com/mdlayher/ethernet/README.md
new file mode 100644
index 0000000..ec6f4fe
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/README.md
@@ -0,0 +1,8 @@
+ethernet [![Build Status](https://travis-ci.org/mdlayher/ethernet.svg?branch=master)](https://travis-ci.org/mdlayher/ethernet) [![GoDoc](https://godoc.org/github.com/mdlayher/ethernet?status.svg)](https://godoc.org/github.com/mdlayher/ethernet) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/ethernet)](https://goreportcard.com/report/github.com/mdlayher/ethernet)
+========
+
+Package `ethernet` implements marshaling and unmarshaling of IEEE 802.3
+Ethernet II frames and IEEE 802.1Q VLAN tags.  MIT Licensed.
+
+For more information about using Ethernet frames in Go, check out my blog
+post: [Network Protocol Breakdown: Ethernet and Go](https://medium.com/@mdlayher/network-protocol-breakdown-ethernet-and-go-de985d726cc1).
\ No newline at end of file
diff --git a/vendor/github.com/mdlayher/ethernet/ethernet.go b/vendor/github.com/mdlayher/ethernet/ethernet.go
new file mode 100644
index 0000000..d150d8e
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/ethernet.go
@@ -0,0 +1,301 @@
+// Package ethernet implements marshaling and unmarshaling of IEEE 802.3
+// Ethernet II frames and IEEE 802.1Q VLAN tags.
+package ethernet
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"net"
+)
+
+//go:generate stringer -output=string.go -type=EtherType
+
+const (
+	// minPayload is the minimum payload size for an Ethernet frame, assuming
+	// that no 802.1Q VLAN tags are present.
+	minPayload = 46
+)
+
+var (
+	// Broadcast is a special hardware address which indicates a Frame should
+	// be sent to every device on a given LAN segment.
+	Broadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+)
+
+var (
+	// ErrInvalidFCS is returned when Frame.UnmarshalFCS detects an incorrect
+	// Ethernet frame check sequence in a byte slice for a Frame.
+	ErrInvalidFCS = errors.New("invalid frame check sequence")
+)
+
+// An EtherType is a value used to identify an upper layer protocol
+// encapsulated in a Frame.
+//
+// A list of IANA-assigned EtherType values may be found here:
+// http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml.
+type EtherType uint16
+
+// Common EtherType values frequently used in a Frame.
+const (
+	EtherTypeIPv4 EtherType = 0x0800
+	EtherTypeARP  EtherType = 0x0806
+	EtherTypeIPv6 EtherType = 0x86DD
+
+	// EtherTypeVLAN and EtherTypeServiceVLAN are used as 802.1Q Tag Protocol
+	// Identifiers (TPIDs).
+	EtherTypeVLAN        EtherType = 0x8100
+	EtherTypeServiceVLAN EtherType = 0x88a8
+)
+
+// A Frame is an IEEE 802.3 Ethernet II frame.  A Frame contains information
+// such as source and destination hardware addresses, zero or more optional
+// 802.1Q VLAN tags, an EtherType, and payload data.
+type Frame struct {
+	// Destination specifies the destination hardware address for this Frame.
+	//
+	// If this address is set to Broadcast, the Frame will be sent to every
+	// device on a given LAN segment.
+	Destination net.HardwareAddr
+
+	// Source specifies the source hardware address for this Frame.
+	//
+	// Typically, this is the hardware address of the network interface used to
+	// send this Frame.
+	Source net.HardwareAddr
+
+	// ServiceVLAN specifies an optional 802.1Q service VLAN tag, for use with
+	// 802.1ad double tagging, or "Q-in-Q". If ServiceVLAN is not nil, VLAN must
+	// not be nil as well.
+	//
+	// Most users should leave this field set to nil and use VLAN instead.
+	ServiceVLAN *VLAN
+
+	// VLAN specifies an optional 802.1Q customer VLAN tag, which may or may
+	// not be present in a Frame.  It is important to note that the operating
+	// system may automatically strip VLAN tags before they can be parsed.
+	VLAN *VLAN
+
+	// EtherType is a value used to identify an upper layer protocol
+	// encapsulated in this Frame.
+	EtherType EtherType
+
+	// Payload is a variable length data payload encapsulated by this Frame.
+	Payload []byte
+}
+
+// MarshalBinary allocates a byte slice and marshals a Frame into binary form.
+func (f *Frame) MarshalBinary() ([]byte, error) {
+	b := make([]byte, f.length())
+	_, err := f.read(b)
+	return b, err
+}
+
+// MarshalFCS allocates a byte slice, marshals a Frame into binary form, and
+// finally calculates and places a 4-byte IEEE CRC32 frame check sequence at
+// the end of the slice.
+//
+// Most users should use MarshalBinary instead.  MarshalFCS is provided as a
+// convenience for rare occasions when the operating system cannot
+// automatically generate a frame check sequence for an Ethernet frame.
+func (f *Frame) MarshalFCS() ([]byte, error) {
+	// Frame length with 4 extra bytes for frame check sequence
+	b := make([]byte, f.length()+4)
+	if _, err := f.read(b); err != nil {
+		return nil, err
+	}
+
+	// Compute IEEE CRC32 checksum of frame bytes and place it directly
+	// in the last four bytes of the slice
+	binary.BigEndian.PutUint32(b[len(b)-4:], crc32.ChecksumIEEE(b[0:len(b)-4]))
+	return b, nil
+}
+
+// read reads data from a Frame into b.  read is used to marshal a Frame
+// into binary form, but does not allocate on its own.
+func (f *Frame) read(b []byte) (int, error) {
+	// S-VLAN must also have accompanying C-VLAN.
+	if f.ServiceVLAN != nil && f.VLAN == nil {
+		return 0, ErrInvalidVLAN
+	}
+
+	copy(b[0:6], f.Destination)
+	copy(b[6:12], f.Source)
+
+	// Marshal each non-nil VLAN tag into bytes, inserting the appropriate
+	// EtherType/TPID before each, so devices know that one or more VLANs
+	// are present.
+	vlans := []struct {
+		vlan *VLAN
+		tpid EtherType
+	}{
+		{vlan: f.ServiceVLAN, tpid: EtherTypeServiceVLAN},
+		{vlan: f.VLAN, tpid: EtherTypeVLAN},
+	}
+
+	n := 12
+	for _, vt := range vlans {
+		if vt.vlan == nil {
+			continue
+		}
+
+		// Add VLAN EtherType and VLAN bytes.
+		binary.BigEndian.PutUint16(b[n:n+2], uint16(vt.tpid))
+		if _, err := vt.vlan.read(b[n+2 : n+4]); err != nil {
+			return 0, err
+		}
+		n += 4
+	}
+
+	// Marshal actual EtherType after any VLANs, copy payload into
+	// output bytes.
+	binary.BigEndian.PutUint16(b[n:n+2], uint16(f.EtherType))
+	copy(b[n+2:], f.Payload)
+
+	return len(b), nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a Frame.
+func (f *Frame) UnmarshalBinary(b []byte) error {
+	// Verify that both hardware addresses and a single EtherType are present
+	if len(b) < 14 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Track offset in packet for reading data
+	n := 14
+
+	// Continue looping and parsing VLAN tags until no more VLAN EtherType
+	// values are detected
+	et := EtherType(binary.BigEndian.Uint16(b[n-2 : n]))
+	switch et {
+	case EtherTypeServiceVLAN, EtherTypeVLAN:
+		// VLAN type is hinted for further parsing.  An index is returned which
+		// indicates how many bytes were consumed by VLAN tags.
+		nn, err := f.unmarshalVLANs(et, b[n:])
+		if err != nil {
+			return err
+		}
+
+		n += nn
+	default:
+		// No VLANs detected.
+		f.EtherType = et
+	}
+
+	// Allocate single byte slice to store destination and source hardware
+	// addresses, and payload
+	bb := make([]byte, 6+6+len(b[n:]))
+	copy(bb[0:6], b[0:6])
+	f.Destination = bb[0:6]
+	copy(bb[6:12], b[6:12])
+	f.Source = bb[6:12]
+
+	// There used to be a minimum payload length restriction here, but as
+	// long as two hardware addresses and an EtherType are present, it
+	// doesn't really matter what is contained in the payload.  We will
+	// follow the "robustness principle".
+	copy(bb[12:], b[n:])
+	f.Payload = bb[12:]
+
+	return nil
+}
+
+// UnmarshalFCS computes the IEEE CRC32 frame check sequence of a Frame,
+// verifies it against the checksum present in the byte slice, and finally,
+// unmarshals a byte slice into a Frame.
+//
+// Most users should use UnmarshalBinary instead.  UnmarshalFCS is provided as
+// a convenience for rare occasions when the operating system cannot
+// automatically verify a frame check sequence for an Ethernet frame.
+func (f *Frame) UnmarshalFCS(b []byte) error {
+	// Must contain enough data for FCS, to avoid panics
+	if len(b) < 4 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Verify checksum in slice versus newly computed checksum
+	want := binary.BigEndian.Uint32(b[len(b)-4:])
+	got := crc32.ChecksumIEEE(b[0 : len(b)-4])
+	if want != got {
+		return ErrInvalidFCS
+	}
+
+	return f.UnmarshalBinary(b[0 : len(b)-4])
+}
+
+// length calculates the number of bytes required to store a Frame.
+func (f *Frame) length() int {
+	// If payload is less than the required minimum length, we zero-pad up to
+	// the required minimum length
+	pl := len(f.Payload)
+	if pl < minPayload {
+		pl = minPayload
+	}
+
+	// Add additional length if VLAN tags are needed.
+	var vlanLen int
+	switch {
+	case f.ServiceVLAN != nil && f.VLAN != nil:
+		vlanLen = 8
+	case f.VLAN != nil:
+		vlanLen = 4
+	}
+
+	// 6 bytes: destination hardware address
+	// 6 bytes: source hardware address
+	// N bytes: VLAN tags (if present)
+	// 2 bytes: EtherType
+	// N bytes: payload length (may be padded)
+	return 6 + 6 + vlanLen + 2 + pl
+}
+
+// unmarshalVLANs unmarshals S/C-VLAN tags.  It is assumed that tpid
+// is a valid S/C-VLAN TPID.
+func (f *Frame) unmarshalVLANs(tpid EtherType, b []byte) (int, error) {
+	// 4 or more bytes must remain for valid S/C-VLAN tag and EtherType.
+	if len(b) < 4 {
+		return 0, io.ErrUnexpectedEOF
+	}
+
+	// Track how many bytes are consumed by VLAN tags.
+	var n int
+
+	switch tpid {
+	case EtherTypeServiceVLAN:
+		vlan := new(VLAN)
+		if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+			return 0, err
+		}
+		f.ServiceVLAN = vlan
+
+		// Assume that a C-VLAN immediately trails an S-VLAN.
+		if EtherType(binary.BigEndian.Uint16(b[n+2:n+4])) != EtherTypeVLAN {
+			return 0, ErrInvalidVLAN
+		}
+
+		// 4 or more bytes must remain for valid C-VLAN tag and EtherType.
+		n += 4
+		if len(b[n:]) < 4 {
+			return 0, io.ErrUnexpectedEOF
+		}
+
+		// Continue to parse the C-VLAN.
+		fallthrough
+	case EtherTypeVLAN:
+		vlan := new(VLAN)
+		if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil {
+			return 0, err
+		}
+
+		f.VLAN = vlan
+		f.EtherType = EtherType(binary.BigEndian.Uint16(b[n+2 : n+4]))
+		n += 4
+	default:
+		panic(fmt.Sprintf("unknown VLAN TPID: %04x", tpid))
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/mdlayher/ethernet/fuzz.go b/vendor/github.com/mdlayher/ethernet/fuzz.go
new file mode 100644
index 0000000..5d22532
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/fuzz.go
@@ -0,0 +1,24 @@
+// +build gofuzz
+
+package ethernet
+
+func Fuzz(data []byte) int {
+	f := new(Frame)
+	if err := f.UnmarshalBinary(data); err != nil {
+		return 0
+	}
+
+	if _, err := f.MarshalBinary(); err != nil {
+		panic(err)
+	}
+
+	if err := f.UnmarshalFCS(data); err != nil {
+		return 0
+	}
+
+	if _, err := f.MarshalFCS(); err != nil {
+		panic(err)
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/mdlayher/ethernet/go.mod b/vendor/github.com/mdlayher/ethernet/go.mod
new file mode 100644
index 0000000..4ac5e77
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/go.mod
@@ -0,0 +1,5 @@
+module github.com/mdlayher/ethernet
+
+go 1.12
+
+require github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d
diff --git a/vendor/github.com/mdlayher/ethernet/go.sum b/vendor/github.com/mdlayher/ethernet/go.sum
new file mode 100644
index 0000000..9400186
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/go.sum
@@ -0,0 +1,11 @@
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d h1:rjAS0af7FIYCScTtEU5KjIldC6qVaEScUJhABHC+ccM=
+github.com/mdlayher/raw v0.0.0-20190313224157-43dbcdd7739d/go.mod h1:r1fbeITl2xL/zLbVnNHFyOzQJTgr/3fpf1lJX/cjzR8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977 h1:actzWV6iWn3GLqN8dZjzsB+CLt+gaV2+wsxroxiQI8I=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313 h1:pczuHS43Cp2ktBEEmLwScxgjWsBSzdaQiKzUyf3DTTc=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/mdlayher/ethernet/string.go b/vendor/github.com/mdlayher/ethernet/string.go
new file mode 100644
index 0000000..89a3e01
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/string.go
@@ -0,0 +1,38 @@
+// Code generated by "stringer -output=string.go -type=EtherType"; DO NOT EDIT.
+
+package ethernet
+
+import "fmt"
+
+const (
+	_EtherType_name_0 = "EtherTypeIPv4"
+	_EtherType_name_1 = "EtherTypeARP"
+	_EtherType_name_2 = "EtherTypeVLAN"
+	_EtherType_name_3 = "EtherTypeIPv6"
+	_EtherType_name_4 = "EtherTypeServiceVLAN"
+)
+
+var (
+	_EtherType_index_0 = [...]uint8{0, 13}
+	_EtherType_index_1 = [...]uint8{0, 12}
+	_EtherType_index_2 = [...]uint8{0, 13}
+	_EtherType_index_3 = [...]uint8{0, 13}
+	_EtherType_index_4 = [...]uint8{0, 20}
+)
+
+func (i EtherType) String() string {
+	switch {
+	case i == 2048:
+		return _EtherType_name_0
+	case i == 2054:
+		return _EtherType_name_1
+	case i == 33024:
+		return _EtherType_name_2
+	case i == 34525:
+		return _EtherType_name_3
+	case i == 34984:
+		return _EtherType_name_4
+	default:
+		return fmt.Sprintf("EtherType(%d)", i)
+	}
+}
diff --git a/vendor/github.com/mdlayher/ethernet/vlan.go b/vendor/github.com/mdlayher/ethernet/vlan.go
new file mode 100644
index 0000000..6ada6a9
--- /dev/null
+++ b/vendor/github.com/mdlayher/ethernet/vlan.go
@@ -0,0 +1,129 @@
+package ethernet
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+const (
+	// VLANNone is a special VLAN ID which indicates that no VLAN is being
+	// used in a Frame.  In this case, the VLAN's other fields may be used
+	// to indicate a Frame's priority.
+	VLANNone = 0x000
+
+	// VLANMax is a reserved VLAN ID which may indicate a wildcard in some
+	// management systems, but may not be configured or transmitted in a
+	// VLAN tag.
+	VLANMax = 0xfff
+)
+
+var (
+	// ErrInvalidVLAN is returned when a VLAN tag is invalid due to one of the
+	// following reasons:
+	//   - Priority of greater than 7 is detected
+	//   - ID of greater than 4094 (0xffe) is detected
+	//   - A customer VLAN does not follow a service VLAN (when using Q-in-Q)
+	ErrInvalidVLAN = errors.New("invalid VLAN")
+)
+
+// Priority is an IEEE P802.1p priority level.  Priority can be any value from
+// 0 to 7.
+//
+// It is important to note that priority 1 (PriorityBackground) actually has
+// a lower priority than 0 (PriorityBestEffort).  All other Priority constants
+// indicate higher priority as the integer values increase.
+type Priority uint8
+
+// IEEE P802.1p recommended priority levels.  Note that PriorityBackground has
+// a lower priority than PriorityBestEffort.
+const (
+	PriorityBackground           Priority = 1
+	PriorityBestEffort           Priority = 0
+	PriorityExcellentEffort      Priority = 2
+	PriorityCriticalApplications Priority = 3
+	PriorityVideo                Priority = 4
+	PriorityVoice                Priority = 5
+	PriorityInternetworkControl  Priority = 6
+	PriorityNetworkControl       Priority = 7
+)
+
+// A VLAN is an IEEE 802.1Q Virtual LAN (VLAN) tag.  A VLAN contains
+// information regarding traffic priority and a VLAN identifier for
+// a given Frame.
+type VLAN struct {
+	// Priority specifies a IEEE P802.1p priority level.  Priority can be any
+	// value from 0 to 7.
+	Priority Priority
+
+	// DropEligible indicates if a Frame is eligible to be dropped in the
+	// presence of network congestion.
+	DropEligible bool
+
+	// ID specifies the VLAN ID for a Frame.  ID can be any value from 0 to
+	// 4094 (0x000 to 0xffe), allowing up to 4094 VLANs.
+	//
+	// If ID is 0 (0x000, VLANNone), no VLAN is specified, and the other fields
+	// simply indicate a Frame's priority.
+	ID uint16
+}
+
+// MarshalBinary allocates a byte slice and marshals a VLAN into binary form.
+func (v *VLAN) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 2)
+	_, err := v.read(b)
+	return b, err
+}
+
+// read reads data from a VLAN into b.  read is used to marshal a VLAN into
+// binary form, but does not allocate on its own.
+func (v *VLAN) read(b []byte) (int, error) {
+	// Check for VLAN priority in valid range
+	if v.Priority > PriorityNetworkControl {
+		return 0, ErrInvalidVLAN
+	}
+
+	// Check for VLAN ID in valid range
+	if v.ID >= VLANMax {
+		return 0, ErrInvalidVLAN
+	}
+
+	// 3 bits: priority
+	ub := uint16(v.Priority) << 13
+
+	// 1 bit: drop eligible
+	var drop uint16
+	if v.DropEligible {
+		drop = 1
+	}
+	ub |= drop << 12
+
+	// 12 bits: VLAN ID
+	ub |= v.ID
+
+	binary.BigEndian.PutUint16(b, ub)
+	return 2, nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a VLAN.
+func (v *VLAN) UnmarshalBinary(b []byte) error {
+	// VLAN tag is always 2 bytes
+	if len(b) != 2 {
+		return io.ErrUnexpectedEOF
+	}
+
+	//  3 bits: priority
+	//  1 bit : drop eligible
+	// 12 bits: VLAN ID
+	ub := binary.BigEndian.Uint16(b[0:2])
+	v.Priority = Priority(uint8(ub >> 13))
+	v.DropEligible = ub&0x1000 != 0
+	v.ID = ub & 0x0fff
+
+	// Check for VLAN ID in valid range
+	if v.ID >= VLANMax {
+		return ErrInvalidVLAN
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
index 5bbd176..e2d79fc 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/core_proxy.go
@@ -178,6 +178,27 @@
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	log.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	log.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_ConnectStatus, operStatus voltha.OperStatus_OperStatus) error {
 	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
@@ -211,7 +232,7 @@
 
 func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
 	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) error {
-	log.Debugw("ChildDeviceDetected", log.Fields{"pPeviceId": parentDeviceId, "channelId": channelId})
+	log.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
 	rpc := "ChildDeviceDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -261,6 +282,46 @@
 
 }
 
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	log.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	log.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	log.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
 func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
 	log.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
 	rpc := "GetDevice"
@@ -356,3 +417,32 @@
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
 }
+
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	log.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	log.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
diff --git a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
index 8b582b8..3ac5c4f 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/common/request_handler.go
@@ -191,6 +191,40 @@
 }
 
 func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Disable_device API on the adapter
+	if err := rhp.adapter.Delete_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
 	return new(empty.Empty), nil
 }
 
@@ -250,6 +284,45 @@
 }
 
 func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
+	log.Debugw("Receive_packet_out", log.Fields{"args": args})
+	if len(args) < 3 {
+		log.Warnw("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &ic.StrType{}
+	egressPort := &ic.IntType{}
+	packet := &openflow_13.OfpPacketOut{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				log.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				return nil, err
+			}
+		case "outPort":
+			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
+				log.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				log.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	//Invoke the Receive_packet_out API on the adapter
+	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
 	return new(empty.Empty), nil
 }
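
As a side note, both handlers above follow the same inter-container convention: every argument arrives as a keyed protobuf Any and is decoded with ptypes.UnmarshalAny. Below is a minimal sketch of that round trip, using the standard wrapper messages as stand-ins for ic.StrType and ic.IntType; the kvArg struct is likewise only a local mirror of kafka.KVArg.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/any"
	"github.com/golang/protobuf/ptypes/wrappers"
)

// kvArg is a local mirror of kafka.KVArg for illustration only.
type kvArg struct {
	Key   string
	Value *any.Any
}

func main() {
	// Encode side: what a caller (the core, for packet-out) puts on the wire.
	dev, _ := ptypes.MarshalAny(&wrappers.StringValue{Value: "olt-1234"})
	port, _ := ptypes.MarshalAny(&wrappers.Int64Value{Value: 65536})
	args := []*kvArg{{Key: "deviceId", Value: dev}, {Key: "outPort", Value: port}}

	// Decode side: the same key-switch / UnmarshalAny pattern used by
	// Receive_packet_out and Delete_device above.
	deviceId := &wrappers.StringValue{}
	egressPort := &wrappers.Int64Value{}
	for _, arg := range args {
		switch arg.Key {
		case "deviceId":
			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
				fmt.Println("cannot-unmarshal-deviceId:", err)
				return
			}
		case "outPort":
			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
				fmt.Println("cannot-unmarshal-outPort:", err)
				return
			}
		}
	}
	fmt.Println(deviceId.Value, egressPort.Value)
}
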
 
diff --git a/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
index a1dfa16..05df234 100644
--- a/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-go/adapters/iAdapter.go
@@ -33,12 +33,12 @@
 	Reenable_device(device *voltha.Device) error
 	Reboot_device(device *voltha.Device) error
 	Self_test_device(device *voltha.Device) error
-	Gelete_device(device *voltha.Device) error
+	Delete_device(device *voltha.Device) error
 	Get_device_details(device *voltha.Device) error
 	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups) error
 	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges) error
 	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(device *voltha.Device, egress_port_no int, msg openflow_13.PacketOut) error
+	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
 	Suppress_alarm(filter *voltha.AlarmFilter) error
 	Unsuppress_alarm(filter *voltha.AlarmFilter) error
 	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
diff --git a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
index c37307b..2873dbc 100755
--- a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
@@ -133,6 +133,7 @@
 	SharedResourceMgrs map[string]*PONResourceManager
 	SharedIdxByType    map[string]string
 	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
 }
 
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
@@ -254,10 +255,14 @@
 	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
-		PONRMgr.PonResourceRanges[StartIDx] = StartID
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
 	}
 	if EndID != 0 {
-		PONRMgr.PonResourceRanges[EndIDx] = EndID
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
 	}
 	//if SharedPoolID != 0 {
 	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
@@ -567,6 +572,7 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
 	}
+	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
 	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
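
The guarded range update above appears intended to make repeated calls converge on the most restrictive PON resource range (largest start, smallest end) rather than letting the last caller win. A small sketch of that merge behaviour, with 0 treated as "unset" in place of the nil interface check:

package main

import "fmt"

// mergeRange mirrors the guarded update above: keep the most restrictive
// bounds seen so far (largest start, smallest end), with 0 meaning "unset".
func mergeRange(curStart, curEnd, newStart, newEnd uint32) (uint32, uint32) {
	if newStart != 0 && (curStart == 0 || curStart < newStart) {
		curStart = newStart
	}
	if newEnd != 0 && (curEnd == 0 || curEnd > newEnd) {
		curEnd = newEnd
	}
	return curStart, curEnd
}

func main() {
	start, end := mergeRange(0, 0, 1024, 16383)     // first source, e.g. defaults
	start, end = mergeRange(start, end, 2048, 9215) // narrower device-reported range
	fmt.Println(start, end)                         // 2048 9215
}
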
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
index 9f7bebf..2879e99 100644
--- a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
@@ -178,12 +178,23 @@
 }
 
 type iScheduler struct {
-	AllocID   uint32    `json:"alloc_id"`
-	Scheduler Scheduler `json:"scheduler"`
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
 }
 type iGemPortAttribute struct {
-	GemportID    uint32           `json:"gem_port_id"`
-	GemAttribute GemPortAttribute `json:"gem_attribute"`
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    int           `json:"priority_q"`
+	Weight           int           `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
 }
 
 type TechProfileMgr struct {
@@ -207,7 +218,7 @@
 	ProfileType                    string              `json:"profile_type"`
 	Version                        int                 `json:"version"`
 	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	NumTconts                      uint32              `json:"num_tconts"`
+	NumTconts                      uint32              `json:"num_of_tconts"`
 	InstanceCtrl                   InstanceControl     `json:"instance_control"`
 	UsScheduler                    iScheduler          `json:"us_scheduler"`
 	DsScheduler                    iScheduler          `json:"ds_scheduler"`
@@ -371,7 +382,7 @@
 		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
 		return nil
 	}
-	fmt.Println("Num GEM ports in TP:", tp.NumGemPorts)
+	log.Debugw("Num GEM ports in TP", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
 		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
@@ -380,10 +391,24 @@
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.UpstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.DownstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
 	}
 	return &TechProfile{
 		SubscriberIdentifier: uniPortName,
@@ -394,11 +419,19 @@
 		NumTconts:            numOfTconts,
 		InstanceCtrl:         tp.InstanceCtrl,
 		UsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.UsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
 		DsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.DsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
 		UpstreamGemPortAttributeList:   usGemPortAttributeList,
 		DownstreamGemPortAttributeList: dsGemPortAttributeList}
 }
@@ -498,17 +531,17 @@
 }
 
 func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for upstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
 		return nil
@@ -516,24 +549,24 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
 func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
 
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for downstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
 		return nil
@@ -542,8 +575,8 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
@@ -561,7 +594,6 @@
 		}
 	}
 	tconts := []*openolt_pb.Tcont{}
-	// TODO: Fix me , UPSTREAM direction is not proper
 	// upstream scheduler
 	tcont_us := &openolt_pb.Tcont{
 		Direction: usSched.Direction,
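
The flattening of iScheduler and iGemPortAttribute changes the JSON shape of the tech-profile instance written to the KV store: scheduler and GEM-port attributes now appear inline instead of under nested "scheduler"/"gem_attribute" objects. A small sketch of the resulting keys, using local mirrors of the structs above; the field values and the "upstream_gem_port_attribute_list" wrapper name are illustrative only, and DiscardConfig is elided for brevity.

package main

import (
	"encoding/json"
	"os"
)

// Local mirrors of the flattened instance types above; json tags are copied
// from the diff, DiscardConfig is elided, and all values are illustrative.
type usScheduler struct {
	AllocID      uint32 `json:"alloc_id"`
	Direction    string `json:"direction"`
	AdditionalBw string `json:"additional_bw"`
	Priority     uint32 `json:"priority"`
	Weight       uint32 `json:"weight"`
	QSchedPolicy string `json:"q_sched_policy"`
}

type gemPortAttribute struct {
	GemportID        uint32 `json:"gemport_id"`
	MaxQueueSize     string `json:"max_q_size"`
	PbitMap          string `json:"pbit_map"`
	AesEncryption    string `json:"aes_encryption"`
	SchedulingPolicy string `json:"scheduling_policy"`
	PriorityQueue    int    `json:"priority_q"`
	Weight           int    `json:"weight"`
	DiscardPolicy    string `json:"discard_policy"`
}

func main() {
	inst := struct {
		UsScheduler usScheduler        `json:"us_scheduler"`
		UsGemPorts  []gemPortAttribute `json:"upstream_gem_port_attribute_list"`
	}{
		UsScheduler: usScheduler{AllocID: 1024, Direction: "UPSTREAM", QSchedPolicy: "Hybrid"},
		UsGemPorts: []gemPortAttribute{{GemportID: 1032, MaxQueueSize: "auto",
			PbitMap: "0b00000101", SchedulingPolicy: "WRR", PriorityQueue: 4, Weight: 25}},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(inst) // prints the flattened keys that now land in the KV store
}
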
diff --git a/vendor/github.com/opencord/voltha-go/db/model/branch.go b/vendor/github.com/opencord/voltha-go/db/model/branch.go
index ca89df0..5502e63 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/branch.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/branch.go
@@ -93,18 +93,21 @@
 
 		// Go through list of children names in current revision and new revision
 		// and then compare the resulting outputs to ensure that we have not lost any entries.
-		var previousNames, latestNames, missingNames []string
 
-		if previousNames = b.retrieveChildrenNames(b.Latest); len(previousNames) > 0 {
-			log.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
-		}
+		if level, _ := log.GetPackageLogLevel(); level == log.DebugLevel {
+			var previousNames, latestNames, missingNames []string
 
-		if latestNames = b.retrieveChildrenNames(b.Latest); len(latestNames) > 0 {
-			log.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
-		}
+			if previousNames = b.retrieveChildrenNames(b.Latest); len(previousNames) > 0 {
+				log.Debugw("children-of-previous-revision", log.Fields{"hash": b.Latest.GetHash(), "names": previousNames})
+			}
 
-		if missingNames = b.findMissingChildrenNames(previousNames, latestNames); len(missingNames) > 0 {
-			log.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
+			if latestNames = b.retrieveChildrenNames(b.Latest); len(latestNames) > 0 {
+				log.Debugw("children-of-latest-revision", log.Fields{"hash": latest.GetHash(), "names": latestNames})
+			}
+
+			if missingNames = b.findMissingChildrenNames(previousNames, latestNames); len(missingNames) > 0 {
+				log.Debugw("children-missing-in-latest-revision", log.Fields{"hash": latest.GetHash(), "names": missingNames})
+			}
 		}
 
 	} else {
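
The branch.go change above only computes the children-name diff when the package log level is Debug, so steady-state (info-level) runs no longer pay for it. The general pattern, sketched with a stand-in level check in place of log.GetPackageLogLevel:

package main

import "fmt"

type logLevel int

const (
	debugLevel logLevel = iota
	infoLevel
)

var currentLevel = infoLevel

// expensiveDiff stands in for retrieveChildrenNames/findMissingChildrenNames:
// work worth doing only when the result will actually be logged.
func expensiveDiff() []string {
	fmt.Println("computing children diff (expensive)")
	return nil
}

func main() {
	// Same shape as the branch.go change: gate the computation behind the
	// level check so info-level runs skip it entirely.
	if currentLevel == debugLevel {
		if missing := expensiveDiff(); len(missing) > 0 {
			fmt.Println("children-missing-in-latest-revision", missing)
		}
	}
}
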
diff --git a/vendor/github.com/opencord/voltha-go/db/model/node.go b/vendor/github.com/opencord/voltha-go/db/model/node.go
index 7bfdca0..1621b6f 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/node.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/node.go
@@ -24,7 +24,6 @@
 	"github.com/golang/protobuf/proto"
 	"github.com/opencord/voltha-go/common/log"
 	"reflect"
-	"runtime/debug"
 	"strings"
 	"sync"
 )
@@ -127,6 +126,9 @@
 
 	// If anything is new, then set the revision as the latest
 	if branch.GetLatest() == nil || revision.GetHash() != branch.GetLatest().GetHash() {
+		if revision.GetName() != "" {
+			GetRevCache().Cache.Store(revision.GetName(), revision)
+		}
 		branch.SetLatest(revision)
 	}
 
@@ -275,7 +277,7 @@
 
 	var result interface{}
 	var prList []interface{}
-	if pr := rev.LoadFromPersistence(path, txid); pr != nil {
+	if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil {
 		for _, revEntry := range pr {
 			prList = append(prList, revEntry.GetData())
 		}
@@ -288,6 +290,7 @@
 // Get retrieves the data from a node tree that resides at the specified path
 func (n *node) Get(path string, hash string, depth int, reconcile bool, txid string) interface{} {
 	log.Debugw("node-get-request", log.Fields{"path": path, "hash": hash, "depth": depth, "reconcile": reconcile, "txid": txid})
+
 	for strings.HasPrefix(path, "/") {
 		path = path[1:]
 	}
@@ -307,9 +310,15 @@
 
 	var result interface{}
 
-	// If there is not request to reconcile, try to get it from memory
+	// If there is no request to reconcile, try to get it from memory
 	if !reconcile {
-		if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+		// Try to find an entry matching the path value from one of these sources
+		// 1.  Start with the cache which stores revisions by watch names
+		// 2.  Then look in the revision tree, especially if it's a sub-path such as /devices/1234/flows
+		// 3.  As a last effort, move on to the KV store
+		if entry, exists := GetRevCache().Cache.Load(path); exists && entry.(Revision) != nil {
+			return proto.Clone(entry.(Revision).GetData().(proto.Message))
+		} else if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
 			return result
 		}
 	}
@@ -317,14 +326,14 @@
 	// If we got to this point, we are either trying to reconcile with the db or
 	// or we simply failed at getting information from memory
 	if n.Root.KvStore != nil {
-		var prList []interface{}
-		if pr := rev.LoadFromPersistence(path, txid); pr != nil && len(pr) > 0 {
+		if pr := rev.LoadFromPersistence(path, txid, nil); pr != nil && len(pr) > 0 {
 			// Did we receive a single or multiple revisions?
 			if len(pr) > 1 {
+				var revs []interface{}
 				for _, revEntry := range pr {
-					prList = append(prList, revEntry.GetData())
+					revs = append(revs, revEntry.GetData())
 				}
-				result = prList
+				result = revs
 			} else {
 				result = pr[0].GetData()
 			}
@@ -334,7 +343,7 @@
 	return result
 }
 
-// getPath traverses the specified path and retrieves the data associated to it
+//getPath traverses the specified path and retrieves the data associated to it
 func (n *node) getPath(rev Revision, path string, depth int) interface{} {
 	if path == "" {
 		return n.getData(rev, depth)
@@ -472,6 +481,7 @@
 			idx, childRev := n.findRevByKey(children, field.Key, keyValue)
 
 			if childRev == nil {
+				log.Debugw("child-revision-is-nil", log.Fields{"key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -490,6 +500,7 @@
 					log.Debug("clear-hash - %s %+v", newChildRev.GetHash(), newChildRev)
 					newChildRev.ClearHash()
 				}
+				log.Debugw("child-revisions-have-matching-hash", log.Fields{"hash": childRev.GetHash(), "key": keyValue})
 				return branch.GetLatest()
 			}
 
@@ -505,15 +516,15 @@
 			// Prefix the hash value with the data type (e.g. devices, logical_devices, adapters)
 			newChildRev.SetName(name + "/" + _keyValueType)
 
+			branch.LatestLock.Lock()
+			defer branch.LatestLock.Unlock()
+
 			if idx >= 0 {
 				children[idx] = newChildRev
 			} else {
 				children = append(children, newChildRev)
 			}
 
-			branch.LatestLock.Lock()
-			defer branch.LatestLock.Unlock()
-
 			updatedRev := rev.UpdateChildren(name, children, branch)
 
 			n.makeLatest(branch, updatedRev, nil)
@@ -544,13 +555,11 @@
 }
 
 func (n *node) doUpdate(branch *Branch, data interface{}, strict bool) Revision {
-	log.Debugf("Comparing types - expected: %+v, actual: %+v &&&&&& %s", reflect.ValueOf(n.Type).Type(),
-		reflect.TypeOf(data),
-		string(debug.Stack()))
+	log.Debugw("comparing-types", log.Fields{"expected": reflect.ValueOf(n.Type).Type(), "actual": reflect.TypeOf(data)})
 
 	if reflect.TypeOf(data) != reflect.ValueOf(n.Type).Type() {
 		// TODO raise error
-		log.Errorf("data does not match type: %+v", n.Type)
+		log.Errorw("types-do-not-match", log.Fields{"actual": reflect.TypeOf(data), "expected": n.Type})
 		return nil
 	}
 
@@ -675,7 +684,10 @@
 			newChildRev := childNode.Add(path, data, txid, makeBranch)
 
 			// Prefix the hash with the data type (e.g. devices, logical_devices, adapters)
-			childRev.SetName(name + "/" + keyValue.(string))
+			newChildRev.SetName(name + "/" + keyValue.(string))
+
+			branch.LatestLock.Lock()
+			defer branch.LatestLock.Unlock()
 
 			if idx >= 0 {
 				children[idx] = newChildRev
@@ -683,9 +695,6 @@
 				children = append(children, newChildRev)
 			}
 
-			branch.LatestLock.Lock()
-			defer branch.LatestLock.Unlock()
-
 			updatedRev := rev.UpdateChildren(name, children, branch)
 			n.makeLatest(branch, updatedRev, nil)
 
@@ -758,15 +767,15 @@
 					}
 					newChildRev := childNode.Remove(path, txid, makeBranch)
 
+					branch.LatestLock.Lock()
+					defer branch.LatestLock.Unlock()
+
 					if idx >= 0 {
 						children[idx] = newChildRev
 					} else {
 						children = append(children, newChildRev)
 					}
 
-					branch.LatestLock.Lock()
-					defer branch.LatestLock.Unlock()
-
 					rev.SetChildren(name, children)
 					branch.GetLatest().Drop(txid, false)
 					n.makeLatest(branch, rev, nil)
@@ -784,6 +793,7 @@
 				}
 
 				childRev.StorageDrop(txid, true)
+				GetRevCache().Cache.Delete(childRev.GetName())
 
 				branch.LatestLock.Lock()
 				defer branch.LatestLock.Unlock()
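
The node.go changes introduce a lookup order for Get: revision cache first (keyed by watch name), then the in-memory revision tree, then the KV store, with cached data cloned before being returned. A compact sketch of that order, using a plain sync.Map and a protobuf wrapper message in place of the model's revision types:

package main

import (
	"fmt"
	"sync"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

// revCache stands in for GetRevCache().Cache, keyed by watch name.
var revCache sync.Map

// get mirrors the lookup order added to node.Get: 1. revision cache,
// 2. in-memory revision tree, 3. KV store.
func get(path string, fromTree, fromKV func(string) proto.Message) proto.Message {
	if entry, ok := revCache.Load(path); ok && entry != nil {
		// Clone so callers cannot mutate the cached revision data.
		return proto.Clone(entry.(proto.Message))
	}
	if m := fromTree(path); m != nil {
		return m
	}
	return fromKV(path)
}

func main() {
	revCache.Store("devices/olt-1", &wrappers.StringValue{Value: "cached-device"})
	miss := func(string) proto.Message { return nil }
	fmt.Println(get("devices/olt-1", miss, miss)) // served from the cache
}
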
diff --git a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
index 0ccc58e..d7b0b58 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/non_persisted_revision.go
@@ -21,30 +21,28 @@
 	"fmt"
 	"github.com/golang/protobuf/proto"
 	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/kvstore"
 	"reflect"
-	"runtime/debug"
 	"sort"
 	"sync"
 )
 
 // TODO: Cache logic will have to be revisited to cleanup unused entries in memory (disabled for now)
 //
-//type revCacheSingleton struct {
-//	sync.RWMutex
-//	//Cache map[string]interface{}
-//	Cache sync.Map
-//}
-//
-//var revCacheInstance *revCacheSingleton
-//var revCacheOnce sync.Once
-//
-//func GetRevCache() *revCacheSingleton {
-//	revCacheOnce.Do(func() {
-//		//revCacheInstance = &revCacheSingleton{Cache: make(map[string]interface{})}
-//		revCacheInstance = &revCacheSingleton{Cache: sync.Map{}}
-//	})
-//	return revCacheInstance
-//}
+type revCacheSingleton struct {
+	sync.RWMutex
+	Cache sync.Map
+}
+
+var revCacheInstance *revCacheSingleton
+var revCacheOnce sync.Once
+
+func GetRevCache() *revCacheSingleton {
+	revCacheOnce.Do(func() {
+		revCacheInstance = &revCacheSingleton{Cache: sync.Map{}}
+	})
+	return revCacheInstance
+}
 
 type NonPersistedRevision struct {
 	mutex        sync.RWMutex
@@ -409,7 +407,7 @@
 
 // Drop is used to indicate when a revision is no longer required
 func (npr *NonPersistedRevision) Drop(txid string, includeConfig bool) {
-	log.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "stack": string(debug.Stack())})
+	log.Debugw("dropping-revision", log.Fields{"hash": npr.GetHash(), "name": npr.GetName()})
 	npr.discarded = true
 }
 
@@ -428,7 +426,7 @@
 	}
 }
 
-func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+func (npr *NonPersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	// stub... required by interface
 	return nil
 }
diff --git a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
index a56b776..ea99cf7 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/persisted_revision.go
@@ -23,7 +23,6 @@
 	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/kvstore"
 	"reflect"
-	"runtime/debug"
 	"strings"
 	"sync"
 )
@@ -72,10 +71,7 @@
 		return
 	}
 
-	if pair, _ := pr.kvStore.Get(pr.GetName()); pair != nil && skipOnExist {
-		log.Debugw("revision-config-already-exists", log.Fields{"hash": pr.GetHash(), "name": pr.GetName()})
-		return
-	}
+	log.Debugw("ready-to-store-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetData()})
 
 	if blob, err := proto.Marshal(pr.GetConfig().Data.(proto.Message)); err != nil {
 		// TODO report error
@@ -89,10 +85,9 @@
 		}
 
 		if err := pr.kvStore.Put(pr.GetName(), blob); err != nil {
-			log.Warnw("problem-storing-revision-config",
-				log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			log.Warnw("problem-storing-revision", log.Fields{"error": err, "hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 		} else {
-			log.Debugw("storing-revision-config", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
+			log.Debugw("storing-revision", log.Fields{"hash": pr.GetHash(), "name": pr.GetName(), "data": pr.GetConfig().Data})
 			pr.isStored = true
 		}
 	}
@@ -100,7 +95,7 @@
 
 func (pr *PersistedRevision) SetupWatch(key string) {
 	if key == "" {
-		log.Debugw("ignoring-watch", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("ignoring-watch", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 		return
 	}
 
@@ -111,7 +106,7 @@
 	if pr.events == nil {
 		pr.events = make(chan *kvstore.Event)
 
-		log.Debugw("setting-watch-channel", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("setting-watch-channel", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 
 		pr.SetName(key)
 		pr.events = pr.kvStore.CreateWatch(key)
@@ -120,7 +115,7 @@
 	if !pr.isWatched {
 		pr.isWatched = true
 
-		log.Debugw("setting-watch-routine", log.Fields{"key": key, "revision-hash": pr.GetHash(), "stack": string(debug.Stack())})
+		log.Debugw("setting-watch-routine", log.Fields{"key": key, "revision-hash": pr.GetHash()})
 
 		// Start watching
 		go pr.startWatching()
@@ -128,7 +123,7 @@
 }
 
 func (pr *PersistedRevision) startWatching() {
-	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash(), "stack": string(debug.Stack())})
+	log.Debugw("starting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 
 StopWatchLoop:
 	for {
@@ -154,17 +149,106 @@
 			case kvstore.PUT:
 				log.Debugw("update-in-memory", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 
-				if dataPair, err := pr.kvStore.Get(pr.GetName()); err != nil || dataPair == nil {
-					log.Errorw("update-in-memory--key-retrieval-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
-				} else {
-					data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())
+				data := reflect.New(reflect.TypeOf(pr.GetData()).Elem())
 
-					if err := proto.Unmarshal(dataPair.Value.([]byte), data.Interface().(proto.Message)); err != nil {
-						log.Errorw("update-in-memory--unmarshal-failed", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
-					} else {
-						if pr.GetNode().GetProxy() != nil {
-							pr.LoadFromPersistence(pr.GetNode().GetProxy().getFullPath(), "")
+				if err := proto.Unmarshal(event.Value.([]byte), data.Interface().(proto.Message)); err != nil {
+					log.Errorw("failed-to-unmarshal-watch-data", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "error": err})
+				} else {
+					var pathLock string
+					var pac *proxyAccessControl
+					var blobs map[string]*kvstore.KVPair
+
+					// The watch reported new persistence data.
+					// Construct an object that will be used to update the memory
+					blobs = make(map[string]*kvstore.KVPair)
+					key, _ := kvstore.ToString(event.Key)
+					blobs[key] = &kvstore.KVPair{
+						Key:     key,
+						Value:   event.Value,
+						Session: "",
+						Lease:   0,
+					}
+
+					if pr.GetNode().GetProxy() != nil {
+						//
+						// If a proxy exists for this revision, use it to lock access to the path
+						// and prevent simultaneous updates to the object in memory
+						//
+						pathLock, _ = pr.GetNode().GetProxy().parseForControlledPath(pr.GetNode().GetProxy().getFullPath())
+
+						//If the proxy already has a request in progress, then there is no need to process the watch
+						log.Debugw("checking-if-path-is-locked", log.Fields{"key": pr.GetHash(), "pathLock": pathLock})
+						if PAC().IsReserved(pathLock) {
+							log.Debugw("operation-in-progress", log.Fields{
+								"key":       pr.GetHash(),
+								"path":      pr.GetNode().GetProxy().getFullPath(),
+								"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							})
+
+							continue
+
+							// TODO/FIXME: keep logic for now in case we need to control based on running operation
+							//
+							// The code below seems to revert the in-memory/persistence value (occasionally) with
+							// the one received from the watch event.
+							//
+							// The same problem may occur, in the scenario where the core owning a device
+							// receives a watch event for an update made by another core, and when the owning core is
+							// also processing an update.  Need to investigate...
+							//
+							//switch pr.GetNode().GetRoot().GetProxy().Operation {
+							//case PROXY_UPDATE:
+							//	// We will need to reload once the update operation completes.
+							//	// Therefore, the data of the current event is most likely out-dated
+							//	// and should be ignored
+							//	log.Debugw("reload-required", log.Fields{
+							//		"key":       pr.GetHash(),
+							//		"path":      pr.GetNode().GetProxy().getFullPath(),
+							//		"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							//	})
+							//
+							//	// Eliminate the object constructed earlier
+							//	blobs = nil
+							//
+							//case PROXY_ADD:
+							//	fallthrough
+							//
+							//case PROXY_REMOVE:
+							//	fallthrough
+							//
+							//case PROXY_GET:
+							//	fallthrough
+							//
+							//default:
+							//	// No need to process the event ... move on
+							//	log.Debugw("", log.Fields{
+							//		"key":       pr.GetHash(),
+							//		"path":      pr.GetNode().GetProxy().getFullPath(),
+							//		"operation": pr.GetNode().GetRoot().GetProxy().Operation,
+							//	})
+							//
+							//	continue
+							//}
 						}
+
+						// Reserve the path to prevent others to modify while we reload from persistence
+						log.Debugw("reserve-and-lock-path", log.Fields{"key": pr.GetHash(), "path": pathLock})
+						pac = PAC().ReservePath(pr.GetNode().GetProxy().getFullPath(), pr.GetNode().GetProxy(), pathLock)
+						pac.SetProxy(pr.GetNode().GetProxy())
+						pac.lock()
+
+						// Load changes and apply to memory
+						pr.LoadFromPersistence(pr.GetName(), "", blobs)
+
+						log.Debugw("release-and-unlock-path", log.Fields{"key": pr.GetHash(), "path": pathLock})
+						pac.unlock()
+						PAC().ReleasePath(pathLock)
+
+					} else {
+						log.Debugw("revision-with-no-proxy", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
+
+						// Load changes and apply to memory
+						pr.LoadFromPersistence(pr.GetName(), "", blobs)
 					}
 				}
 
@@ -176,7 +260,7 @@
 
 	Watches().Cache.Delete(pr.GetName() + "-" + pr.GetHash())
 
-	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName(), "stack": string(debug.Stack())})
+	log.Debugw("exiting-watch", log.Fields{"key": pr.GetHash(), "watch": pr.GetName()})
 }
 
 // UpdateData modifies the information in the data model and saves it in the persistent storage
@@ -196,7 +280,7 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
+		pr.Drop(branch.Txid, false)
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -206,7 +290,8 @@
 }
 
 // UpdateChildren modifies the children of a revision and of a specific component and saves it in the persistent storage
-func (pr *PersistedRevision) UpdateChildren(name string, children []Revision, branch *Branch) Revision {
+func (pr *PersistedRevision) UpdateChildren(name string, children []Revision,
+	branch *Branch) Revision {
 	log.Debugw("updating-persisted-children", log.Fields{"hash": pr.GetHash()})
 
 	newNPR := pr.Revision.UpdateChildren(name, children, branch)
@@ -222,7 +307,6 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -248,7 +332,6 @@
 		newPR.isWatched = false
 		newPR.isStored = false
 		pr.Drop(branch.Txid, false)
-		newPR.SetupWatch(newPR.GetName())
 	} else {
 		newPR.isWatched = true
 		newPR.isStored = true
@@ -267,7 +350,7 @@
 // and its associated config when required
 func (pr *PersistedRevision) StorageDrop(txid string, includeConfig bool) {
 	log.Debugw("dropping-revision",
-		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash, "stack": string(debug.Stack())})
+		log.Fields{"txid": txid, "hash": pr.GetHash(), "config-hash": pr.GetConfig().Hash})
 
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
@@ -297,25 +380,28 @@
 
 // verifyPersistedEntry validates if the provided data is available or not in memory and applies updates as required
 func (pr *PersistedRevision) verifyPersistedEntry(data interface{}, typeName string, keyName string, keyValue string, txid string) (response Revision) {
-	rev := pr
+	//rev := pr
 
-	children := make([]Revision, len(rev.GetBranch().GetLatest().GetChildren(typeName)))
-	copy(children, rev.GetBranch().GetLatest().GetChildren(typeName))
+	children := make([]Revision, len(pr.GetBranch().GetLatest().GetChildren(typeName)))
+	copy(children, pr.GetBranch().GetLatest().GetChildren(typeName))
 
 	// Verify if the revision contains a child that matches that key
-	if childIdx, childRev := rev.GetNode().findRevByKey(rev.GetBranch().GetLatest().GetChildren(typeName), keyName, keyValue); childRev != nil {
+	if childIdx, childRev := pr.GetNode().findRevByKey(pr.GetBranch().GetLatest().GetChildren(typeName), keyName,
+		keyValue); childRev != nil {
 		// A child matching the provided key exists in memory
 		// Verify if the data differs to what was retrieved from persistence
 		if childRev.GetData().(proto.Message).String() != data.(proto.Message).String() {
 			log.Debugw("verify-persisted-entry--data-is-different", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 
 			// Data has changed; replace the child entry and update the parent revision
-			updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
-			updatedChildRev.SetupWatch(updatedChildRev.GetName())
 			childRev.Drop(txid, false)
+			updatedChildRev := childRev.UpdateData(data, childRev.GetBranch())
+			updatedChildRev.GetNode().SetProxy(childRev.GetNode().GetProxy())
+			updatedChildRev.SetupWatch(updatedChildRev.GetName())
 
 			if childIdx >= 0 {
 				children[childIdx] = updatedChildRev
@@ -323,18 +409,19 @@
 				children = append(children, updatedChildRev)
 			}
 
-			rev.GetBranch().LatestLock.Lock()
-			updatedRev := rev.UpdateChildren(typeName, children, rev.GetBranch())
-			rev.GetBranch().Node.makeLatest(rev.GetBranch(), updatedRev, nil)
-			rev.GetBranch().LatestLock.Unlock()
+			pr.GetBranch().LatestLock.Lock()
+			updatedRev := pr.GetBranch().Node.Latest().UpdateChildren(typeName, children, pr.GetBranch())
+			pr.GetBranch().Node.makeLatest(pr.GetBranch(), updatedRev, nil)
+			pr.GetBranch().LatestLock.Unlock()
 
 			// Drop the previous child revision
-			rev.GetBranch().Node.Latest().ChildDrop(typeName, childRev.GetHash())
+			pr.GetBranch().Node.Latest().ChildDrop(typeName, childRev.GetHash())
 
 			if updatedChildRev != nil {
 				log.Debugw("verify-persisted-entry--adding-child", log.Fields{
 					"key":  updatedChildRev.GetHash(),
 					"name": updatedChildRev.GetName(),
+					"data": updatedChildRev.GetData(),
 				})
 				response = updatedChildRev
 			}
@@ -343,11 +430,13 @@
 			log.Debugw("verify-persisted-entry--same-data", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 			if childRev != nil {
 				log.Debugw("verify-persisted-entry--keeping-child", log.Fields{
 					"key":  childRev.GetHash(),
 					"name": childRev.GetName(),
+					"data": childRev.GetData(),
 				})
 				response = childRev
 			}
@@ -358,29 +447,32 @@
 		log.Debugw("verify-persisted-entry--no-such-entry", log.Fields{
 			"key":  keyValue,
 			"name": typeName,
+			"data": data,
 		})
 
 		// Construct a new child node with the retrieved persistence data
-		childRev = rev.GetBranch().Node.MakeNode(data, txid).Latest(txid)
+		childRev = pr.GetBranch().Node.MakeNode(data, txid).Latest(txid)
 
 		// We need to start watching this entry for future changes
 		childRev.SetName(typeName + "/" + keyValue)
 
 		// Add the child to the parent revision
-		rev.GetBranch().LatestLock.Lock()
+		pr.GetBranch().LatestLock.Lock()
 		children = append(children, childRev)
-		updatedRev := rev.GetBranch().Node.Latest().UpdateChildren(typeName, children, rev.GetBranch())
+		updatedRev := pr.GetBranch().Node.Latest().UpdateChildren(typeName, children, pr.GetBranch())
+		updatedRev.GetNode().SetProxy(pr.GetNode().GetProxy())
 		childRev.SetupWatch(childRev.GetName())
 
 		//rev.GetBranch().Node.Latest().Drop(txid, false)
-		rev.GetBranch().Node.makeLatest(rev.GetBranch(), updatedRev, nil)
-		rev.GetBranch().LatestLock.Unlock()
+		pr.GetBranch().Node.makeLatest(pr.GetBranch(), updatedRev, nil)
+		pr.GetBranch().LatestLock.Unlock()
 
 		// Child entry is valid and can be included in the response object
 		if childRev != nil {
 			log.Debugw("verify-persisted-entry--adding-child", log.Fields{
 				"key":  childRev.GetHash(),
 				"name": childRev.GetName(),
+				"data": childRev.GetData(),
 			})
 			response = childRev
 		}
@@ -391,39 +483,46 @@
 
 // LoadFromPersistence retrieves data from kv store at the specified location and refreshes the memory
 // by adding missing entries, updating changed entries and ignoring unchanged ones
-func (pr *PersistedRevision) LoadFromPersistence(path string, txid string) []Revision {
+func (pr *PersistedRevision) LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision {
 	pr.mutex.Lock()
 	defer pr.mutex.Unlock()
 
 	log.Debugw("loading-from-persistence", log.Fields{"path": path, "txid": txid})
 
 	var response []Revision
-	var rev Revision
 
-	rev = pr
+	for strings.HasPrefix(path, "/") {
+		path = path[1:]
+	}
 
 	if pr.kvStore != nil && path != "" {
-		blobMap, _ := pr.kvStore.List(path)
+		if blobs == nil || len(blobs) == 0 {
+			log.Debugw("retrieve-from-kv", log.Fields{"path": path, "txid": txid})
+			blobs, _ = pr.kvStore.List(path)
+		}
 
 		partition := strings.SplitN(path, "/", 2)
 		name := partition[0]
 
+		var nodeType interface{}
 		if len(partition) < 2 {
 			path = ""
+			nodeType = pr.GetBranch().Node.Type
 		} else {
 			path = partition[1]
+			nodeType = pr.GetBranch().Node.Root.Type
 		}
 
-		field := ChildrenFields(rev.GetBranch().Node.Type)[name]
+		field := ChildrenFields(nodeType)[name]
 
 		if field != nil && field.IsContainer {
 			log.Debugw("load-from-persistence--start-blobs", log.Fields{
 				"path": path,
 				"name": name,
-				"size": len(blobMap),
+				"size": len(blobs),
 			})
 
-			for _, blob := range blobMap {
+			for _, blob := range blobs {
 				output := blob.Value.([]byte)
 
 				data := reflect.New(field.ClassType.Elem())
@@ -440,7 +539,8 @@
 						// based on the field's key attribute
 						_, key := GetAttributeValue(data.Interface(), field.Key, 0)
 
-						if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(), txid); entry != nil {
+						if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, key.String(),
+							txid); entry != nil {
 							response = append(response, entry)
 						}
 					}
@@ -456,7 +556,8 @@
 					}
 					keyValue := field.KeyFromStr(key)
 
-					if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string), txid); entry != nil {
+					if entry := pr.verifyPersistedEntry(data.Interface(), name, field.Key, keyValue.(string),
+						txid); entry != nil {
 						response = append(response, entry)
 					}
 				}
@@ -465,7 +566,8 @@
 			log.Debugw("load-from-persistence--end-blobs", log.Fields{"path": path, "name": name})
 		} else {
 			log.Debugw("load-from-persistence--cannot-process-field", log.Fields{
-				"type": rev.GetBranch().Node.Type,
+
+				"type": pr.GetBranch().Node.Type,
 				"name": name,
 			})
 		}
diff --git a/vendor/github.com/opencord/voltha-go/db/model/proxy.go b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
index b45fb1d..2933464 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/proxy.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/proxy.go
@@ -186,11 +186,20 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+	log.Debugw("proxy-list", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
+	p.Operation = PROXY_LIST
 	pac.SetProxy(p)
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	rv := pac.List(path, depth, deep, txid, controlled)
 
@@ -208,10 +217,16 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, PathLock: %s", path, effectivePath, pathLock)
+	log.Debugw("proxy-get", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
+	p.Operation = PROXY_GET
 	pac.SetProxy(p)
 
 	rv := pac.Get(path, depth, deep, txid, controlled)
@@ -237,7 +252,13 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s, Controlled: %b", path, effectivePath, fullPath, pathLock, controlled)
+	log.Debugw("proxy-update", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
@@ -247,7 +268,6 @@
 	defer func(op ProxyOperation) {
 		pac.getProxy().Operation = op
 	}(PROXY_GET)
-
 	log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
 
 	return pac.Update(fullPath, data, strict, txid, controlled)
@@ -273,15 +293,21 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-add-with-id", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_ADD
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	pac.SetProxy(p)
 
@@ -308,16 +334,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-add", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_ADD
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
 
@@ -342,16 +374,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-remove", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(effectivePath, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_REMOVE
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
 
@@ -377,16 +415,22 @@
 
 	pathLock, controlled := p.parseForControlledPath(effectivePath)
 
-	log.Debugf("Path: %s, Effective: %s, Full: %s, PathLock: %s", path, effectivePath, fullPath, pathLock)
+	log.Debugw("proxy-create", log.Fields{
+		"path":       path,
+		"effective":  effectivePath,
+		"full":       fullPath,
+		"pathLock":   pathLock,
+		"controlled": controlled,
+	})
 
 	pac := PAC().ReservePath(path, p, pathLock)
 	defer PAC().ReleasePath(pathLock)
 
 	p.Operation = PROXY_CREATE
 	pac.SetProxy(p)
-	defer func() {
-		p.Operation = PROXY_GET
-	}()
+	defer func(op ProxyOperation) {
+		pac.getProxy().Operation = op
+	}(PROXY_GET)
 
 	log.Debugw("proxy-operation--create-proxy", log.Fields{"operation": p.Operation})
 
diff --git a/vendor/github.com/opencord/voltha-go/db/model/revision.go b/vendor/github.com/opencord/voltha-go/db/model/revision.go
index 79620e1..74ae3f7 100644
--- a/vendor/github.com/opencord/voltha-go/db/model/revision.go
+++ b/vendor/github.com/opencord/voltha-go/db/model/revision.go
@@ -15,6 +15,10 @@
  */
 package model
 
+import (
+	"github.com/opencord/voltha-go/db/kvstore"
+)
+
 type Revision interface {
 	Finalize(bool)
 	IsDiscarded() bool
@@ -38,7 +42,7 @@
 	Get(int) interface{}
 	GetData() interface{}
 	GetNode() *node
-	LoadFromPersistence(path string, txid string) []Revision
+	LoadFromPersistence(path string, txid string, blobs map[string]*kvstore.KVPair) []Revision
 	UpdateData(data interface{}, branch *Branch) Revision
 	UpdateChildren(name string, children []Revision, branch *Branch) Revision
 	UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
index ec191dc..367f442 100644
--- a/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
+++ b/vendor/github.com/opencord/voltha-go/rw_core/coreIf/device_manager_if.go
@@ -25,4 +25,13 @@
 type DeviceManager interface {
 	GetDevice(string) (*voltha.Device, error)
 	IsRootDevice(string) (bool, error)
+	NotifyInvalidTransition(*voltha.Device) error
+	SetAdminStateToEnable(*voltha.Device) error
+	CreateLogicalDevice(*voltha.Device) error
+	SetupUNILogicalPorts(*voltha.Device) error
+	DisableAllChildDevices(cDevice *voltha.Device) error
+	DeleteLogicalDevice(cDevice *voltha.Device) error
+	DeleteLogicalPorts(cDevice *voltha.Device) error
+	DeleteAllChildDevices(cDevice *voltha.Device) error
+	RunPostDeviceDelete(cDevice *voltha.Device) error
 }
diff --git a/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
index 0c485bb..c2c9287 100644
--- a/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-go/rw_core/utils/flow_utils.go
@@ -171,9 +171,11 @@
 
 func (dr *DeviceRules) Copy() *DeviceRules {
 	copyDR := NewDeviceRules()
-	for key, val := range dr.Rules {
-		if val != nil {
-			copyDR.Rules[key] = val.Copy()
+	if dr != nil {
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
 		}
 	}
 	return copyDR