Getting openonu-go to work with the latest rw-core master

Change-Id: Iab3a1ec7de438cce8b865737d6f34447f1723298
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
index dbf3418..9636a7d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
@@ -18,7 +18,6 @@
 
 import (
 	"context"
-
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
index 02fa3de..bbae0ed 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
@@ -17,6 +17,7 @@
 
 import (
 	"context"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
 	"time"
 
 	"github.com/golang/protobuf/proto"
@@ -32,14 +33,17 @@
 	kafkaICProxy kafka.InterContainerProxy
 	adapterTopic string
 	coreTopic    string
+	endpointMgr  kafka.EndpointManager
 }
 
-func NewAdapterProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *AdapterProxy {
-	var proxy AdapterProxy
-	proxy.kafkaICProxy = kafkaProxy
-	proxy.adapterTopic = adapterTopic
-	proxy.coreTopic = coreTopic
-	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+func NewAdapterProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
+	proxy := AdapterProxy{
+		kafkaICProxy: kafkaProxy,
+		adapterTopic: adapterTopic,
+		coreTopic:    coreTopic,
+		endpointMgr:  kafka.NewEndpointManager(backend),
+	}
+	logger.Debugw("topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 	return &proxy
 }
 
@@ -87,7 +91,11 @@
 	}
 
 	// Set up the required rpc arguments
-	topic := kafka.Topic{Name: toAdapter}
+	endpoint, err := ap.endpointMgr.GetEndpoint(toDeviceId, toAdapter)
+	if err != nil {
+		return err
+	}
+	topic := kafka.Topic{Name: string(endpoint)}
 	replyToTopic := kafka.Topic{Name: fromAdapter}
 	rpc := "process_inter_adapter_message"
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
index acf818c..95a036d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Set up this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "common"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
index 7cb933d..20e1a52 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
@@ -99,6 +99,28 @@
 	topic := kafka.Topic{Name: ap.coreTopic}
 	replyToTopic := ap.getAdapterTopic()
 	args := make([]*kafka.KVArg, 2)
+
+	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
+		log.Fatal("totalReplicas can't be 0, since you're here you have at least one")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
+		log.Fatal("currentReplica can't be 0, it has to start from 1")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
+		// If the adapter does not set these fields they default to 0; in that
+		// case the adapter is not ready to be scaled, so treat it as a single
+		// instance
+		adapter.CurrentReplica = 1
+		adapter.TotalReplicas = 1
+	}
+
+	if adapter.CurrentReplica > adapter.TotalReplicas {
+		log.Fatalf("CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+			adapter.CurrentReplica, adapter.TotalReplicas)
+	}
+
 	args[0] = &kafka.KVArg{
 		Key:   "adapter",
 		Value: adapter,
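
To summarize the checks above, a sketch of what the new validation accepts and rejects; the adapter values are illustrative:

```go
package example

import "github.com/opencord/voltha-protos/v3/go/voltha"

// Outcomes of RegisterAdapter for the four replica-field combinations:
//   CurrentReplica=0, TotalReplicas=0 -> rewritten to 1/1 (unscaled adapter)
//   CurrentReplica=2, TotalReplicas=4 -> valid scaled instance (1-based)
//   CurrentReplica=0, TotalReplicas=4 -> log.Fatal: replicas start from 1
//   CurrentReplica=5, TotalReplicas=4 -> log.Fatal: out of range
func scaledAdapter() *voltha.Adapter {
	return &voltha.Adapter{
		Id:             "brcm_openomci_onu_2",
		Vendor:         "VOLTHA OpenONU",
		Endpoint:       "brcm_openomci_onu_2",
		CurrentReplica: 2,
		TotalReplicas:  4,
	}
}
```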
@@ -124,7 +146,7 @@
 	}
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
 	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
 	return unPackResponse(rpc, device.Id, success, result)
 }
@@ -148,13 +170,13 @@
 
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
-	log.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "PortsStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -174,7 +196,7 @@
 
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
@@ -195,14 +217,14 @@
 
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "DeviceStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -226,7 +248,7 @@
 	}
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
@@ -277,7 +299,7 @@
 		Value: oId,
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
@@ -315,7 +337,7 @@
 		Value: id,
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
@@ -335,7 +357,7 @@
 		Value: id,
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
@@ -354,7 +376,7 @@
 		Value: id,
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
@@ -414,7 +436,7 @@
 		}
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
@@ -450,7 +472,7 @@
 		Value: id,
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
@@ -496,7 +518,7 @@
 		Key:   "packet",
 		Value: pkt,
 	}
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
@@ -520,7 +542,7 @@
 		Key:   "device_reason",
 		Value: reason,
 	}
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
@@ -538,7 +560,7 @@
 		Key:   "device_pm_config",
 		Value: pmConfigs,
 	}
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
 	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
 	return unPackResponse(rpc, pmConfigs.Id, success, result)
 }
@@ -555,7 +577,7 @@
 		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
 	}
 
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
 	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
@@ -592,7 +614,7 @@
 
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
 	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
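
The nil-to-context.Background() changes above keep InvokeRPC from deriving a child context off a nil parent. Note that the methods still substitute context.Background() internally; a caller-side sketch with a bounded context, where the device ID and statuses are illustrative:

```go
package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/common"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

func markActive(ctx context.Context, cp *common.CoreProxy) error {
	// Bound the call from the caller's side; the proxy currently substitutes
	// context.Background() when invoking the RPC, so this deadline does not
	// yet propagate to the kafka round trip.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return cp.DeviceStateUpdate(ctx, "device-1234",
		voltha.ConnectStatus_REACHABLE, voltha.OperStatus_ACTIVE)
}
```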
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
index 034de8e..da9c9eb 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
@@ -23,6 +23,7 @@
 	"strings"
 	"time"
 
+	"github.com/golang/protobuf/ptypes"
 	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
 	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
@@ -60,7 +61,11 @@
 	return fmt.Sprintf("Voltha.openolt.%s.%s", eventName, strconv.FormatInt(time.Now().UnixNano(), 10))
 }
 
-func (ep *EventProxy) getEventHeader(eventName string, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, eventType adapterif.EventType, raisedTs int64) *voltha.EventHeader {
+func (ep *EventProxy) getEventHeader(eventName string,
+	category adapterif.EventCategory,
+	subCategory adapterif.EventSubCategory,
+	eventType adapterif.EventType,
+	raisedTs int64) (*voltha.EventHeader, error) {
 	var header voltha.EventHeader
 	if strings.Contains(eventName, "_") {
 		eventName = strings.Join(strings.Split(eventName, "_")[:len(strings.Split(eventName, "_"))-2], "_")
@@ -73,9 +78,21 @@
 	header.SubCategory = subCategory
 	header.Type = eventType
 	header.TypeVersion = adapterif.EventTypeVersion
-	header.RaisedTs = float32(raisedTs)
-	header.ReportedTs = float32(time.Now().UnixNano())
-	return &header
+
+	// raisedTs is in nanoseconds
+	timestamp, err := ptypes.TimestampProto(time.Unix(0, raisedTs))
+	if err != nil {
+		return nil, err
+	}
+	header.RaisedTs = timestamp
+
+	timestamp, err = ptypes.TimestampProto(time.Now())
+	if err != nil {
+		return nil, err
+	}
+	header.ReportedTs = timestamp
+
+	return &header, nil
 }
 
 /* Send out device events*/
@@ -86,8 +103,11 @@
 	}
 	var event voltha.Event
 	var de voltha.Event_DeviceEvent
+	var err error
 	de.DeviceEvent = deviceEvent
-	event.Header = ep.getEventHeader(deviceEvent.DeviceEventName, category, subCategory, voltha.EventType_DEVICE_EVENT, raisedTs)
+	if event.Header, err = ep.getEventHeader(deviceEvent.DeviceEventName, category, subCategory, voltha.EventType_DEVICE_EVENT, raisedTs); err != nil {
+		return err
+	}
 	event.EventType = &de
 	if err := ep.sendEvent(&event); err != nil {
 		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
@@ -110,8 +130,11 @@
 	}
 	var event voltha.Event
 	var de voltha.Event_KpiEvent2
+	var err error
 	de.KpiEvent2 = kpiEvent
-	event.Header = ep.getEventHeader(id, category, subCategory, voltha.EventType_KPI_EVENT2, raisedTs)
+	if event.Header, err = ep.getEventHeader(id, category, subCategory, voltha.EventType_KPI_EVENT2, raisedTs); err != nil {
+		return err
+	}
 	event.EventType = &de
 	if err := ep.sendEvent(&event); err != nil {
 		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
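
The getEventHeader change above replaces the lossy float32 timestamps with protobuf Timestamps. A minimal sketch of the conversion it performs, assuming raisedTs is a Unix timestamp in nanoseconds:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	raisedTs := time.Now().UnixNano() // nanoseconds, as passed by the adapters
	ts, err := ptypes.TimestampProto(time.Unix(0, raisedTs))
	if err != nil {
		panic(err) // only possible for out-of-range times
	}
	fmt.Println(ts.Seconds, ts.Nanos) // lossless, unlike the old float32 cast
}
```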
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
index 78b8eb5..843b95c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
@@ -658,3 +658,80 @@
 	}
 	return deviceId.Val, port, nil
 }
+
+func (rhp *RequestHandlerProxy) Child_device_lost(args []*ic.Argument) error {
+	if len(args) < 4 {
+		logger.Warnw("invalid-number-of-args", log.Fields{"args": args})
+		return errors.New("invalid-number-of-args")
+	}
+
+	pDeviceId := &ic.StrType{}
+	pPortNo := &ic.IntType{}
+	onuID := &ic.IntType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "pDeviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
+				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				return err
+			}
+		case "pPortNo":
+			if err := ptypes.UnmarshalAny(arg.Value, pPortNo); err != nil {
+				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				return err
+			}
+		case "onuID":
+			if err := ptypes.UnmarshalAny(arg.Value, onuID); err != nil {
+				logger.Warnw("cannot-unmarshal-onuID", log.Fields{"error": err})
+				return err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(pDeviceId.Val, fromTopic.Val)
+	//Invoke the Child_device_lost API on the adapter
+	if err := rhp.adapter.Child_device_lost(pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
+		return status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return nil
+}
+
+func (rhp *RequestHandlerProxy) Start_omci_test(args []*ic.Argument) (*ic.TestResponse, error) {
+	if len(args) < 2 {
+		logger.Warnw("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	// TODO: See related comment in voltha-go:adapter_proxy_go:startOmciTest()
+	//   Second argument should perhaps be uuid instead of omcitestrequest
+
+	device := &voltha.Device{}
+	request := &voltha.OmciTestRequest{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "omcitestrequest":
+			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
+				logger.Warnw("cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(device, request)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return result, nil
+}
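
For reference, a sketch of the caller side (e.g. rw-core) building the arguments that Child_device_lost unpacks; the keys must match the handler's switch above, and the helper function itself is hypothetical:

```go
package example

import (
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
)

// childDeviceLostArgs builds the KV arguments for a Child_device_lost RPC.
// The handler also reads an optional kafka.FromTopic argument carrying the
// sender's reply topic, added separately by the caller.
func childDeviceLostArgs(parentDeviceID string, portNo, onuID uint32) []*kafka.KVArg {
	return []*kafka.KVArg{
		{Key: "pDeviceId", Value: &ic.StrType{Val: parentDeviceID}},
		{Key: "pPortNo", Value: &ic.IntType{Val: int64(portNo)}},
		{Key: "onuID", Value: &ic.IntType{Val: int64(onuID)}},
	}
}
```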
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
index b782ebe..94e8bd6 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
@@ -84,7 +84,7 @@
 	case ic.ErrorCode_DEADLINE_EXCEEDED:
 		return codes.DeadlineExceeded
 	default:
-		log.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		logger.Warnw("cannot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
 		return codes.Internal
 	}
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
index c0e44be..112fb94 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
@@ -51,4 +51,6 @@
 	Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
 	Enable_port(deviceId string, port *voltha.Port) error
 	Disable_port(deviceId string, port *voltha.Port) error
+	Child_device_lost(parentDeviceId string, parentPortNo uint32, onuID uint32) error
+	Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
 }
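
Adapters vendoring this interface must add the two new methods to keep compiling. A minimal sketch of no-op stubs; the receiver type OpenONUAC is a placeholder for the adapter's handler type:

```go
package example

import (
	"errors"

	"github.com/opencord/voltha-protos/v3/go/voltha"
)

type OpenONUAC struct{} // placeholder adapter handler

// Child_device_lost is a stub until the adapter implements real cleanup.
func (oo *OpenONUAC) Child_device_lost(parentDeviceId string, parentPortNo uint32, onuID uint32) error {
	return errors.New("unimplemented")
}

// Start_omci_test is a stub until OMCI self-test support lands.
func (oo *OpenONUAC) Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) {
	return nil, errors.New("unimplemented")
}
```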
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
new file mode 100644
index 0000000..42574d0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Default minimum interval for posting the alive state of the backend kvstore on the liveness channel
+	DefaultLivenessChannelInterval = time.Second * 30
+)
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	sync.RWMutex
+	Client                  kvstore.Client
+	StoreType               string
+	Host                    string
+	Port                    int
+	Timeout                 int
+	PathPrefix              string
+	alive                   bool          // Is this backend connection alive?
+	liveness                chan bool     // channel to post alive state
+	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+	lastLivenessTime        time.Time     // Instant of last alive state push
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(storeType string, host string, port int, timeout int, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:               storeType,
+		Host:                    host,
+		Port:                    port,
+		Timeout:                 timeout,
+		LivenessChannelInterval: DefaultLivenessChannelInterval,
+		PathPrefix:              pathPrefix,
+		alive:                   false, // connection considered down at start
+	}
+
+	address := host + ":" + strconv.Itoa(port)
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		logger.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "host": host, "port": port,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
+
+func (b *Backend) newClient(address string, timeout int) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+func (b *Backend) updateLiveness(alive bool) {
+	// Periodically push liveness data to the channel so that, while the
+	// store is alive, the consumer does not time out and force a liveness
+	// check. Push the alive state if the last push to the channel was more
+	// than LivenessChannelInterval ago
+	if b.liveness != nil {
+
+		if b.alive != alive {
+			logger.Debug("update-liveness-channel-reason-change")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
+			logger.Debug("update-liveness-channel-reason-interval")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Emit log message only for alive state change
+	if b.alive != alive {
+		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		b.alive = alive
+	}
+}
+
+// PerformLivenessCheck performs a dummy key lookup on the kvstore to test
+// connection liveness and posts the result on the liveness channel
+func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
+	alive := b.Client.IsConnectionUp(ctx)
+	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+	b.updateLiveness(alive)
+	return alive
+}
+
+// EnableLivenessChannel enables the liveness monitor channel. The channel
+// reports true or false on every kvstore operation, indicating whether or
+// not the connection is still live. The service (e.g. rw_core / ro_core)
+// picks this up to update its readiness status and/or take other actions.
+func (b *Backend) EnableLivenessChannel() chan bool {
+	logger.Debug("enable-kvstore-liveness-channel")
+
+	if b.liveness == nil {
+		logger.Debug("create-kvstore-liveness-channel")
+
+		// Channel size of 10 to avoid any possibility of blocking under load
+		b.liveness = make(chan bool, 10)
+
+		// Post initial alive state
+		b.liveness <- b.alive
+		b.lastLivenessTime = time.Now()
+	}
+
+	return b.liveness
+}
+
+// isErrorIndicatingAliveKvstore infers the alive status of the kvstore from the type of error
+func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
+	// Assume alive unless an error indicates otherwise
+	alive := true
+
+	if err != nil {
+
+		// timeout indicates kvstore not reachable/alive
+		if err == context.DeadlineExceeded {
+			alive = false
+		}
+
+		// Need to analyze client-specific errors based on backend type
+		if b.StoreType == "etcd" {
+
+			// For the etcd backend, consider not-alive only for errors indicating a
+			// timed-out request or an unavailable/corrupted cluster. For all remaining
+			// error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+			// we do not infer a not-alive backend because such an error may also
+			// occur due to bad client requests or sequences of operations
+			switch status.Code(err) {
+			case codes.DeadlineExceeded:
+				fallthrough
+			case codes.Unavailable:
+				fallthrough
+			case codes.DataLoss:
+				alive = false
+			}
+
+			// TODO: Implement a similar analysis for the consul backend,
+			// if it is ever needed.
+		}
+	}
+
+	return alive
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.List(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.Get(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Put stores an item value under the specified key
+func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("putting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Put(ctx, formattedPath, value)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(ctx context.Context, key string) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Delete(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(ctx, formattedPath, withPrefix)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(formattedPath, ch)
+}
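
A short usage sketch of the new Backend, including the liveness channel; the store coordinates, key, and value are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db"
)

func main() {
	b := db.NewBackend("etcd", "voltha-etcd", 2379, 5, "service/voltha")

	// Every KV operation refreshes the alive state; a service can feed this
	// channel into its readiness probe.
	live := b.EnableLivenessChannel()
	go func() {
		for alive := range live {
			fmt.Println("kvstore alive:", alive)
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Keys are prefixed with the configured PathPrefix by makePath.
	if err := b.Put(ctx, "adapters/brcm_openomci_onu_1", []byte("serialized-proto")); err != nil {
		fmt.Println("put failed:", err) // a timeout here also marks the store not-alive
	}
}
```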
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
new file mode 100644
index 0000000..1cf2e1c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package db
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
index d30e049..b9cb1ee 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
@@ -81,7 +81,7 @@
 	ReleaseReservation(ctx context.Context, key string) error
 	ReleaseAllReservations(ctx context.Context) error
 	RenewReservation(ctx context.Context, key string) error
-	Watch(ctx context.Context, key string) chan *Event
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
 	AcquireLock(ctx context.Context, lockName string, timeout int) error
 	ReleaseLock(lockName string) error
 	IsConnectionUp(ctx context.Context) bool // timeout in second
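
The new withPrefix flag selects between watching a single key and watching a whole subtree. A sketch of a prefix watch through this interface; the key and event handling are illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
)

// watchSubtree watches every key under the given prefix until ctx is done.
func watchSubtree(ctx context.Context, client kvstore.Client) {
	events := client.Watch(ctx, "service/voltha/devices", true) // withPrefix=true
	for ev := range events {
		// ev.EventType distinguishes PUT/DELETE; Key and Value are the raw pair.
		fmt.Println(ev.EventType, ev.Key)
	}
}
```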
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
index 2d2a6a6..aa7aeb0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Set up this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
index fdf39be..bdf2d10 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
@@ -360,7 +360,7 @@
 
 // Watch provides the watch capability on a given key.  It returns a channel onto which the caller needs to
 // listen to receive Events.
-func (c *ConsulClient) Watch(ctx context.Context, key string) chan *Event {
+func (c *ConsulClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
 
 	// Create a new channel
 	ch := make(chan *Event, maxClientChannelBufferSize)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
index a0f39cd..d38f0f6 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
@@ -29,19 +29,15 @@
 
 // EtcdClient represents the Etcd KV store client
 type EtcdClient struct {
-	ectdAPI          *v3Client.Client
-	leaderRev        v3Client.Client
-	keyReservations  map[string]*v3Client.LeaseID
-	watchedChannels  sync.Map
-	writeLock        sync.Mutex
-	lockToMutexMap   map[string]*v3Concurrency.Mutex
-	lockToSessionMap map[string]*v3Concurrency.Session
-	lockToMutexLock  sync.Mutex
+	ectdAPI             *v3Client.Client
+	keyReservations     map[string]*v3Client.LeaseID
+	watchedChannels     sync.Map
+	keyReservationsLock sync.RWMutex
+	lockToMutexMap      map[string]*v3Concurrency.Mutex
+	lockToSessionMap    map[string]*v3Concurrency.Session
+	lockToMutexLock     sync.Mutex
 }
 
-// Connection Timeout in Seconds
-var connTimeout int = 2
-
 // NewEtcdClient returns a new client for the Etcd KV store
 func NewEtcdClient(addr string, timeout int) (*EtcdClient, error) {
 	duration := GetDuration(timeout)
@@ -118,13 +114,13 @@
 		return fmt.Errorf("unexpected-type-%T", value)
 	}
 
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-
 	var err error
 	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
 	// that KV key permanent instead of automatically removing it after a lease expiration
-	if leaseID, ok := c.keyReservations[key]; ok {
+	c.keyReservationsLock.RLock()
+	leaseID, ok := c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+	if ok {
 		_, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
 	} else {
 		_, err = c.ectdAPI.Put(ctx, key, val)
@@ -150,9 +146,6 @@
 // wait for a response
 func (c *EtcdClient) Delete(ctx context.Context, key string) error {
 
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-
 	// delete the key
 	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
 		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
@@ -181,9 +174,9 @@
 		return nil, err
 	}
 	// Register the lease id
-	c.writeLock.Lock()
+	c.keyReservationsLock.Lock()
 	c.keyReservations[key] = &resp.ID
-	c.writeLock.Unlock()
+	c.keyReservationsLock.Unlock()
 
 	// Revoke lease if reservation is not successful
 	reservationSuccessful := false
@@ -239,8 +232,8 @@
 
 // ReleaseAllReservations releases all key reservations previously made (using Reserve API)
 func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
 
 	for key, leaseID := range c.keyReservations {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
@@ -259,8 +252,8 @@
 	logger.Debugw("Release-reservation", log.Fields{"key": key})
 	var ok bool
 	var leaseID *v3Client.LeaseID
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
 	if leaseID, ok = c.keyReservations[key]; !ok {
 		return nil
 	}
@@ -282,9 +275,11 @@
 	// Get the leaseid using the key
 	var ok bool
 	var leaseID *v3Client.LeaseID
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	if leaseID, ok = c.keyReservations[key]; !ok {
+	c.keyReservationsLock.RLock()
+	leaseID, ok = c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+
+	if !ok {
 		return errors.New("key-not-reserved")
 	}
 
@@ -302,10 +297,15 @@
 
 // Watch provides the watch capability on a given key.  It returns a channel onto which the caller needs to
 // listen to receive Events.
-func (c *EtcdClient) Watch(ctx context.Context, key string) chan *Event {
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
 	w := v3Client.NewWatcher(c.ectdAPI)
 	ctx, cancel := context.WithCancel(ctx)
-	channel := w.Watch(ctx, key)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
 
 	// Create a new channel
 	ch := make(chan *Event, maxClientChannelBufferSize)
@@ -371,8 +371,6 @@
 	// Get the array of channels mapping
 	var watchedChannels []map[chan *Event]v3Client.Watcher
 	var ok bool
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
 		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
@@ -424,8 +422,6 @@
 
 // Close closes the KV store client
 func (c *EtcdClient) Close() {
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
 	if err := c.ectdAPI.Close(); err != nil {
 		logger.Errorw("error-closing-client", log.Fields{"error": err})
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
index cb6acb2..149c150 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
@@ -19,16 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-const (
-	logLevel = log.ErrorLevel
-)
-
 var logger log.Logger
 
 func init() {
 	// Set up this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kafka"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
new file mode 100644
index 0000000..1258382
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"github.com/buraksezer/consistent"
+	"github.com/cespare/xxhash"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"sync"
+)
+
+const (
+	// All the values below can be tuned to get optimal data distribution.  The numbers below seem to work well when
+	// supporting 1000-10000 devices and 1-20 replicas of a service
+
+	// Keys are distributed among partitions. Prime numbers are good to distribute keys uniformly.
+	DefaultPartitionCount = 1117
+
+	// Represents how many times a node is replicated on the consistent ring.
+	DefaultReplicationFactor = 117
+
+	// Load is the balancing factor: a member's load may exceed the average partition load by at most this factor.
+	DefaultLoad = 1.1
+)
+
+type Endpoint string // Endpoint of a service instance.  When using kafka, this is the topic name of a service
+type ReplicaID int32 // The replication ID of a service instance
+
+type EndpointManager interface {
+
+	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
+	// now this will return the topic name
+	GetEndpoint(deviceID string, serviceType string) (Endpoint, error)
+
+	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
+	// devices owned by that service need to be reconciled
+	IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error)
+
+	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by
+	// tests only
+	GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error)
+}
+
+type service struct {
+	id             string // Id of the service.  The same id is used for all replicas
+	totalReplicas  int32
+	replicas       map[ReplicaID]Endpoint
+	consistentRing *consistent.Consistent
+}
+
+type endpointManager struct {
+	partitionCount           int
+	replicationFactor        int
+	load                     float64
+	backend                  *db.Backend
+	services                 map[string]*service
+	servicesLock             sync.RWMutex
+	deviceTypeServiceMap     map[string]string
+	deviceTypeServiceMapLock sync.RWMutex
+}
+
+type EndpointManagerOption func(*endpointManager)
+
+func PartitionCount(count int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.partitionCount = count
+	}
+}
+
+func ReplicationFactor(replicas int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.replicationFactor = replicas
+	}
+}
+
+func Load(load float64) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.load = load
+	}
+}
+
+func newEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	tm := &endpointManager{
+		partitionCount:       DefaultPartitionCount,
+		replicationFactor:    DefaultReplicationFactor,
+		load:                 DefaultLoad,
+		backend:              backend,
+		services:             make(map[string]*service),
+		deviceTypeServiceMap: make(map[string]string),
+	}
+
+	for _, option := range opts {
+		option(tm)
+	}
+	return tm
+}
+
+func NewEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	return newEndpointManager(backend, opts...)
+}
+
+func (ep *endpointManager) GetEndpoint(deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw("getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(deviceID, serviceType)
+	if err != nil {
+		return "", err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return "", status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	endpoint := m.getEndPoint()
+	if endpoint == "" {
+		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
+	}
+	logger.Debugw("returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	return endpoint, nil
+}
+
+func (ep *endpointManager) IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw("device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(deviceID, serviceType)
+	if err != nil {
+		return false, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return false, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica() == ReplicaID(replicaNumber), nil
+}
+
+func (ep *endpointManager) GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(deviceID, serviceType)
+	if err != nil {
+		return 0, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return 0, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica(), nil
+}
+
+func (ep *endpointManager) getOwner(deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(serviceType)
+	if err != nil {
+		return nil, err
+	}
+	key := ep.makeKey(deviceID, dType, serviceType)
+	return serv.consistentRing.LocateKey(key), nil
+}
+
+func (ep *endpointManager) getServiceAndDeviceType(serviceType string) (*service, string, error) {
+	// Check whether service exist
+	ep.servicesLock.RLock()
+	serv, serviceExist := ep.services[serviceType]
+	ep.servicesLock.RUnlock()
+
+	// Load the service and device types if needed
+	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+		if err := ep.loadServices(); err != nil {
+			return nil, "", err
+		}
+
+		// Check whether the service exists now
+		ep.servicesLock.RLock()
+		serv, serviceExist = ep.services[serviceType]
+		ep.servicesLock.RUnlock()
+		if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+			return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+		}
+	}
+
+	ep.deviceTypeServiceMapLock.RLock()
+	defer ep.deviceTypeServiceMapLock.RUnlock()
+	for dType, sType := range ep.deviceTypeServiceMap {
+		if sType == serviceType {
+			return serv, dType, nil
+		}
+	}
+	return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+}
+
+func (ep *endpointManager) getConsistentConfig() consistent.Config {
+	return consistent.Config{
+		PartitionCount:    ep.partitionCount,
+		ReplicationFactor: ep.replicationFactor,
+		Load:              ep.load,
+		Hasher:            hasher{},
+	}
+}
+
+// loadServices loads the services (adapters) and device types into memory. Because the data set is small and is
+// stored as binary protobuf in the dB, it is simpler to reload all the data when an inconsistency is detected,
+// instead of watching for updates in the dB and acting on them.
+func (ep *endpointManager) loadServices() error {
+	ep.servicesLock.Lock()
+	defer ep.servicesLock.Unlock()
+	ep.deviceTypeServiceMapLock.Lock()
+	defer ep.deviceTypeServiceMapLock.Unlock()
+
+	if ep.backend == nil {
+		return status.Error(codes.Aborted, "backend-not-set")
+	}
+	ep.services = make(map[string]*service)
+	ep.deviceTypeServiceMap = make(map[string]string)
+
+	// Load the adapters
+	blobs, err := ep.backend.List(context.Background(), "adapters")
+	if err != nil {
+		return err
+	}
+
+	// Data is marshalled as proto bytes in the data store
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		adapter := &voltha.Adapter{}
+		if err := proto.Unmarshal(data, adapter); err != nil {
+			return err
+		}
+		// A valid adapter should have the vendor field set
+		if adapter.Vendor != "" {
+			if _, ok := ep.services[adapter.Type]; !ok {
+				ep.services[adapter.Type] = &service{
+					id:             adapter.Type,
+					totalReplicas:  adapter.TotalReplicas,
+					replicas:       make(map[ReplicaID]Endpoint),
+					consistentRing: consistent.New(nil, ep.getConsistentConfig()),
+				}
+
+			}
+			currentReplica := ReplicaID(adapter.CurrentReplica)
+			endpoint := Endpoint(adapter.Endpoint)
+			ep.services[adapter.Type].replicas[currentReplica] = endpoint
+			ep.services[adapter.Type].consistentRing.Add(newMember(adapter.Id, adapter.Type, adapter.Vendor, endpoint, adapter.Version, currentReplica))
+		}
+	}
+	// Load the device types
+	blobs, err = ep.backend.List(context.Background(), "device_types")
+	if err != nil {
+		return err
+	}
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		deviceType := &voltha.DeviceType{}
+		if err := proto.Unmarshal(data, deviceType); err != nil {
+			return err
+		}
+		if _, ok := ep.deviceTypeServiceMap[deviceType.Id]; !ok {
+			ep.deviceTypeServiceMap[deviceType.Id] = deviceType.Adapter
+		}
+	}
+
+	// Log the loaded data in debug mode to facilitate troubleshooting
+	if logger.V(log.DebugLevel) {
+		for key, val := range ep.services {
+			members := val.consistentRing.GetMembers()
+			logger.Debugw("service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			for _, m := range members {
+				n := m.(Member)
+				logger.Debugw("service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+			}
+		}
+		logger.Debugw("device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+	}
+	return nil
+}
+
+// makeKey creates the string that the hash function uses to create the hash
+func (ep *endpointManager) makeKey(deviceID string, deviceType string, serviceType string) []byte {
+	return []byte(fmt.Sprintf("%s_%s_%s", serviceType, deviceType, deviceID))
+}
+
+// The consistent package requires a hasher function
+type hasher struct{}
+
+// Sum64 provides the hasher function.  Based upon numerous testing scenarios, the xxhash package seems to provide the
+// best distribution compared with other hash packages
+func (h hasher) Sum64(data []byte) uint64 {
+	return xxhash.Sum64(data)
+}
+
+// Member represents a member on the consistent ring
+type Member interface {
+	String() string
+	getReplica() ReplicaID
+	getEndPoint() Endpoint
+	getID() string
+	getServiceType() string
+}
+
+// member implements the Member interface
+type member struct {
+	id          string
+	serviceType string
+	vendor      string
+	version     string
+	replica     ReplicaID
+	endpoint    Endpoint
+}
+
+func newMember(ID string, serviceType string, vendor string, endPoint Endpoint, version string, replica ReplicaID) Member {
+	return &member{
+		id:          ID,
+		serviceType: serviceType,
+		vendor:      vendor,
+		version:     version,
+		replica:     replica,
+		endpoint:    endPoint,
+	}
+}
+
+func (m *member) String() string {
+	return string(m.endpoint)
+}
+
+func (m *member) getReplica() ReplicaID {
+	return m.replica
+}
+
+func (m *member) getEndPoint() Endpoint {
+	return m.endpoint
+}
+
+func (m *member) getID() string {
+	return m.id
+}
+
+func (m *member) getServiceType() string {
+	return m.serviceType
+}
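
A usage sketch of the endpoint manager: resolving the topic of the adapter replica that owns a device. The store coordinates, service type, and device ID are illustrative, and the options shown simply restate the defaults:

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v3/pkg/db"
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

func main() {
	backend := db.NewBackend("etcd", "voltha-etcd", 2379, 5, "service/voltha")

	// Ring parameters are tunable through functional options.
	mgr := kafka.NewEndpointManager(backend,
		kafka.PartitionCount(1117),
		kafka.ReplicationFactor(117),
		kafka.Load(1.1))

	endpoint, err := mgr.GetEndpoint("device-1234", "brcm_openomci_onu")
	if err != nil {
		panic(err)
	}
	fmt.Println("send requests to topic:", string(endpoint))
}
```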
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
index d21fdd5..fc2334d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
@@ -19,6 +19,8 @@
 	"context"
 	"errors"
 	"fmt"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 	"reflect"
 	"strings"
 	"sync"
@@ -34,7 +36,7 @@
 
 const (
 	DefaultMaxRetries     = 3
-	DefaultRequestTimeout = 10000 // 10000 milliseconds - to handle a wider latency range
+	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
 )
 
 const (
@@ -66,6 +68,7 @@
 	GetDefaultTopic() *Topic
 	DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error
 	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
+	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
 	SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error
 	SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error
 	UnSubscribeFromRequestHandler(topic Topic) error
@@ -189,9 +192,15 @@
 	kp.doneOnce.Do(func() { close(kp.doneCh) })
 	// TODO : Perform cleanup
 	kp.kafkaClient.Stop()
-	//kp.deleteAllTopicRequestHandlerChannelMap()
-	//kp.deleteAllTopicResponseChannelMap()
-	//kp.deleteAllTransactionIdToChannelMap()
+	err := kp.deleteAllTopicRequestHandlerChannelMap()
+	if err != nil {
+		logger.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+	}
+	err = kp.deleteAllTopicResponseChannelMap()
+	if err != nil {
+		logger.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+	}
+	kp.deleteAllTransactionIdToChannelMap()
 }
 
 func (kp *interContainerProxy) GetDefaultTopic() *Topic {
@@ -240,6 +249,104 @@
 	return nil
 }
 
+// InvokeAsyncRPC is used to make an RPC request asynchronously
+func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
+
+	logger.Debugw("InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+	// If a replyToTopic is provided then use it; otherwise fall back to the default topic. The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.GetDefaultTopic()
+	}
+
+	chnl := make(chan *RpcResponse)
+
+	go func() {
+
+		// once we're done,
+		// close the response channel
+		defer close(chnl)
+
+		var err error
+		var protoRequest *ic.InterContainerMessage
+
+		// Encode the request
+		protoRequest, err = encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
+		if err != nil {
+			logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			chnl <- NewResponse(RpcFormattingError, err, nil)
+			return
+		}
+
+		// Subscribe for response, if needed, before sending request
+		var ch <-chan *ic.InterContainerMessage
+		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// Send request - if the topic is formatted with a device Id then we will send the request using a
+		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+		logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+
+		// if the message cannot be sent on kafka, publish the error and close the channel
+		if err = kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// if the client is not waiting for a response send the ack and close the channel
+		chnl <- NewResponse(RpcSent, nil, nil)
+		if !waitForResponse {
+			return
+		}
+
+		defer func() {
+			// Remove the subscription for a response on return
+			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
+				logger.Warnw("invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			}
+		}()
+
+		// Wait for response as well as timeout or cancellation
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
+				return
+			}
+			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(msg); err != nil {
+				chnl <- NewResponse(RpcReply, err, nil)
+			} else {
+				if responseBody.Success {
+					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
+				} else {
+					// response body contains an error
+					unpackErr := &ic.Error{}
+					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
+						chnl <- NewResponse(RpcReply, err, nil)
+					} else {
+						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
+					}
+				}
+			}
+		case <-ctx.Done():
+			logger.Errorw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
+			chnl <- NewResponse(RpcTimeout, err, nil)
+		case <-kp.doneCh:
+			chnl <- NewResponse(RpcSystemClosing, nil, nil)
+			logger.Warnw("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+		}
+	}()
+	return chnl
+}
+
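
A sketch of consuming InvokeAsyncRPC's channel. The RpcResponse field names (MType, Err, Reply) are assumed from the library's utils and are not shown in this patch; the topics and device ID are illustrative:

```go
package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

func callAsync(kp kafka.InterContainerProxy) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	toTopic := kafka.Topic{Name: "brcm_openomci_onu_1"}
	replyTo := kafka.Topic{Name: "rwcore_1"}
	ch := kp.InvokeAsyncRPC(ctx, "process_inter_adapter_message", &toTopic, &replyTo, true, "device-1234")
	for resp := range ch {
		switch resp.MType {
		case kafka.RpcSent:
			// request is on the wire; keep waiting for the terminal message
		case kafka.RpcReply:
			// resp.Err / resp.Reply carry the outcome; the channel closes next
		default:
			// RpcTimeout, RpcTransportError, RpcSystemClosing: terminal failures
		}
	}
}
```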
 // InvokeRPC is used to send a request to a given topic
 func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
 	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
@@ -272,7 +379,14 @@
 	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
 	//key := GetDeviceIdFromTopic(*toTopic)
 	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
-	go kp.kafkaClient.Send(protoRequest, toTopic, key)
+	go func() {
+		if err := kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
+			logger.Errorw("send-failed", log.Fields{
+				"topic": toTopic,
+				"key":   key,
+				"error": err})
+		}
+	}()
 
 	if waitForResponse {
 		// Create a child context based on the parent context, if any
@@ -287,7 +401,13 @@
 
 		// Wait for response as well as timeout or cancellation
 		// Remove the subscription for a response on return
-		defer kp.unSubscribeForResponse(protoRequest.Header.Id)
+		defer func() {
+			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
+				logger.Errorw("response-unsubscribe-failed", log.Fields{
+					"id":    protoRequest.Header.Id,
+					"error": err})
+			}
+		}()
 		select {
 		case msg, ok := <-ch:
 			if !ok {
@@ -378,23 +498,6 @@
 	return kp.deleteFromTopicRequestHandlerChannelMap(topic.Name)
 }
 
-// setupTopicResponseChannelMap sets up single consumers channel that will act as a broadcast channel for all
-// responses from that topic.
-func (kp *interContainerProxy) setupTopicResponseChannelMap(topic string, arg <-chan *ic.InterContainerMessage) {
-	kp.lockTopicResponseChannelMap.Lock()
-	defer kp.lockTopicResponseChannelMap.Unlock()
-	if _, exist := kp.topicToResponseChannelMap[topic]; !exist {
-		kp.topicToResponseChannelMap[topic] = arg
-	}
-}
-
-func (kp *interContainerProxy) isTopicSubscribedForResponse(topic string) bool {
-	kp.lockTopicResponseChannelMap.RLock()
-	defer kp.lockTopicResponseChannelMap.RUnlock()
-	_, exist := kp.topicToResponseChannelMap[topic]
-	return exist
-}
-
 func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(topic string) error {
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
@@ -407,22 +510,31 @@
 		delete(kp.topicToResponseChannelMap, topic)
 		return err
 	} else {
-		return errors.New(fmt.Sprintf("%s-Topic-not-found", topic))
+		return fmt.Errorf("%s-Topic-not-found", topic)
 	}
 }
 
+// nolint: unused
 func (kp *interContainerProxy) deleteAllTopicResponseChannelMap() error {
+	logger.Debug("delete-all-topic-response-channel")
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
-	var err error
-	for topic, _ := range kp.topicToResponseChannelMap {
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToResponseChannelMap {
 		// Unsubscribe from this topic first - this will close the subscribed channel
-		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
 			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue trying to unsubscribe from the other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToResponseChannelMap, topic)
 		}
-		delete(kp.topicToResponseChannelMap, topic)
 	}
-	return err
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
 }
 
 func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
@@ -438,26 +550,37 @@
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
 	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
 		// Close the kafka client first by unsubscribing from this topic
-		kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch)
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			return err
+		}
 		delete(kp.topicToRequestHandlerChannelMap, topic)
 		return nil
 	} else {
-		return errors.New(fmt.Sprintf("%s-Topic-not-found", topic))
+		return fmt.Errorf("%s-Topic-not-found", topic)
 	}
 }
 
+// nolint: unused
 func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap() error {
+	logger.Debug("delete-all-topic-request-channel")
 	kp.lockTopicRequestHandlerChannelMap.Lock()
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	var err error
-	for topic, _ := range kp.topicToRequestHandlerChannelMap {
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToRequestHandlerChannelMap {
 		// Close the kafka client first by unsubscribing from this topic
-		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
 			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue trying to unsubscribe from the other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToRequestHandlerChannelMap, topic)
 		}
-		delete(kp.topicToRequestHandlerChannelMap, topic)
 	}
-	return err
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
 }
 
 func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
@@ -489,7 +612,9 @@
 	}
 }
 
+// nolint: unused
 func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap() {
+	logger.Debug("delete-all-transaction-id-channel-map")
 	kp.lockTransactionIdToChannelMap.Lock()
 	defer kp.lockTransactionIdToChannelMap.Unlock()
 	for key, value := range kp.transactionIdToChannelMap {
@@ -575,11 +700,12 @@
 	// Go over all returned values
 	var marshalledReturnedVal *any.Any
 	var err error
-	for _, returnVal := range returnedValues {
-		if marshalledReturnedVal, err = encodeReturnedValue(returnVal); err != nil {
+
+	// for now we support only one returned value (excluding the error)
+	if len(returnedValues) > 0 {
+		if marshalledReturnedVal, err = encodeReturnedValue(returnedValues[0]); err != nil {
 			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
 		}
-		break // for now we support only 1 returned value - (excluding the error)
 	}
 
 	responseBody := &ic.InterContainerResponseBody{
@@ -730,7 +856,14 @@
 			key := msg.Header.KeyTopic
 			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
 			// TODO: handle error response.
-			go kp.kafkaClient.Send(icm, replyTopic, key)
+			go func() {
+				if err := kp.kafkaClient.Send(icm, replyTopic, key); err != nil {
+					logger.Errorw("send-reply-failed", log.Fields{
+						"topic": replyTopic,
+						"key":   key,
+						"error": err})
+				}
+			}()
 		}
 	} else if msg.Header.Type == ic.MessageType_RESPONSE {
 		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
@@ -767,7 +900,8 @@
 
 	// Create a specific channel for this consumer.  We cannot use the channel from the kafkaclient as it will
 	// broadcast any message for this topic to all channels waiting on it.
-	ch := make(chan *ic.InterContainerMessage)
+	// Set channel size to 1 to prevent deadlock, see VOL-2708
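+	// (with no buffer, a response that arrives after the waiter has timed out
+	// and stopped reading would block the dispatching goroutine forever)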
+	ch := make(chan *ic.InterContainerMessage, 1)
 	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
 
 	return ch, nil
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
index c0c16f9..deb72fd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
@@ -32,8 +32,6 @@
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
-type returnErrorFunction func() error
-
 // consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
 // topic, the consumer(s) broadcasts the message to all the listening channels.   The consumer can be a partition
 // consumer or a group consumer
@@ -48,7 +46,6 @@
 // SaramaClient represents the messaging proxy
 type SaramaClient struct {
 	cAdmin                        sarama.ClusterAdmin
-	client                        sarama.Client
 	KafkaHost                     string
 	KafkaPort                     int
 	producer                      sarama.AsyncProducer
@@ -478,7 +475,7 @@
 			logger.Info("update-liveness-channel-because-change")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
-		} else if time.Now().Sub(sc.lastLivenessTime) > sc.livenessChannelInterval {
+		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
 			logger.Info("update-liveness-channel-because-interval")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
@@ -558,7 +555,7 @@
 	// ascertain the value interface type is a proto.Message
 	if protoMsg, ok = msg.(proto.Message); !ok {
 		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
-		return errors.New(fmt.Sprintf("not-a-proto-msg-%s", msg))
+		return fmt.Errorf("not-a-proto-msg-%s", msg)
 	}
 
 	var marshalled []byte
@@ -740,14 +737,6 @@
 	}
 }
 
-func (sc *SaramaClient) deleteFromTopicToConsumerChannelMap(id string) {
-	sc.lockTopicToConsumerChannelMap.Lock()
-	defer sc.lockTopicToConsumerChannelMap.Unlock()
-	if _, exist := sc.topicToConsumerChannelMap[id]; exist {
-		delete(sc.topicToConsumerChannelMap, id)
-	}
-}
-
 func (sc *SaramaClient) getConsumerChannel(topic *Topic) *consumerChannels {
 	sc.lockTopicToConsumerChannelMap.RLock()
 	defer sc.lockTopicToConsumerChannelMap.RUnlock()
@@ -838,24 +827,6 @@
 	return nil
 }
 
-func (sc *SaramaClient) clearConsumerChannelMap() error {
-	sc.lockTopicToConsumerChannelMap.Lock()
-	defer sc.lockTopicToConsumerChannelMap.Unlock()
-	var err error
-	for topic, consumerCh := range sc.topicToConsumerChannelMap {
-		for _, ch := range consumerCh.channels {
-			// Channel will be closed in the removeChannel method
-			removeChannel(consumerCh.channels, ch)
-		}
-		if errTemp := closeConsumers(consumerCh.consumers); errTemp != nil {
-			err = errTemp
-		}
-		//err = consumerCh.consumers.Close()
-		delete(sc.topicToConsumerChannelMap, topic)
-	}
-	return err
-}
-
 //createPublisher creates the publisher which is used to send a message onto kafka
 func (sc *SaramaClient) createPublisher() error {
 	// This Creates the publisher
@@ -1082,7 +1053,13 @@
 	sc.addTopicToConsumerChannelMap(topic.Name, cc)
 
 	//Start consumers to listen on that specific topic
-	go sc.startConsumers(topic)
+	go func() {
+		if err := sc.startConsumers(topic); err != nil {
+			logger.Errorw("start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
 
 	return consumerListeningChannel, nil
 }
@@ -1109,7 +1086,13 @@
 	sc.addTopicToConsumerChannelMap(topic.Name, cc)
 
 	//Start consumers to listen on that specific topic
-	go sc.startConsumers(topic)
+	go func() {
+		if err := sc.startConsumers(topic); err != nil {
+			logger.Errorw("start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
 
 	return consumerListeningChannel, nil
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
index 0cb9535..bdc615f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
@@ -15,7 +15,10 @@
  */
 package kafka
 
-import "strings"
+import (
+	"github.com/golang/protobuf/ptypes/any"
+	"strings"
+)
 
 const (
 	TopicSeparator = "_"
@@ -36,6 +39,31 @@
 	Value interface{}
 }
 
+type RpcMType int
+
+const (
+	RpcFormattingError RpcMType = iota
+	RpcSent
+	RpcReply
+	RpcTimeout
+	RpcTransportError
+	RpcSystemClosing
+)
+
+type RpcResponse struct {
+	MType RpcMType
+	Err   error
+	Reply *any.Any
+}
+
+func NewResponse(messageType RpcMType, err error, body *any.Any) *RpcResponse {
+	return &RpcResponse{
+		MType: messageType,
+		Err:   err,
+		Reply: body,
+	}
+}
+
 // TODO:  Remove and provide a better way to get the device id
 // GetDeviceIdFromTopic extract the deviceId from the topic name.  The topic name is formatted either as:
 //			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
index 43567e3..d0169bd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
@@ -50,9 +50,11 @@
 	"strings"
 )
 
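+// LogLevel is a typed log severity.  Using a named type rather than a bare
+// int lets the compiler catch accidental mixing with unrelated integer values.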
+type LogLevel int8
+
 const (
 	// DebugLevel logs a message at debug level
-	DebugLevel = iota
+	DebugLevel = LogLevel(iota)
 	// InfoLevel logs a message at info level
 	InfoLevel
 	// WarnLevel logs a message at warning level
@@ -106,10 +108,10 @@
 	Warningf(string, ...interface{})
 
 	// V reports whether verbosity level l is at least the requested verbose level.
-	V(l int) bool
+	V(l LogLevel) bool
 
 	//Returns the log level of this specific logger
-	GetLogLevel() int
+	GetLogLevel() LogLevel
 }
 
 // Fields is used as key-value pairs for structured logging
@@ -127,7 +129,7 @@
 	packageName string
 }
 
-func intToAtomicLevel(l int) zp.AtomicLevel {
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
 	switch l {
 	case DebugLevel:
 		return zp.NewAtomicLevelAt(zc.DebugLevel)
@@ -143,7 +145,7 @@
 	return zp.NewAtomicLevelAt(zc.ErrorLevel)
 }
 
-func intToLevel(l int) zc.Level {
+func logLevelToLevel(l LogLevel) zc.Level {
 	switch l {
 	case DebugLevel:
 		return zc.DebugLevel
@@ -159,7 +161,7 @@
 	return zc.ErrorLevel
 }
 
-func levelToInt(l zc.Level) int {
+func levelToLogLevel(l zc.Level) LogLevel {
 	switch l {
 	case zc.DebugLevel:
 		return DebugLevel
@@ -175,25 +177,41 @@
 	return ErrorLevel
 }
 
-func StringToInt(l string) int {
-	switch l {
+func StringToLogLevel(l string) (LogLevel, error) {
+	switch strings.ToUpper(l) {
 	case "DEBUG":
-		return DebugLevel
+		return DebugLevel, nil
 	case "INFO":
-		return InfoLevel
+		return InfoLevel, nil
 	case "WARN":
-		return WarnLevel
+		return WarnLevel, nil
 	case "ERROR":
-		return ErrorLevel
+		return ErrorLevel, nil
 	case "FATAL":
-		return FatalLevel
+		return FatalLevel, nil
 	}
-	return ErrorLevel
+	return 0, errors.New("Given LogLevel is invalid: " + l)
 }
 
-func getDefaultConfig(outputType string, level int, defaultFields Fields) zp.Config {
+func LogLevelToString(l LogLevel) (string, error) {
+	switch l {
+	case DebugLevel:
+		return "DEBUG", nil
+	case InfoLevel:
+		return "INFO", nil
+	case WarnLevel:
+		return "WARN", nil
+	case ErrorLevel:
+		return "ERROR", nil
+	case FatalLevel:
+		return "FATAL", nil
+	}
+	return "", errors.New("Given LogLevel is invalid " + string(l))
+}
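+
+// Round-trip sketch: unlike the old int-based helpers, which silently fell
+// back to ErrorLevel, these variants surface invalid names to the caller.
+//
+//	level, err := StringToLogLevel("debug") // matching is case-insensitive
+//	if err != nil {
+//		level = ErrorLevel // explicit fallback
+//	}
+//	name, _ := LogLevelToString(level) // "DEBUG"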
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
 	return zp.Config{
-		Level:            intToAtomicLevel(level),
+		Level:            logLevelToAtomicLevel(level),
 		Encoding:         outputType,
 		Development:      true,
 		OutputPaths:      []string{"stdout"},
@@ -203,6 +221,7 @@
 			LevelKey:       "level",
 			MessageKey:     "msg",
 			TimeKey:        "ts",
+			CallerKey:      "caller",
 			StacktraceKey:  "stacktrace",
 			LineEnding:     zc.DefaultLineEnding,
 			EncodeLevel:    zc.LowercaseLevelEncoder,
@@ -215,11 +234,11 @@
 
 // SetDefaultLogger needs to be invoked before the logger API can be invoked.  This function
 // initializes the default logger (zap's sugaredlogger)
-func SetDefaultLogger(outputType string, level int, defaultFields Fields) (Logger, error) {
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (Logger, error) {
 	// Build a custom config using zap
 	cfg = getDefaultConfig(outputType, level, defaultFields)
 
-	l, err := cfg.Build()
+	l, err := cfg.Build(zp.AddCallerSkip(1))
 	if err != nil {
 		return nil, err
 	}
@@ -241,7 +260,7 @@
 // be available to it, notably log tracing with filename.functionname.linenumber annotation.
 //
 // pkgNames parameter should be used for testing only as this function detects the caller's package.
-func AddPackage(outputType string, level int, defaultFields Fields, pkgNames ...string) (Logger, error) {
+func AddPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (Logger, error) {
 	if cfgs == nil {
 		cfgs = make(map[string]zp.Config)
 	}
@@ -264,7 +283,7 @@
 
 	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
 
-	l, err := cfgs[pkgName].Build()
+	l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
 	if err != nil {
 		return nil, err
 	}
@@ -286,16 +305,14 @@
 			}
 			cfg.InitialFields[k] = v
 		}
-		l, err := cfg.Build()
+		l, err := cfg.Build(zp.AddCallerSkip(1))
 		if err != nil {
 			return err
 		}
 
-		loggers[pkgName] = &logger{
-			log:         l.Sugar(),
-			parent:      l,
-			packageName: pkgName,
-		}
+		// Update the existing zap logger instance
+		loggers[pkgName].log = l.Sugar()
+		loggers[pkgName].parent = l
 	}
 	return nil
 }
@@ -311,19 +328,16 @@
 	return keys
 }
 
-// UpdateLogger deletes the logger associated with a caller's package and creates a new logger with the
-// defaultFields.  If a calling package is holding on to a Logger reference obtained from AddPackage invocation, then
-// that package needs to invoke UpdateLogger if it needs to make changes to the default fields and obtain a new logger
-// reference
-func UpdateLogger(defaultFields Fields) (Logger, error) {
+// UpdateLogger updates the logger associated with the caller's package using the supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
 	pkgName, _, _, _ := getCallerInfo()
 	if _, exist := loggers[pkgName]; !exist {
-		return nil, errors.New(fmt.Sprintf("package-%s-not-registered", pkgName))
+		return fmt.Errorf("package-%s-not-registered", pkgName)
 	}
 
 	// Build a new logger
 	if _, exist := cfgs[pkgName]; !exist {
-		return nil, errors.New(fmt.Sprintf("config-%s-not-registered", pkgName))
+		return fmt.Errorf("config-%s-not-registered", pkgName)
 	}
 
 	cfg := cfgs[pkgName]
@@ -333,21 +347,19 @@
 		}
 		cfg.InitialFields[k] = v
 	}
-	l, err := cfg.Build()
+	l, err := cfg.Build(zp.AddCallerSkip(1))
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	// Set the logger
-	loggers[pkgName] = &logger{
-		log:         l.Sugar(),
-		parent:      l,
-		packageName: pkgName,
-	}
-	return loggers[pkgName], nil
+	// Update the existing zap logger instance
+	loggers[pkgName].log = l.Sugar()
+	loggers[pkgName].parent = l
+
+	return nil
 }
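+
+// Usage sketch (field name illustrative): because the logger is now updated
+// in place, packages holding a Logger reference from AddPackage keep using it
+// and only need to check the returned error.
+//
+//	if err := log.UpdateLogger(log.Fields{"instance-id": id}); err != nil {
+//		logger.Warnw("logger-update-failed", log.Fields{"error": err})
+//	}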
 
-func setLevel(cfg zp.Config, level int) {
+func setLevel(cfg zp.Config, level LogLevel) {
 	switch level {
 	case DebugLevel:
 		cfg.Level.SetLevel(zc.DebugLevel)
@@ -366,7 +378,7 @@
 
 //SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
 // application level during debugging
-func SetPackageLogLevel(packageName string, level int) {
+func SetPackageLogLevel(packageName string, level LogLevel) {
 	// Get proper config
 	if cfg, ok := cfgs[packageName]; ok {
 		setLevel(cfg, level)
@@ -374,7 +386,7 @@
 }
 
 //SetAllLogLevel sets the log level of all registered packages to level
-func SetAllLogLevel(level int) {
+func SetAllLogLevel(level LogLevel) {
 	// Get proper config
 	for _, cfg := range cfgs {
 		setLevel(cfg, level)
@@ -382,7 +394,7 @@
 }
 
 //GetPackageLogLevel returns the current log level of a package.
-func GetPackageLogLevel(packageName ...string) (int, error) {
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
 	var name string
 	if len(packageName) == 1 {
 		name = packageName[0]
@@ -390,21 +402,21 @@
 		name, _, _, _ = getCallerInfo()
 	}
 	if cfg, ok := cfgs[name]; ok {
-		return levelToInt(cfg.Level.Level()), nil
+		return levelToLogLevel(cfg.Level.Level()), nil
 	}
-	return 0, errors.New(fmt.Sprintf("unknown-package-%s", name))
+	return 0, fmt.Errorf("unknown-package-%s", name)
 }
 
 //GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
-func GetDefaultLogLevel() int {
-	return levelToInt(cfg.Level.Level())
+func GetDefaultLogLevel() LogLevel {
+	return levelToLogLevel(cfg.Level.Level())
 }
 
 //SetLogLevel sets the log level for the logger corresponding to the caller's package
-func SetLogLevel(level int) error {
+func SetLogLevel(level LogLevel) error {
 	pkgName, _, _, _ := getCallerInfo()
 	if _, exist := cfgs[pkgName]; !exist {
-		return errors.New(fmt.Sprintf("unregistered-package-%s", pkgName))
+		return fmt.Errorf("unregistered-package-%s", pkgName)
 	}
 	cfg := cfgs[pkgName]
 	setLevel(cfg, level)
@@ -412,7 +424,7 @@
 }
 
 //SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
-func SetDefaultLogLevel(level int) {
+func SetDefaultLogLevel(level LogLevel) {
 	setLevel(cfg, level)
 }
 
@@ -487,11 +499,11 @@
 }
 
 func getPackageLevelSugaredLogger() *zp.SugaredLogger {
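+	// Caller annotation is now emitted by zap itself (CallerKey in the encoder
+	// config plus AddCallerSkip(1) at Build time), so the manual
+	// With("caller", ...) decoration was dropped from the returns below.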
-	pkgName, fileName, funcName, line := getCallerInfo()
+	pkgName, _, _, _ := getCallerInfo()
 	if _, exist := loggers[pkgName]; exist {
-		return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+		return loggers[pkgName].log
 	}
-	return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+	return defaultLogger.log
 }
 
 func getPackageLevelLogger() Logger {
@@ -641,13 +653,13 @@
 }
 
 // V reports whether verbosity level l is at least the requested verbose level.
-func (l logger) V(level int) bool {
-	return l.parent.Core().Enabled(intToLevel(level))
+func (l logger) V(level LogLevel) bool {
+	return l.parent.Core().Enabled(logLevelToLevel(level))
 }
 
 // GetLogLevel returns the current level of the logger
-func (l logger) GetLogLevel() int {
-	return levelToInt(cfgs[l.packageName].Level.Level())
+func (l logger) GetLogLevel() LogLevel {
+	return levelToLogLevel(cfgs[l.packageName].Level.Level())
 }
 
 // With returns a logger initialized with the key-value pairs
@@ -776,11 +788,11 @@
 }
 
 // V reports whether verbosity level l is at least the requested verbose level.
-func V(level int) bool {
+func V(level LogLevel) bool {
 	return getPackageLevelLogger().V(level)
 }
 
 //GetLogLevel returns the log level of the invoking package
-func GetLogLevel() int {
+func GetLogLevel() LogLevel {
 	return getPackageLevelLogger().GetLogLevel()
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
new file mode 100644
index 0000000..211419d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
index 9f00953..e89d5bc 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
@@ -118,7 +118,7 @@
 	for _, name := range names {
 		if _, ok := p.status[name]; !ok {
 			p.status[name] = ServiceStatusUnknown
-			log.Debugw("probe-service-registered", log.Fields{"service-name": name})
+			logger.Debugw("probe-service-registered", log.Fields{"service-name": name})
 		}
 	}
 
@@ -161,7 +161,7 @@
 	} else {
 		p.isHealthy = defaultHealthFunc(p.status)
 	}
-	log.Debugw("probe-service-status-updated",
+	logger.Debugw("probe-service-status-updated",
 		log.Fields{
 			"service-name": name,
 			"status":       status.String(),
@@ -231,15 +231,26 @@
 	p.mutex.RLock()
 	defer p.mutex.RUnlock()
 	w.Header().Set("Content-Type", "application/json")
-	w.Write([]byte("{"))
+	if _, err := w.Write([]byte("{")); err != nil {
+		logger.Errorw("write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
 	comma := ""
 	for c, s := range p.status {
-		w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String())))
+		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+			logger.Errorw("write-response", log.Fields{"error": err})
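+			// best-effort: once part of the body has been written the status
+			// line is already committed, so this WriteHeader cannot change it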
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
 		comma = ", "
 	}
-	w.Write([]byte("}"))
+	if _, err := w.Write([]byte("}")); err != nil {
+		logger.Errorw("write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
 	w.WriteHeader(http.StatusOK)
-
 }
 
 // ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
@@ -258,7 +269,7 @@
 		Addr:    address,
 		Handler: mux,
 	}
-	log.Fatal(s.ListenAndServe())
+	logger.Fatal(s.ListenAndServe())
 }
 
 func (p *Probe) IsReady() bool {