[VOL-4183] Multi UNI

voltha-lib-go version changed

Unused constant removed

Before sending trap flows to the OLT device, rw-core sets the correct meter ID on the flow.

Change-Id: Id16afc685161dee560b62cc6c045c44f0bc4a427
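
As a hedged sketch of the trap-flow change described above (this helper and all names in it are illustrative, not rw-core's actual code), setting the meter ID on a flow using the vendored openflow_13 types might look like:

package example

import (
	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
)

// setMeterID is a hypothetical helper: it overwrites the flow's existing
// meter instruction, or appends one if the flow has none.
func setMeterID(flow *ofp.OfpFlowStats, meterID uint32) {
	for _, inst := range flow.Instructions {
		if inst.GetMeter() != nil {
			inst.Data = &ofp.OfpInstruction_Meter{Meter: &ofp.OfpInstructionMeter{MeterId: meterID}}
			return
		}
	}
	flow.Instructions = append(flow.Instructions, &ofp.OfpInstruction{
		Type: uint32(ofp.OfpInstructionType_OFPIT_METER),
		Data: &ofp.OfpInstruction_Meter{Meter: &ofp.OfpInstructionMeter{MeterId: meterID}},
	})
}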
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
new file mode 100644
index 0000000..c514d6d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/golang/protobuf/proto"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+// AdapterProxy is the interface an adapter proxy implementation provides for inter-adapter messaging.
+type AdapterProxy interface {
+	SendInterAdapterMessage(ctx context.Context,
+		msg proto.Message,
+		msgType ic.InterAdapterMessageType_Types,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string,
+		messageID string) error
+	TechProfileInstanceRequest(ctx context.Context,
+		tpPath string,
+		ponIntfID uint32,
+		onuID uint32,
+		uniID uint32,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string) (*ic.InterAdapterTechProfileDownloadMessage, error)
+}
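
For orientation, a sketch of how an ONU adapter might call this interface; the topics, IDs, and tech profile path below are assumptions for illustration, not values taken from this change:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
)

// fetchTechProfile shows the call shape only; every argument value is made up.
func fetchTechProfile(ctx context.Context, ap adapterif.AdapterProxy) error {
	tpMsg, err := ap.TechProfileInstanceRequest(ctx,
		"XGS-PON/64/uni-0",  // tpPath (hypothetical)
		0,                   // ponIntfID
		1,                   // onuID
		0,                   // uniID
		"brcm_openomci_onu", // fromAdapter topic (illustrative)
		"openolt",           // toAdapter topic (illustrative)
		"olt-device-id",     // toDeviceID (hypothetical)
		"olt-device-id")     // proxyDeviceID (hypothetical)
	if err != nil {
		return err
	}
	_ = tpMsg // InterAdapterTechProfileDownloadMessage carrying the instance
	return nil
}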
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
new file mode 100644
index 0000000..36939bd
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// CoreProxy is the interface adapters use to reach the voltha-go core.
+type CoreProxy interface {
+	UpdateCoreReference(deviceID string, coreReference string)
+	DeleteCoreReference(deviceID string)
+	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
+	DeviceUpdate(ctx context.Context, device *voltha.Device) error
+	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
+	PortsStateUpdate(ctx context.Context, deviceID string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error
+	DeleteAllPorts(ctx context.Context, deviceID string) error
+	GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error)
+	ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error)
+	DeviceStateUpdate(ctx context.Context, deviceID string,
+		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
+
+	DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error
+	ChildDeviceDetected(ctx context.Context, parentDeviceID string, parentPortNo int,
+		childDeviceType string, channelID int, vendorID string, serialNumber string, onuID int64) (*voltha.Device, error)
+
+	ChildDevicesLost(ctx context.Context, parentDeviceID string) error
+	ChildDevicesDetected(ctx context.Context, parentDeviceID string) error
+	GetDevice(ctx context.Context, parentDeviceID string, deviceID string) (*voltha.Device, error)
+	GetChildDevice(ctx context.Context, parentDeviceID string, kwargs map[string]interface{}) (*voltha.Device, error)
+	GetChildDevices(ctx context.Context, parentDeviceID string) (*voltha.Devices, error)
+	SendPacketIn(ctx context.Context, deviceID string, port uint32, pktPayload []byte) error
+	DeviceReasonUpdate(ctx context.Context, deviceID string, deviceReason string) error
+	PortStateUpdate(ctx context.Context, deviceID string, pType voltha.Port_PortType, portNo uint32,
+		operStatus voltha.OperStatus_Types) error
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
new file mode 100644
index 0000000..fc31041
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"google.golang.org/grpc/status"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+type AdapterProxy struct {
+	kafkaICProxy kafka.InterContainerProxy
+	coreTopic    string
+	endpointMgr  kafka.EndpointManager
+}
+
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
+	proxy := AdapterProxy{
+		kafkaICProxy: kafkaProxy,
+		coreTopic:    coreTopic,
+		endpointMgr:  kafka.NewEndpointManager(backend),
+	}
+	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
+	return &proxy
+}
+
+func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
+	msg proto.Message,
+	msgType ic.InterAdapterMessageType_Types,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string,
+	messageId string) error {
+	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	//Marshal the message
+	var marshalledMsg *any.Any
+	var err error
+	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+		return err
+	}
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return err
+	}
+
+	//Build the inter adapter message
+	header := &ic.InterAdapterHeader{
+		Type:          msgType,
+		FromTopic:     fromAdapter,
+		ToTopic:       string(endpoint),
+		ToDeviceId:    toDeviceId,
+		ProxyDeviceId: proxyDeviceId,
+	}
+	if messageId != "" {
+		header.Id = messageId
+	} else {
+		header.Id = uuid.New().String()
+	}
+	header.Timestamp = ptypes.TimestampNow()
+	iaMsg := &ic.InterAdapterMessage{
+		Header: header,
+		Body:   marshalledMsg,
+	}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: iaMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_inter_adapter_message"
+
+	// Add an indication in the context to differentiate this inter-adapter message during span processing in the Kafka IC proxy
+	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *AdapterProxy) TechProfileInstanceRequest(ctx context.Context,
+	tpPath string,
+	parentPonPort uint32,
+	onuID uint32,
+	uniID uint32,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	logger.Debugw(ctx, "sending-tech-profile-instance-request-message", log.Fields{"from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return nil, err
+	}
+
+	//Build the inter adapter message
+	tpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{
+		TpInstancePath: tpPath,
+		ParentDeviceId: toDeviceId,
+		ParentPonPort:  parentPonPort,
+		OnuId:          onuID,
+		UniId:          uniID,
+	}
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: tpReqMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_tech_profile_instance_request"
+
+	ctx = context.WithValue(ctx, "inter-adapter-tp-req-msg", tpPath)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	if success {
+		tpDwnldMsg := &ic.InterAdapterTechProfileDownloadMessage{}
+		if err := ptypes.UnmarshalAny(result, tpDwnldMsg); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, err
+		}
+		return tpDwnldMsg, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "TechProfileInstanceRequest-return", log.Fields{"tpPath": tpPath, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
new file mode 100644
index 0000000..98085bb
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
new file mode 100644
index 0000000..589d951
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
@@ -0,0 +1,689 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"sync"
+
+	"github.com/golang/protobuf/ptypes"
+	a "github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type CoreProxy struct {
+	kafkaICProxy        kafka.InterContainerProxy
+	adapterTopic        string
+	coreTopic           string
+	deviceIdCoreMap     map[string]string
+	lockDeviceIdCoreMap sync.RWMutex
+}
+
+func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+	var proxy CoreProxy
+	proxy.kafkaICProxy = kafkaProxy
+	proxy.adapterTopic = adapterTopic
+	proxy.coreTopic = coreTopic
+	proxy.deviceIdCoreMap = make(map[string]string)
+	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
+	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+
+	return &proxy
+}
+
+func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
+	if success {
+		return nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "device-id": deviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
+	}
+}
+
+// UpdateCoreReference adds or updates a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	ap.deviceIdCoreMap[deviceId] = coreReference
+}
+
+// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	delete(ap.deviceIdCoreMap, deviceId)
+}
+
+func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+
+	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
+		return kafka.Topic{Name: t}
+	}
+
+	return kafka.Topic{Name: ap.coreTopic}
+}
+
+func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
+	return kafka.Topic{Name: ap.adapterTopic}
+}
+
+func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
+	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	rpc := "Register"
+	topic := kafka.Topic{Name: ap.coreTopic}
+	replyToTopic := ap.getAdapterTopic()
+	args := make([]*kafka.KVArg, 2)
+
+	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
+		logger.Fatal(ctx, "totalReplicas can't be 0, since you're here you have at least one")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
+		logger.Fatal(ctx, "currentReplica can't be 0, it has to start from 1")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
+		// if the adapter is not setting these fields they default to 0,
+		// in that case it means the adapter is not ready to be scaled and thus it defaults
+		// to a single instance
+		adapter.CurrentReplica = 1
+		adapter.TotalReplicas = 1
+	}
+
+	if adapter.CurrentReplica > adapter.TotalReplicas {
+		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+			adapter.CurrentReplica, adapter.TotalReplicas)
+	}
+
+	args[0] = &kafka.KVArg{
+		Key:   "adapter",
+		Value: adapter,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "deviceTypes",
+		Value: deviceTypes,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
+	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
+	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"device-id": device.Id})
+	rpc := "DeviceUpdate"
+	toTopic := ap.getCoreTopic(device.Id)
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"device-id": device.Id, "success": success})
+	return unPackResponse(ctx, rpc, device.Id, success, result)
+}
+
+func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
+	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
+	rpc := "PortCreated"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: port,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortCreated-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "PortsStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceId},
+	}, {
+		Key:   "port_type_filter",
+		Value: &ic.IntType{Val: int64(portTypeFilter)},
+	}, {
+		Key:   "oper_status",
+		Value: &ic.IntType{Val: int64(operStatus)},
+	}}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"device-id": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error) {
+	logger.Debugw(ctx, "GetDevicePort", log.Fields{"device-id": deviceID})
+	rpc := "GetDevicePort"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}, {
+		Key:   "port_no",
+		Value: &ic.IntType{Val: int64(portNo)},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "GetDevicePort-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		port := &voltha.Port{}
+		if err := ptypes.UnmarshalAny(result, port); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return port, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevicePort-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error) {
+	logger.Debugw(ctx, "ListDevicePorts", log.Fields{"device-id": deviceID})
+	rpc := "ListDevicePorts"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "ListDevicePorts-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		ports := &voltha.Ports{}
+		if err := ptypes.UnmarshalAny(result, ports); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return ports.Items, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ListDevicePorts-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
+	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "DeviceStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+	cStatus := &ic.IntType{Val: int64(connStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "connect_status",
+		Value: cStatus,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
+	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
+	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"parent-device-id": parentDeviceId, "channelId": channelId})
+	rpc := "ChildDeviceDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 7)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+	ppn := &ic.IntType{Val: int64(parentPortNo)}
+	args[1] = &kafka.KVArg{
+		Key:   "parent_port_no",
+		Value: ppn,
+	}
+	cdt := &ic.StrType{Val: childDeviceType}
+	args[2] = &kafka.KVArg{
+		Key:   "child_device_type",
+		Value: cdt,
+	}
+	channel := &ic.IntType{Val: int64(channelId)}
+	args[3] = &kafka.KVArg{
+		Key:   "channel_id",
+		Value: channel,
+	}
+	vId := &ic.StrType{Val: vendorId}
+	args[4] = &kafka.KVArg{
+		Key:   "vendor_id",
+		Value: vId,
+	}
+	sNo := &ic.StrType{Val: serialNumber}
+	args[5] = &kafka.KVArg{
+		Key:   "serial_number",
+		Value: sNo,
+	}
+	oId := &ic.IntType{Val: int64(onuId)}
+	args[6] = &kafka.KVArg{
+		Key:   "onu_id",
+		Value: oId,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+
+}
+
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetDevice", log.Fields{"device-id": deviceId})
+	rpc := "GetDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parent-device-id": parentDeviceId, "kwargs": kwargs})
+	rpc := "GetChildDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 4)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	var cnt uint8 = 0
+	for k, v := range kwargs {
+		cnt += 1
+		if k == "serial_number" {
+			val := &ic.StrType{Val: v.(string)}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "onu_id" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "parent_port_no" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		}
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
+	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "GetChildDevices"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevices := &voltha.Devices{}
+		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevices, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	logger.Debugw(ctx, "SendPacketIn", log.Fields{"device-id": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
+	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"device-id": deviceId, "deviceReason": deviceReason})
+	rpc := "DeviceReasonUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	reason := &ic.StrType{Val: deviceReason}
+	args[1] = &kafka.KVArg{
+		Key:   "device_reason",
+		Value: reason,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
+	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	rpc := "DevicePMConfigUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(pmConfigs.Id)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device_pm_config",
+		Value: pmConfigs,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pmconfig-device-id": pmConfigs.Id, "success": success})
+	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
+}
+
+func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ReconcileChildDevices"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{
+		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
+	operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"device-id": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	rpc := "PortStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 4)
+	deviceID := &voltha.ID{Id: deviceId}
+	portNo := &ic.IntType{Val: int64(portNum)}
+	portType := &ic.IntType{Val: int64(pType)}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: deviceID,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "port_type",
+		Value: portType,
+	}
+	args[3] = &kafka.KVArg{
+		Key:   "port_no",
+		Value: portNo,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
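
A usage sketch for the proxy above, as it might appear in an OLT adapter's ONU-discovery path; the device IDs, port numbers, and serial number are invented for illustration:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

// reportOnu registers a discovered ONU with the core and marks it active.
// All literal values are hypothetical.
func reportOnu(ctx context.Context, cp adapterif.CoreProxy) error {
	child, err := cp.ChildDeviceDetected(ctx,
		"olt-device-id",     // parent device ID (hypothetical)
		1,                   // parent PON port
		"brcm_openomci_onu", // child device type (illustrative)
		16,                  // channel ID
		"BRCM",              // vendor ID
		"BRCM12345678",      // serial number (made up)
		16)                  // onuID
	if err != nil {
		return err
	}
	return cp.DeviceStateUpdate(ctx, child.Id,
		voltha.ConnectStatus_REACHABLE, voltha.OperStatus_ACTIVE)
}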
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
new file mode 100644
index 0000000..3b6d4f9
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+type PmMetrics struct {
+	deviceId          string
+	frequency         uint32
+	grouped           bool
+	frequencyOverride bool
+	metrics           map[string]*voltha.PmConfig
+}
+
+type PmMetricsOption func(*PmMetrics)
+
+func Frequency(frequency uint32) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequency = frequency
+	}
+}
+
+func Grouped(grouped bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.grouped = grouped
+	}
+}
+
+func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequencyOverride = frequencyOverride
+	}
+}
+
+// UpdateFrequency will update the frequency.
+func (pm *PmMetrics) UpdateFrequency(frequency uint32) {
+	pm.frequency = frequency
+}
+
+func Metrics(pmNames []string) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.metrics = make(map[string]*voltha.PmConfig)
+		for _, name := range pmNames {
+			args.metrics[name] = &voltha.PmConfig{
+				Name:    name,
+				Type:    voltha.PmConfig_COUNTER,
+				Enabled: true,
+			}
+		}
+	}
+}
+
+func NewPmMetrics(deviceId string, opts ...PmMetricsOption) *PmMetrics {
+	pm := &PmMetrics{deviceId: deviceId}
+	for _, option := range opts {
+		option(pm)
+	}
+	return pm
+}
+
+func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
+	pmConfigs := &voltha.PmConfigs{
+		Id:           pm.deviceId,
+		DefaultFreq:  pm.frequency,
+		Grouped:      pm.grouped,
+		FreqOverride: pm.frequencyOverride,
+	}
+	for _, v := range pm.metrics {
+		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
+	}
+	return pmConfigs
+}
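
To show how these functional options compose (every literal here is an illustrative value, not one from this change):

package example

import (
	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/common"
)

func examplePmConfigs() {
	// Build PM metrics for a device using the option constructors above.
	pm := common.NewPmMetrics("onu-device-id",
		common.Frequency(150),
		common.Grouped(false),
		common.FrequencyOverride(false),
		common.Metrics([]string{"rx_bytes", "tx_bytes"}))
	_ = pm.ToPmConfigs() // suitable for CoreProxy.DevicePMConfigUpdate
}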
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
new file mode 100644
index 0000000..90f575b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
@@ -0,0 +1,1011 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"errors"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/extension"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type RequestHandlerProxy struct {
+	TestMode       bool
+	coreInstanceId string
+	adapter        adapters.IAdapter
+	coreProxy      adapterif.CoreProxy
+}
+
+func NewRequestHandlerProxy(coreInstanceId string, iadapter adapters.IAdapter, cProxy adapterif.CoreProxy) *RequestHandlerProxy {
+	var proxy RequestHandlerProxy
+	proxy.coreInstanceId = coreInstanceId
+	proxy.adapter = iadapter
+	proxy.coreProxy = cProxy
+	return &proxy
+}
+
+func (rhp *RequestHandlerProxy) Adapter_descriptor() (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Device_types() (*voltha.DeviceTypes, error) {
+	return nil, nil
+}
+
+func (rhp *RequestHandlerProxy) Health() (*voltha.HealthStatus, error) {
+	return nil, nil
+}
+
+func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
+
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the adopt device on the adapter
+	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the reconcile device API on the adapter
+	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Abandon_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Disable_device API on the adapter
+	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reenable_device API on the adapter
+	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reboot_device API on the adapter
+	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+
+}
+
+func (rhp *RequestHandlerProxy) Self_test_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the delete_device API on the adapter
+	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_device_details(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_bulk")
+	if len(args) < 5 {
+		logger.Warn(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &voltha.Flows{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &voltha.FlowGroups{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flows":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "groups":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the bulk flow update API of the adapter
+	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_incrementally")
+	if len(args) < 5 {
+		logger.Warn(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &openflow_13.FlowChanges{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &openflow_13.FlowGroupChanges{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "group_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the incremental flow update API of the adapter
+	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_pm_config")
+	if len(args) < 2 {
+		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	pmConfigs := &voltha.PmConfigs{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "pm_configs":
+			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pmConfigs": pmConfigs})
+	//Invoke the pm config update API of the adapter
+	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
+	if len(args) < 3 {
+		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &ic.StrType{}
+	egressPort := &ic.IntType{}
+	packet := &openflow_13.OfpPacketOut{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "outPort":
+			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"device-id": deviceId.Val, "outPort": egressPort, "packet": packet})
+	//Invoke the packet out API on the adapter
+	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Suppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Unsuppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"device-id": device.Id})
+
+	var cap *ic.SwitchCapability
+	var err error
+	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
+	return cap, nil
+}
+
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaMsg := &ic.InterAdapterMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+
+	//Invoke the inter adapter API on the handler
+	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Process_tech_profile_instance_request(ctx context.Context, args []*ic.Argument) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaTpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaTpReqMsg); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": iaTpReqMsg.TpInstancePath})
+
+	//Invoke the tech profile instance request
+	tpInst := rhp.adapter.Process_tech_profile_instance_request(ctx, iaTpReqMsg)
+
+	return tpInst, nil
+}
+
+func (rhp *RequestHandlerProxy) Download_image(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDownload(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Download_image(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_image_download_status(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDownload(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Get_image_download_status(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Cancel_image_download(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDownload(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Cancel_image_download(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Activate_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+
+	device, image, err := unMarshalImageDownload(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Activate_image_update(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Revert_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDownload(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Revert_image_update(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func unMarshalImageDownload(ctx context.Context, args []*ic.Argument) (*voltha.Device, *voltha.ImageDownload, error) {
+	if len(args) < 4 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, nil, err
+	}
+	device := &voltha.Device{}
+	image := &voltha.ImageDownload{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case "request":
+			if err := ptypes.UnmarshalAny(arg.Value, image); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-image", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		}
+	}
+	return device, image, nil
+}
+
+func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+	if err != nil {
+		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
+		return err
+	}
+	return rhp.adapter.Enable_port(ctx, deviceId, port)
+}
+
+func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+	if err != nil {
+		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
+		return err
+	}
+	return rhp.adapter.Disable_port(ctx, deviceId, port)
+}
+
+func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
+	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return "", nil, errors.New("invalid-number-of-args")
+	}
+	deviceId := &ic.StrType{}
+	port := &voltha.Port{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return "", nil, err
+			}
+		case "port":
+			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
+				return "", nil, err
+			}
+		}
+	}
+	return deviceId.Val, port, nil
+}
+
+func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return errors.New("invalid-number-of-args")
+	}
+	childDevice := &voltha.Device{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "childDevice":
+			if err := ptypes.UnmarshalAny(arg.Value, childDevice); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-child-device", log.Fields{"error": err})
+				return err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(childDevice.ParentId, fromTopic.Val)
+	//Invoke the Child_device_lost API on the adapter
+	if err := rhp.adapter.Child_device_lost(ctx, childDevice); err != nil {
+		return status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return nil
+}
+
+func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	// TODO: See related comment in voltha-go:adapter_proxy_go:startOmciTest()
+	//   Second argument should perhaps be uuid instead of omcitestrequest
+
+	device := &voltha.Device{}
+	request := &voltha.OmciTestRequest{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "omcitestrequest":
+			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return result, nil
+}
+func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	pDeviceId := &ic.StrType{}
+	device := &voltha.Device{}
+	valuetype := &ic.IntType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "pDeviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-parent-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "valuetype":
+			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
+				return nil, err
+			}
+		default:
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
+		}
+	}
+
+	//Invoke the Get_value API on the adapter
+	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
+}
+
+func (rhp *RequestHandlerProxy) Single_get_value_request(ctx context.Context, args []*ic.Argument) (*extension.SingleGetValueResponse, error) {
+	logger.Debugw(ctx, "req handler Single_get_value_request", log.Fields{"no of args": len(args), "args": args})
+
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+	singleGetvalueReq := extension.SingleGetValueRequest{}
+	errResp := func(status extension.GetValueResponse_Status, reason extension.GetValueResponse_ErrorReason) *extension.SingleGetValueResponse {
+		return &extension.SingleGetValueResponse{
+			Response: &extension.GetValueResponse{
+				Status:    status,
+				ErrReason: reason,
+			},
+		}
+	}
+	for _, arg := range args {
+		switch arg.Key {
+		case "request":
+			if err := ptypes.UnmarshalAny(arg.Value, &singleGetvalueReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-singleGetvalueReq", log.Fields{"error": err})
+				return errResp(extension.GetValueResponse_ERROR, extension.GetValueResponse_REASON_UNDEFINED), nil
+			}
+		default:
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
+		}
+	}
+	logger.Debugw(ctx, "invoke rhp.adapter.Single_get_value_request ", log.Fields{"singleGetvalueReq": singleGetvalueReq})
+	return rhp.adapter.Single_get_value_request(ctx, singleGetvalueReq)
+}
+
+func (rhp *RequestHandlerProxy) getDeviceDownloadImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageDownloadRequest, error) {
+	logger.Debugw(ctx, "getDeviceDownloadImageRequest", log.Fields{"args": args})
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	deviceDownloadImageReq := ic.DeviceImageDownloadRequest{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceImageDownloadReq":
+			if err := ptypes.UnmarshalAny(arg.Value, &deviceDownloadImageReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	return &deviceDownloadImageReq, nil
+}
+
+func (rhp *RequestHandlerProxy) getDeviceImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageRequest, error) {
+	logger.Debugw(ctx, "getDeviceImageRequest", log.Fields{"args": args})
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	deviceImageReq := ic.DeviceImageRequest{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceImageReq":
+			if err := ptypes.UnmarshalAny(arg.Value, &deviceImageReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	return &deviceImageReq, nil
+}
+
+func (rhp *RequestHandlerProxy) getDeviceID(ctx context.Context, args []*ic.Argument) (string, error) {
+	logger.Debugw(ctx, "getDeviceID", log.Fields{"args": args})
+
+	deviceId := &ic.StrType{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return "", err
+			}
+		}
+	}
+
+	if deviceId.Val == "" {
+		return "", errors.New("invalid argument")
+	}
+
+	return deviceId.Val, nil
+}
+
+func (rhp *RequestHandlerProxy) Download_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageDownloadReq, err := rhp.getDeviceDownloadImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownloadRsp, err := rhp.adapter.Download_onu_image(ctx, imageDownloadReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownloadRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_onu_image_status(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageStatusReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageStatus, err := rhp.adapter.Get_onu_image_status(ctx, imageStatusReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageStatus, nil
+}
+
+func (rhp *RequestHandlerProxy) Abort_onu_image_upgrade(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageAbortReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageAbortRsp, err := rhp.adapter.Abort_onu_image_upgrade(ctx, imageAbortReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageAbortRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Activate_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageActivateReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageActivateRsp, err := rhp.adapter.Activate_onu_image(ctx, imageActivateReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageActivateRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Commit_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageCommitReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageCommitRsp, err := rhp.adapter.Commit_onu_image(ctx, imageCommitReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return imageCommitRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_onu_images(ctx context.Context, args []*ic.Argument) (*voltha.OnuImages, error) {
+	deviceID, err := rhp.getDeviceID(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	onuImages, err := rhp.adapter.Get_onu_images(ctx, deviceID)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return onuImages, nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
new file mode 100644
index 0000000..35f227e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"google.golang.org/grpc/codes"
+	"math/rand"
+	"time"
+)
+
+//GetRandomSerialNumber returns a random serial number formatted as "IP:PORT"
+func GetRandomSerialNumber() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%d.%d.%d.%d:%d",
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(9000)+1000,
+	)
+}
+
+//GetRandomMacAddress returns a random mac address
+func GetRandomMacAddress() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+	)
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+var src = rand.NewSource(time.Now().UnixNano())
+
+//GetRandomString returns a random string of letters with the given length
+func GetRandomString(n int) string {
+	b := make([]byte, n)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return string(b)
+}
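+
+// Illustrative sketch, not part of the upstream change: typical use of the
+// random helpers above. The output values shown in the comments are examples only.
+func exampleRandomHelpers() {
+	fmt.Println(GetRandomSerialNumber()) // e.g. "23.114.7.201:4816"
+	fmt.Println(GetRandomMacAddress())   // e.g. "2a:0f:11:5c:03:7d"
+	fmt.Println(GetRandomString(12))     // 12 random letters
+}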
+
+//ICProxyErrorCodeToGrpcErrorCode maps an inter-container error code to the equivalent gRPC error code
+func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
+	switch icErr {
+	case ic.ErrorCode_INVALID_PARAMETERS:
+		return codes.InvalidArgument
+	case ic.ErrorCode_UNSUPPORTED_REQUEST:
+		return codes.Unavailable
+	case ic.ErrorCode_DEADLINE_EXCEEDED:
+		return codes.DeadlineExceeded
+	default:
+		logger.Warnw(ctx, "cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		return codes.Internal
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
new file mode 100644
index 0000000..aca4271
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package adapters
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-protos/v4/go/extension"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+//IAdapter represents the set of APIs a voltha adapter has to support.
+type IAdapter interface {
+	Adapter_descriptor(ctx context.Context) error
+	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
+	Health(ctx context.Context) (*voltha.HealthStatus, error)
+	Adopt_device(ctx context.Context, device *voltha.Device) error
+	Reconcile_device(ctx context.Context, device *voltha.Device) error
+	Abandon_device(ctx context.Context, device *voltha.Device) error
+	Disable_device(ctx context.Context, device *voltha.Device) error
+	Reenable_device(ctx context.Context, device *voltha.Device) error
+	Reboot_device(ctx context.Context, device *voltha.Device) error
+	Self_test_device(ctx context.Context, device *voltha.Device) error
+	Delete_device(ctx context.Context, device *voltha.Device) error
+	Get_device_details(ctx context.Context, device *voltha.Device) error
+	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
+	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+	Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage
+	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Child_device_lost(ctx context.Context, childDevice *voltha.Device) error
+	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
+	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
+	Single_get_value_request(ctx context.Context, request extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error)
+	Download_onu_image(ctx context.Context, request *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error)
+	Get_onu_image_status(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Abort_onu_image_upgrade(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Get_onu_images(ctx context.Context, deviceID string) (*voltha.OnuImages, error)
+	Activate_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Commit_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
new file mode 100644
index 0000000..606d18c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
new file mode 100644
index 0000000..f5efa36
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+const (
+	defaultkvStoreConfigPath     = "config"
+	defaultkvStoreDataPathPrefix = "service/voltha_voltha"
+	kvStorePathSeparator         = "/"
+)
+
+// ConfigType represents the type for which config is created inside the kvstore
+// For example, loglevel
+type ConfigType int
+
+const (
+	ConfigTypeLogLevel ConfigType = iota
+	ConfigTypeMetadata
+	ConfigTypeKafka
+	ConfigTypeLogFeatures
+)
+
+func (c ConfigType) String() string {
+	return [...]string{"loglevel", "metadata", "kafka", "logfeatures"}[c]
+}
+
+// ChangeEvent represents the event received from a watch
+// For example, a Put event
+type ChangeEvent int
+
+const (
+	Put ChangeEvent = iota
+	Delete
+)
+
+func (ce ChangeEvent) String() string {
+	return [...]string{"Put", "Delete"}[ce]
+}
+
+// ConfigChangeEvent represents config for the events received from a watch
+// For example, ChangeType is Put and ConfigAttribute is default
+type ConfigChangeEvent struct {
+	ChangeType      ChangeEvent
+	ConfigAttribute string
+}
+
+// ConfigManager is a wrapper over Backend to maintain Configuration of voltha components
+// in kvstore based persistent storage
+type ConfigManager struct {
+	Backend               *db.Backend
+	KVStoreConfigPrefix   string
+	KVStoreDataPathPrefix string
+}
+
+// ComponentConfig represents a category of configuration for a specific VOLTHA component type
+// stored in persistent storage pointed to by the Config Manager.
+// For example, one ComponentConfig instance will be created for the loglevel config type of the rw-core
+// component while another ComponentConfig instance will refer to the connection config type of the same
+// rw-core component. So, there can be multiple ComponentConfig instances created per component,
+// pointing to different categories of configuration.
+//
+// Configuration pointed to by a ComponentConfig is stored in the kvstore as a list of key/value pairs
+// under a hierarchical tree with the following base path
+// <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/
+//
+// For example, the rw-core ComponentConfig for loglevel config entries will be stored under the following path
+// /voltha/service/config/rw-core/loglevel/
+type ComponentConfig struct {
+	cManager         *ConfigManager
+	componentLabel   string
+	configType       ConfigType
+	changeEventChan  chan *ConfigChangeEvent
+	kvStoreEventChan chan *kvstore.Event
+}
+
+// NewConfigManager creates a ConfigManager backed by the given kvstore client
+func NewConfigManager(ctx context.Context, kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
+	var kvStorePrefix string
+	if prefix, present := os.LookupEnv("KV_STORE_DATAPATH_PREFIX"); present {
+		kvStorePrefix = prefix
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
+	} else {
+		kvStorePrefix = defaultkvStoreDataPathPrefix
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
+	}
+	return &ConfigManager{
+		KVStoreConfigPrefix:   defaultkvStoreConfigPath,
+		KVStoreDataPathPrefix: kvStorePrefix,
+		Backend: &db.Backend{
+			Client:     kvClient,
+			StoreType:  kvStoreType,
+			Address:    kvStoreAddress,
+			Timeout:    kvStoreTimeout,
+			PathPrefix: kvStorePrefix,
+		},
+	}
+}
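+
+// Illustrative sketch, not part of the upstream change: wiring a ConfigManager
+// from an already-connected kvstore client. The store type, address and timeout
+// values here are hypothetical placeholders.
+func newExampleConfigManager(ctx context.Context, kvClient kvstore.Client) *ConfigManager {
+	return NewConfigManager(ctx, kvClient, "etcd", "etcd:2379", 5*time.Second)
+}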
+
+// RetrieveComponentList lists the component names for which loglevel config is stored in the kvstore
+func (c *ConfigManager) RetrieveComponentList(ctx context.Context, configType ConfigType) ([]string, error) {
+	data, err := c.Backend.List(ctx, c.KVStoreConfigPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	// Loop through the data received from the Backend for config,
+	// trimming and splitting each key into its component name, package name and level.
+	// For example, a received key of <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default with value "DEBUG"
+	// yields "default" as the package name, <Component Name> as the component name and "DEBUG" as the level
+	ccPathPrefix := kvStorePathSeparator + configType.String() + kvStorePathSeparator
+	pathPrefix := c.KVStoreDataPathPrefix + kvStorePathSeparator + c.KVStoreConfigPrefix + kvStorePathSeparator
+	var list []string
+	keys := make(map[string]interface{})
+	for attr := range data {
+		cname := strings.TrimPrefix(attr, pathPrefix)
+		cName := strings.SplitN(cname, ccPathPrefix, 2)
+		if len(cName) != 2 {
+			continue
+		}
+		if _, exist := keys[cName[0]]; !exist {
+			keys[cName[0]] = nil
+			list = append(list, cName[0])
+		}
+	}
+	return list, nil
+}
+
+// InitComponentConfig initializes the component config
+func (cm *ConfigManager) InitComponentConfig(componentLabel string, configType ConfigType) *ComponentConfig {
+
+	return &ComponentConfig{
+		componentLabel:   componentLabel,
+		configType:       configType,
+		cManager:         cm,
+		changeEventChan:  nil,
+		kvStoreEventChan: nil,
+	}
+
+}
+
+func (c *ComponentConfig) makeConfigPath() string {
+
+	cType := c.configType.String()
+	return c.cManager.KVStoreConfigPrefix + kvStorePathSeparator +
+		c.componentLabel + kvStorePathSeparator + cType
+}
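+
+// For example (illustration): with the default config prefix, componentLabel
+// "rw-core" and ConfigTypeLogLevel, makeConfigPath yields "config/rw-core/loglevel"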
+
+// MonitorForConfigChange watches the subkeys of the given key
+// Any change to a subkey of the given key is delivered on an event channel from the kvstore.
+// That channel is processed into a new channel carrying the required values, which is returned to the caller.
+// For example, rw-core watches on <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/
+// and receives events of PUT and DELETE type;
+// values from the kvstore event channel are processed and stored in kvStoreEventChan
+func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
+	key := c.makeConfigPath()
+
+	logger.Debugw(ctx, "monitoring-for-config-change", log.Fields{"key": key})
+
+	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
+
+	c.kvStoreEventChan = c.cManager.Backend.CreateWatch(ctx, key, true)
+
+	go c.processKVStoreWatchEvents(ctx)
+
+	return c.changeEventChan
+}
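+
+// Illustrative sketch, not part of the upstream change: draining the channel
+// returned by MonitorForConfigChange. A real consumer re-reads the configuration
+// on each event, as processLogConfig in logcontroller.go does.
+func watchConfigExample(ctx context.Context, cc *ComponentConfig) {
+	for event := range cc.MonitorForConfigChange(ctx) {
+		logger.Debugw(ctx, "config-changed", log.Fields{"change-type": event.ChangeType, "attribute": event.ConfigAttribute})
+	}
+}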
+
+// processKVStoreWatchEvents processes the event channel received from the Backend for any change type
+// It checks whether each EventType is valid; for valid EventTypes it creates a ConfigChangeEvent and sends it on the channel
+func (c *ComponentConfig) processKVStoreWatchEvents(ctx context.Context) {
+
+	ccKeyPrefix := c.makeConfigPath()
+
+	logger.Debugw(ctx, "processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+
+	ccPathPrefix := c.cManager.Backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
+
+	for watchResp := range c.kvStoreEventChan {
+
+		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
+			logger.Warnw(ctx, "received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			continue
+		}
+
+		// populating the configAttribute from the received Key
+		// For Example, Key received would be <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default
+		// Storing default in configAttribute variable
+		ky := fmt.Sprintf("%s", watchResp.Key)
+
+		c.changeEventChan <- &ConfigChangeEvent{
+			ChangeType:      ChangeEvent(watchResp.EventType),
+			ConfigAttribute: strings.TrimPrefix(ky, ccPathPrefix),
+		}
+	}
+}
+
+// Retrieve returns the value of a specific config key as a string
+func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "retrieving-config", log.Fields{"key": key})
+
+	if kvpair, err := c.cManager.Backend.Get(ctx, key); err != nil {
+		return "", err
+	} else {
+		if kvpair == nil {
+			return "", fmt.Errorf("config-key-does-not-exist : %s", key)
+		}
+
+		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
+		logger.Debugw(ctx, "retrieved-config", log.Fields{"key": key, "value": value})
+		return value, nil
+	}
+}
+
+func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
+	key := c.makeConfigPath()
+
+	logger.Debugw(ctx, "retreiving-list", log.Fields{"key": key})
+
+	data, err := c.cManager.Backend.List(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Loop through the data received from the Backend for the given key,
+	// trimming each entry down to a key/value pair.
+	// For example, a received key of <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default with value "DEBUG"
+	// is stored as key "default" with value "DEBUG" in the map[string]string
+	res := make(map[string]string)
+	ccPathPrefix := c.cManager.Backend.PathPrefix + kvStorePathSeparator + key + kvStorePathSeparator
+	for attr, val := range data {
+		res[strings.TrimPrefix(attr, ccPathPrefix)] = strings.Trim(fmt.Sprintf("%s", val.Value), "\"")
+	}
+
+	return res, nil
+}
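+
+// For example (illustration): if the store holds
+// <Backend Prefix Path>/config/rw-core/loglevel/default with value "DEBUG",
+// RetrieveAll on the rw-core loglevel ComponentConfig returns map[string]string{"default": "DEBUG"}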
+
+func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "saving-config", log.Fields{"key": key, "value": configValue})
+
+	//save the data for update config
+	if err := c.cManager.Backend.Put(ctx, key, configValue); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *ComponentConfig) Delete(ctx context.Context, configKey string) error {
+	//construct key using makeConfigPath
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "deleting-config", log.Fields{"key": key})
+	//delete the config
+	if err := c.cManager.Backend.Delete(ctx, key); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
new file mode 100644
index 0000000..68bfb32
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package config provides dynamic logging configuration for a specific Voltha component, with loglevel lookup
+// from the etcd kvstore implemented using Backend.
+// Any Voltha component can use dynamic logging by starting StartLogLevelConfigProcessing in a goroutine after
+// starting the kvClient for the component.
+
+package config
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"os"
+	"sort"
+	"strings"
+)
+
+const (
+	defaultLogLevelKey                = "default"          // kvstore key containing default loglevel
+	globalConfigRootNode              = "global"           // Root Node in kvstore containing global config
+	initialGlobalDefaultLogLevelValue = "WARN"             // Hard-coded Global Default loglevel pushed at PoD startup
+	logPackagesListKey                = "log_package_list" // kvstore key containing list of allowed log packages
+)
+
+// ComponentLogController represents the logging configuration of a specific Voltha component type
+// It stores the ComponentConfig and GlobalConfig of the loglevel config for that component type
+// For example, a ComponentLogController instance will be created for the rw-core component
+type ComponentLogController struct {
+	ComponentName       string
+	componentNameConfig *ComponentConfig
+	GlobalConfig        *ComponentConfig
+	configManager       *ConfigManager
+	logHash             [16]byte
+	initialLogLevel     string // Initial default log level set by helm chart
+}
+
+func NewComponentLogController(ctx context.Context, cm *ConfigManager) (*ComponentLogController, error) {
+	logger.Debug(ctx, "creating-new-component-log-controller")
+	componentName := os.Getenv("COMPONENT_NAME")
+	if componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	var defaultLogLevel string
+	var err error
+	// Retrieve and save default log level; used for fallback if all loglevel config is cleared in etcd
+	if defaultLogLevel, err = log.LogLevelToString(log.GetDefaultLogLevel()); err != nil {
+		defaultLogLevel = "DEBUG"
+	}
+
+	return &ComponentLogController{
+		ComponentName:       componentName,
+		componentNameConfig: nil,
+		GlobalConfig:        nil,
+		configManager:       cm,
+		initialLogLevel:     defaultLogLevel,
+	}, nil
+
+}
+
+// StartLogLevelConfigProcessing initializes the component config and global config
+// Then it persists the initial default loglevels into the config store before
+// starting the loading and processing of all log configuration
+func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
+	cc, err := NewComponentLogController(ctx, cm)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		return
+	}
+
+	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
+	logger.Debugw(ctx, "global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+
+	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
+	logger.Debugw(ctx, "component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+	cc.persistInitialDefaultLogConfigs(ctx)
+
+	cc.persistRegisteredLogPackageList(ctx)
+
+	cc.processLogConfig(ctx)
+}
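+
+// Illustrative sketch, not part of the upstream change: a component starts
+// dynamic log processing in its own goroutine once its ConfigManager is ready,
+// as described in the package comment above.
+func startLogProcessingExample(ctx context.Context, cm *ConfigManager) {
+	go StartLogLevelConfigProcessing(cm, ctx)
+}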
+
+// persistInitialDefaultLogConfigs persists the global default loglevel into etcd, if not set yet
+// It also checks and sets the component default loglevel in etcd, using the initial loglevel set from the command line
+func (c *ComponentLogController) persistInitialDefaultLogConfigs(ctx context.Context) {
+
+	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		logger.Debugw(ctx, "failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+
+		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+		}
+	}
+
+	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		logger.Debugw(ctx, "failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+
+		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+		}
+	}
+}
+
+// persistRegisteredLogPackageList saves the list of all packages registered for the component into the config kvstore.
+// The package names are sorted and persisted as a single JSON-encoded string
+func (c *ComponentLogController) persistRegisteredLogPackageList(ctx context.Context) {
+
+	componentMetadataConfig := c.configManager.InitComponentConfig(c.ComponentName, ConfigTypeMetadata)
+	logger.Debugw(ctx, "component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
+
+	packageList := log.GetPackageNames()
+	packageList = append(packageList, defaultLogLevelKey)
+	sort.Strings(packageList)
+
+	packageNames, err := json.Marshal(packageList)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
+		return
+	}
+
+	if err := componentMetadataConfig.Save(ctx, logPackagesListKey, string(packageNames)); err != nil {
+		logger.Errorw(ctx, "failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
+	}
+}
+
+// processLogConfig first loads and applies the log config and then waits on the component config and global config
+// channels for any changes. An event is received from the Backend for each valid change type;
+// the component log config and global log config are then retrieved from the Backend and stored in updatedLogConfig in precedence order.
+// Any changes in updatedLogConfig are applied to the component
+func (c *ComponentLogController) processLogConfig(ctx context.Context) {
+
+	// Load and apply Log Config for first time
+	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
+	if err != nil {
+		logger.Warnw(ctx, "unable-to-load-log-config-at-startup", log.Fields{"error": err})
+	} else {
+		if err := c.loadAndApplyLogConfig(ctx, initialLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+		}
+	}
+
+	componentConfigEventChan := c.componentNameConfig.MonitorForConfigChange(ctx)
+
+	globalConfigEventChan := c.GlobalConfig.MonitorForConfigChange(ctx)
+
+	// process the events for componentName and  global config
+	var configEvent *ConfigChangeEvent
+	for {
+		select {
+		case configEvent = <-globalConfigEventChan:
+		case configEvent = <-componentConfigEventChan:
+
+		}
+		logger.Debugw(ctx, "processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
+		if err != nil {
+			logger.Warnw(ctx, "unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			continue
+		}
+
+		logger.Debugw(ctx, "applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+
+		if err := c.loadAndApplyLogConfig(ctx, updatedLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-load-and-apply-log-config", log.Fields{"error": err})
+		}
+	}
+
+}
+
+// getActiveLogLevels returns the active loglevels from the zap logger
+func getActiveLogLevels(ctx context.Context) map[string]string {
+	loglevels := make(map[string]string)
+
+	// now do the default log level
+	if level, err := log.LogLevelToString(log.GetDefaultLogLevel()); err == nil {
+		loglevels[defaultLogLevelKey] = level
+	}
+
+	// do the per-package log levels
+	for _, packageName := range log.GetPackageNames() {
+		level, err := log.GetPackageLogLevel(packageName)
+		if err != nil {
+			logger.Warnw(ctx, "unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			continue
+		}
+
+		if l, err := log.LogLevelToString(level); err == nil {
+			loglevels[packageName] = l
+		}
+	}
+
+	logger.Debugw(ctx, "retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+
+	return loglevels
+}
+
+func (c *ComponentLogController) getGlobalLogConfig(ctx context.Context) (string, error) {
+
+	globalDefaultLogLevel, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		return "", err
+	}
+
+	// Handle edge cases where the global default loglevel is deleted directly from etcd or set to an invalid value
+	// We should use the hard-coded initial default value in such cases
+	if globalDefaultLogLevel == "" {
+		logger.Warn(ctx, "global-default-loglevel-not-found-in-config-store")
+		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
+	}
+
+	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
+		logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
+	}
+
+	logger.Debugw(ctx, "retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+
+	return globalDefaultLogLevel, nil
+}
+
+func (c *ComponentLogController) getComponentLogConfig(ctx context.Context, globalDefaultLogLevel string) (map[string]string, error) {
+	componentLogConfig, err := c.componentNameConfig.RetrieveAll(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	effectiveDefaultLogLevel := ""
+	for logConfigKey, logConfigValue := range componentLogConfig {
+		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
+			logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			delete(componentLogConfig, logConfigKey)
+		} else {
+			if logConfigKey == defaultLogLevelKey {
+				effectiveDefaultLogLevel = componentLogConfig[defaultLogLevelKey]
+			}
+		}
+	}
+
+	// if default loglevel is not configured for the component, component should use
+	// default loglevel configured at global level
+	if effectiveDefaultLogLevel == "" {
+		effectiveDefaultLogLevel = globalDefaultLogLevel
+	}
+
+	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
+
+	logger.Debugw(ctx, "retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+
+	return componentLogConfig, nil
+}
+
+// buildUpdatedLogConfig retrieves the global logConfig and component logConfig from the Backend
+// The component logConfig stores the log config in precedence order.
+// For example, if the global logConfig is set and the component logConfig is set only for a specific package, then
+// the component logConfig holds the global logConfig together with that package's component logConfig.
+// If the global logConfig is set and the component logConfig is set for a specific package as well as for default, then
+// the component logConfig data alone is used
+func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
+	globalLogLevel, err := c.getGlobalLogConfig(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-retrieve-global-log-config", log.Fields{"err": err})
+	}
+
+	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
+	if err != nil {
+		return nil, err
+	}
+
+	finalLogConfig := make(map[string]string)
+	for packageName, logLevel := range componentLogConfig {
+		finalLogConfig[strings.ReplaceAll(packageName, "#", "/")] = logLevel
+	}
+
+	return finalLogConfig, nil
+}
+
+// loadAndApplyLogConfig loads and applies the current configuration for the component
+// It creates a hash of the loaded configuration using GenerateLogConfigHash,
+// compares it against any previously stored hash,
+// and calls updateLogLevels if anything has changed
+func (c *ComponentLogController) loadAndApplyLogConfig(ctx context.Context, logConfig map[string]string) error {
+	currentLogHash, err := GenerateLogConfigHash(logConfig)
+	if err != nil {
+		return err
+	}
+
+	if c.logHash != currentLogHash {
+		updateLogLevels(ctx, logConfig)
+		c.logHash = currentLogHash
+	} else {
+		logger.Debug(ctx, "effective-loglevel-config-same-as-currently-active")
+	}
+
+	return nil
+}
+
+// createModifiedLogLevels loops through the activeLogLevels received from the zap logger and the updatedLogLevels received from buildUpdatedLogConfig
+// to identify and create a map of modified log levels of 2 types:
+// - Packages for which log level has been changed
+// - Packages for which log level config has been cleared - set to default log level
+func createModifiedLogLevels(ctx context.Context, activeLogLevels, updatedLogLevels map[string]string) map[string]string {
+	defaultLevel := updatedLogLevels[defaultLogLevelKey]
+
+	modifiedLogLevels := make(map[string]string)
+	for activeKey, activeLevel := range activeLogLevels {
+		if _, exist := updatedLogLevels[activeKey]; !exist {
+			if activeLevel != defaultLevel {
+				modifiedLogLevels[activeKey] = defaultLevel
+			}
+		} else if activeLevel != updatedLogLevels[activeKey] {
+			modifiedLogLevels[activeKey] = updatedLogLevels[activeKey]
+		}
+	}
+
+	// Log warnings for all invalid packages for which log config has been set
+	for key, value := range updatedLogLevels {
+		if _, exist := activeLogLevels[key]; !exist {
+			logger.Warnw(ctx, "ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+		}
+	}
+
+	return modifiedLogLevels
+}
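+// Illustrative sketch (not part of the library), assuming defaultLogLevelKey
+// resolves to "default"; the package names below are hypothetical:
+//
+//	active  := map[string]string{"default": "INFO", "pkg/a": "DEBUG", "pkg/b": "INFO"}
+//	updated := map[string]string{"default": "WARN", "pkg/a": "DEBUG"}
+//	// createModifiedLogLevels(ctx, active, updated) returns
+//	// map[string]string{"default": "WARN", "pkg/b": "WARN"}: pkg/a is unchanged,
+//	// while pkg/b falls back to the new default since its explicit config was cleared.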
+
+// updateLogLevels updates the loglevels for the component:
+// it retrieves the active configuration from the logger, compares the entries
+// one by one with the updated config, and applies the differences
+func updateLogLevels(ctx context.Context, updatedLogConfig map[string]string) {
+
+	activeLogLevels := getActiveLogLevels(ctx)
+	changedLogLevels := createModifiedLogLevels(ctx, activeLogLevels, updatedLogConfig)
+
+	// If no changed log levels are found, just return. This may happen when an invalid package is configured
+	if len(changedLogLevels) == 0 {
+		logger.Debug(ctx, "no-change-in-effective-loglevel-config")
+		return
+	}
+
+	logger.Debugw(ctx, "applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	for key, level := range changedLogLevels {
+		if key == defaultLogLevelKey {
+			if l, err := log.StringToLogLevel(level); err == nil {
+				log.SetDefaultLogLevel(l)
+			}
+		} else {
+			if l, err := log.StringToLogLevel(level); err == nil {
+				log.SetPackageLogLevel(key, l)
+			}
+		}
+	}
+}
+
+// GenerateLogConfigHash generates an md5 hash of the given key-value pairs,
+// serialized to JSON (json.Marshal emits map keys in sorted order, so the hash
+// is stable for a given configuration)
+func GenerateLogConfigHash(createHashLog map[string]string) ([16]byte, error) {
+	createHashLogBytes := []byte{}
+	levelData, err := json.Marshal(createHashLog)
+	if err != nil {
+		return [16]byte{}, err
+	}
+	createHashLogBytes = append(createHashLogBytes, levelData...)
+	return md5.Sum(createHashLogBytes), nil
+}
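+// Usage sketch (illustrative): callers such as loadAndApplyLogConfig compare the
+// hash of a freshly built config against the previously stored one to decide
+// whether the levels need to be re-applied:
+//
+//	hash, err := GenerateLogConfigHash(logConfig)
+//	if err == nil && hash != c.logHash {
+//		updateLogLevels(ctx, logConfig)
+//		c.logHash = hash
+//	}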
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
new file mode 100644
index 0000000..95c5bde
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"context"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"os"
+	"strings"
+)
+
+const (
+	defaultTracingStatusKey        = "trace_publish"   // kvstore key containing tracing configuration status
+	defaultLogCorrelationStatusKey = "log_correlation" // kvstore key containing log correlation configuration status
+)
+
+// ComponentLogFeaturesController represents the configuration for the logging-related
+// features (Tracing and Log Correlation) of a specific Voltha component.
+type ComponentLogFeaturesController struct {
+	ComponentName               string
+	componentNameConfig         *ComponentConfig
+	configManager               *ConfigManager
+	initialTracingStatus        bool // Initial default tracing status set by helm chart
+	initialLogCorrelationStatus bool // Initial default log correlation status set by helm chart
+}
+
+func NewComponentLogFeaturesController(ctx context.Context, cm *ConfigManager) (*ComponentLogFeaturesController, error) {
+	logger.Debug(ctx, "creating-new-component-log-features-controller")
+	componentName := os.Getenv("COMPONENT_NAME")
+	if componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
+	logCorrelationStatus := log.GetGlobalLFM().GetLogCorrelationStatus()
+
+	return &ComponentLogFeaturesController{
+		ComponentName:               componentName,
+		componentNameConfig:         nil,
+		configManager:               cm,
+		initialTracingStatus:        tracingStatus,
+		initialLogCorrelationStatus: logCorrelationStatus,
+	}, nil
+
+}
+
+// StartLogFeaturesConfigProcessing persists initial config of Log Features into Config Store before
+// starting the loading and processing of Configuration updates
+func StartLogFeaturesConfigProcessing(cm *ConfigManager, ctx context.Context) {
+	cc, err := NewComponentLogFeaturesController(ctx, cm)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-construct-component-log-features-controller-instance-for-monitoring", log.Fields{"error": err})
+		return
+	}
+
+	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogFeatures)
+	logger.Debugw(ctx, "component-log-features-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+	cc.persistInitialLogFeaturesConfigs(ctx)
+
+	cc.processLogFeaturesConfig(ctx)
+}
+
+// persistInitialLogFeaturesConfigs persists the initial status of the Log Correlation and Tracing
+// features (as set from the command line) into the config store (etcd kvstore), if not set yet
+func (cc *ComponentLogFeaturesController) persistInitialLogFeaturesConfigs(ctx context.Context) {
+
+	_, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialTracingStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultTracingStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-tracing-status-at-startup", log.Fields{"error": err, "tracingstatus": statusString})
+		}
+	}
+
+	_, err = cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialLogCorrelationStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultLogCorrelationStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-log-correlation-status-at-startup", log.Fields{"error": err, "logcorrelationstatus": statusString})
+		}
+	}
+}
+
+// processLogFeaturesConfig first loads and applies the configuration of the log features, then
+// waits for any changes made to the configuration in the config store (etcd) and applies them
+func (cc *ComponentLogFeaturesController) processLogFeaturesConfig(ctx context.Context) {
+
+	// Load and apply Tracing Status and log correlation status for first time
+	cc.loadAndApplyTracingStatusUpdate(ctx)
+	cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+
+	componentConfigEventChan := cc.componentNameConfig.MonitorForConfigChange(ctx)
+
+	// process the change events received on the channel
+	var configEvent *ConfigChangeEvent
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		case configEvent = <-componentConfigEventChan:
+			logger.Debugw(ctx, "processing-log-features-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+			if strings.HasSuffix(configEvent.ConfigAttribute, defaultTracingStatusKey) {
+				cc.loadAndApplyTracingStatusUpdate(ctx)
+			} else if strings.HasSuffix(configEvent.ConfigAttribute, defaultLogCorrelationStatusKey) {
+				cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+			}
+		}
+	}
+
+}
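+// Usage sketch (illustrative): a component typically launches this processing
+// loop once at startup on its own goroutine; cm is the component's
+// ConfigManager instance (hypothetical here):
+//
+//	go StartLogFeaturesConfigProcessing(cm, ctx)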
+
+func (cc *ComponentLogFeaturesController) loadAndApplyTracingStatusUpdate(ctx context.Context) {
+
+	desiredTracingStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil || desiredTracingStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-tracing-status-from-config-store")
+		return
+	}
+
+	if desiredTracingStatus != "ENABLED" && desiredTracingStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-tracing-status-configured-in-config-store", log.Fields{"failed-tracing-status": desiredTracingStatus, "tracing-status": log.GetGlobalLFM().GetTracePublishingStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-tracing-status", log.Fields{"tracing-status": desiredTracingStatus})
+
+	log.GetGlobalLFM().SetTracePublishingStatus(desiredTracingStatus == "ENABLED")
+}
+
+func (cc *ComponentLogFeaturesController) loadAndApplyLogCorrelationStatusUpdate(ctx context.Context) {
+
+	desiredLogCorrelationStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil || desiredLogCorrelationStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-log-correlation-status-from-config-store")
+		return
+	}
+
+	if desiredLogCorrelationStatus != "ENABLED" && desiredLogCorrelationStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-log-correlation-status-configured-in-config-store", log.Fields{"failed-log-correlation-status": desiredLogCorrelationStatus, "log-correlation-status": log.GetGlobalLFM().GetLogCorrelationStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-log-correlation-status", log.Fields{"log-correlation-status": desiredLogCorrelationStatus})
+
+	log.GetGlobalLFM().SetLogCorrelationStatus(desiredLogCorrelationStatus == "ENABLED")
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
new file mode 100644
index 0000000..ff0b5b7
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Default Minimal Interval for posting alive state of backend kvstore on Liveness Channel
+	DefaultLivenessChannelInterval = time.Second * 30
+)
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	Client                  kvstore.Client
+	StoreType               string
+	Timeout                 time.Duration
+	Address                 string
+	PathPrefix              string
+	alive                   bool // Is this backend connection alive?
+	livenessMutex           sync.Mutex
+	liveness                chan bool     // channel to post alive state
+	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+	lastLivenessTime        time.Time     // Instant of last alive state push
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(ctx context.Context, storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:               storeType,
+		Address:                 address,
+		Timeout:                 timeout,
+		LivenessChannelInterval: DefaultLivenessChannelInterval,
+		PathPrefix:              pathPrefix,
+		alive:                   false, // connection considered down at start
+	}
+
+	if b.Client, err = b.newClient(ctx, address, timeout); err != nil {
+		logger.Errorw(ctx, "failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "address": address,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
+
+func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "etcd":
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(ctx context.Context, key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+func (b *Backend) updateLiveness(ctx context.Context, alive bool) {
+	// Periodically push the liveness state to the channel so that, in a live
+	// state, the core does not time out and send a forced liveness message.
+	// Push the alive state if the last push to the channel was more than
+	// livenessChannelInterval ago
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
+	if b.liveness != nil {
+		if b.alive != alive {
+			logger.Debug(ctx, "update-liveness-channel-reason-change")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
+			logger.Debug(ctx, "update-liveness-channel-reason-interval")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Emit log message only for alive state change
+	if b.alive != alive {
+		logger.Debugw(ctx, "change-kvstore-alive-status", log.Fields{"alive": alive})
+		b.alive = alive
+	}
+}
+
+// PerformLivenessCheck performs a dummy key lookup on the kvstore to test connection
+// liveness and posts the result on the liveness channel
+func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
+	alive := b.Client.IsConnectionUp(ctx)
+	logger.Debugw(ctx, "kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+	b.updateLiveness(ctx, alive)
+	return alive
+}
+
+// EnableLivenessChannel enables the liveness monitor channel. This channel will report
+// a "true" or "false" on every kvstore operation which indicates whether
+// or not the connection is still Live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
+	logger.Debug(ctx, "enable-kvstore-liveness-channel")
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
+	if b.liveness == nil {
+		b.liveness = make(chan bool, 10)
+		b.liveness <- b.alive
+		b.lastLivenessTime = time.Now()
+	}
+
+	return b.liveness
+}
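+// Usage sketch (illustrative): a service can drain the liveness channel to
+// drive its readiness state; updateReadiness is a hypothetical handler:
+//
+//	liveness := backend.EnableLivenessChannel(ctx)
+//	go func() {
+//		for alive := range liveness {
+//			updateReadiness(alive)
+//		}
+//	}()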
+
+// isErrorIndicatingAliveKvstore infers the alive status of the kvstore from the type of error
+func (b *Backend) isErrorIndicatingAliveKvstore(ctx context.Context, err error) bool {
+	// Assume alive unless an error indicates otherwise
+	alive := true
+
+	if err != nil {
+
+		// timeout indicates kvstore not reachable/alive
+		if err == context.DeadlineExceeded {
+			alive = false
+		}
+
+		// Need to analyze client-specific errors based on backend type
+		if b.StoreType == "etcd" {
+
+			// For the etcd backend, consider not-alive only for errors indicating a
+			// timed-out request or an unavailable/corrupted cluster. For all remaining
+			// error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+			// we would not infer a not-alive backend because such an error may also
+			// occur due to bad client requests or sequences of operations
+			switch status.Code(err) {
+			case codes.DeadlineExceeded:
+				fallthrough
+			case codes.Unavailable:
+				fallthrough
+			case codes.DataLoss:
+				alive = false
+			}
+		}
+	}
+
+	return alive
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-list")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.List(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return pair, err
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-get")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.Get(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return pair, err
+}
+
+// Put stores an item value under the specified key
+func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-put")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Put(ctx, formattedPath, value)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
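+// Usage sketch (illustrative): keys passed to Put/Get are relative and makePath
+// prepends the configured PathPrefix, so the calls below operate on
+// "service/voltha/devices/device-1234"; all literals are hypothetical:
+//
+//	backend := NewBackend(ctx, "etcd", "etcd:2379", 5*time.Second, "service/voltha")
+//	if err := backend.Put(ctx, "devices/device-1234", deviceBytes); err != nil {
+//		return err
+//	}
+//	kvp, err := backend.Get(ctx, "devices/device-1234")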
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(ctx context.Context, key string) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Delete(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
+// DeleteWithPrefix removes all items whose keys share the given prefix
+func (b *Backend) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-with-prefix")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, prefixKey)
+	logger.Debugw(ctx, "deleting-prefix-key", log.Fields{"key": prefixKey, "path": formattedPath})
+
+	err := b.Client.DeleteWithPrefix(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(ctx, formattedPath, withPrefix)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-watch")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(ctx, formattedPath, ch)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
new file mode 100644
index 0000000..4bc92b1
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package db
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
new file mode 100644
index 0000000..b35f1f3
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"time"
+)
+
+const (
+	// Maximum channel buffer between publisher/subscriber goroutines
+	maxClientChannelBufferSize = 10
+)
+
+// These constants represent the event types returned by the KV client
+const (
+	PUT = iota
+	DELETE
+	CONNECTIONDOWN
+	UNKNOWN
+)
+
+// KVPair is a common wrapper for key-value pairs returned from the KV store
+type KVPair struct {
+	Key     string
+	Value   interface{}
+	Version int64
+	Session string
+	Lease   int64
+}
+
+// NewKVPair creates a new KVPair object
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
+	kv := new(KVPair)
+	kv.Key = key
+	kv.Value = value
+	kv.Session = session
+	kv.Lease = lease
+	kv.Version = version
+	return kv
+}
+
+// Event is generated by the KV client when a key change is detected
+type Event struct {
+	EventType int
+	Key       interface{}
+	Value     interface{}
+	Version   int64
+}
+
+// NewEvent creates a new Event object
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
+	evnt := new(Event)
+	evnt.EventType = eventType
+	evnt.Key = key
+	evnt.Value = value
+	evnt.Version = version
+
+	return evnt
+}
+
+// Client represents the set of APIs a KV Client must implement
+type Client interface {
+	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	Get(ctx context.Context, key string) (*KVPair, error)
+	Put(ctx context.Context, key string, value interface{}) error
+	Delete(ctx context.Context, key string) error
+	DeleteWithPrefix(ctx context.Context, prefixKey string) error
+	Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
+	ReleaseReservation(ctx context.Context, key string) error
+	ReleaseAllReservations(ctx context.Context) error
+	RenewReservation(ctx context.Context, key string) error
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
+	ReleaseLock(lockName string) error
+	IsConnectionUp(ctx context.Context) bool // the timeout is governed by the passed-in ctx
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
+}
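+// A conventional compile-time assertion (illustrative, not present in this
+// package) that an implementation such as EtcdClient satisfies the interface:
+//
+//	var _ Client = (*EtcdClient)(nil)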
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..b8509db
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..c2a38c6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,574 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+
+	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	ectdAPI             *v3Client.Client
+	keyReservations     map[string]*v3Client.LeaseID
+	watchedChannels     sync.Map
+	keyReservationsLock sync.RWMutex
+	lockToMutexMap      map[string]*v3Concurrency.Mutex
+	lockToSessionMap    map[string]*v3Concurrency.Session
+	lockToMutexLock     sync.Mutex
+}
+
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, config *v3Client.Config) (*EtcdClient, error) {
+	c, err := v3Client.New(*config)
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+
+	reservations := make(map[string]*v3Client.LeaseID)
+	lockMutexMap := make(map[string]*v3Concurrency.Mutex)
+	lockSessionMap := make(map[string]*v3Concurrency.Session)
+
+	return &EtcdClient{ectdAPI: c, keyReservations: reservations, lockToMutexMap: lockMutexMap,
+		lockToSessionMap: lockSessionMap}, nil
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
+
+	return NewEtcdCustomClient(
+		ctx,
+		&v3Client.Config{
+			Endpoints:   []string{addr},
+			DialTimeout: timeout,
+			LogConfig:   &logconfig})
+}
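+// Construction sketch (illustrative); the endpoint address is hypothetical:
+//
+//	client, err := NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
+//	if err != nil {
+//		return err
+//	}
+//	defer client.Close(ctx)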
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+	// Try to get a non-existent key.  If the connection is up then no error is returned.
+	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+		return false
+	}
+	return true
+}
+
+// List returns a map of key-value pairs for which the given key is a prefix. The
+// context (deadline/cancellation) bounds how long the call will wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns the key-value pair for a given key. The context (deadline/cancellation)
+// bounds how long the call will wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+
+	attempt := 0
+startLoop:
+	for {
+		resp, err := c.ectdAPI.Get(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return nil, err
+				}
+				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return nil, err
+		}
+
+		for _, ev := range resp.Kvs {
+			// Only one value is returned
+			return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+		}
+		return nil, nil
+	}
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. The context (deadline/cancellation)
+// bounds how long the call will wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
+	// that KV key permanent instead of automatically removing it after a lease expiration
+	c.keyReservationsLock.RLock()
+	leaseID, ok := c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+	attempt := 0
+startLoop:
+	for {
+		var err error
+		if ok {
+			_, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
+		} else {
+			_, err = c.ectdAPI.Put(ctx, key, val)
+		}
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		return nil
+	}
+}
+
+// Delete removes a key from the KV store. The context (deadline/cancellation)
+// bounds how long the call will wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+
+	attempt := 0
+startLoop:
+	for {
+		_, err := c.ectdAPI.Delete(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+		return nil
+	}
+}
+
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+	//delete the prefix
+	if _, err := c.ectdAPI.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+	return nil
+}
+
+// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or
+// []byte since the etcd API accepts only a string.  The context bounds how long the call will wait
+// for a response, while TTL defines how long the reservation remains valid; when the TTL expires
+// the key is unreserved by the KV store itself.
+// If the key is acquired then the value returned will be the value passed in.  If the key is already
+// acquired then the value assigned to that key will be returned.
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return nil, fmt.Errorf("unexpected-type%T", value)
+	}
+
+	resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+	// Register the lease id
+	c.keyReservationsLock.Lock()
+	c.keyReservations[key] = &resp.ID
+	c.keyReservationsLock.Unlock()
+
+	// Revoke lease if reservation is not successful
+	reservationSuccessful := false
+	defer func() {
+		if !reservationSuccessful {
+			if err = c.ReleaseReservation(context.Background(), key); err != nil {
+				logger.Error(ctx, "cannot-release-lease")
+			}
+		}
+	}()
+
+	// Try to grab the key with the above lease
+	txn := c.ectdAPI.Txn(context.Background())
+	txn = txn.If(v3Client.Compare(v3Client.Version(key), "=", 0))
+	txn = txn.Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID)))
+	txn = txn.Else(v3Client.OpGet(key))
+	result, er := txn.Commit()
+	if er != nil {
+		return nil, er
+	}
+
+	if !result.Succeeded {
+		// Verify whether we are already the owner of that Key
+		if len(result.Responses) > 0 &&
+			len(result.Responses[0].GetResponseRange().Kvs) > 0 {
+			kv := result.Responses[0].GetResponseRange().Kvs[0]
+			if string(kv.Value) == val {
+				reservationSuccessful = true
+				return value, nil
+			}
+			return kv.Value, nil
+		}
+	} else {
+		// Read the Key to ensure this is our Key
+		m, err := c.Get(ctx, key)
+		if err != nil {
+			return nil, err
+		}
+		if m != nil {
+			if m.Key == key && isEqual(m.Value, value) {
+				// My reservation is successful - register it.  For now, support is only for 1 reservation per key
+				// per session.
+				reservationSuccessful = true
+				return value, nil
+			}
+			// My reservation has failed.  Return the owner of that key
+			return m.Value, nil
+		}
+	}
+	return nil, nil
+}
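+// Usage sketch (illustrative): a typical reservation lifecycle; the key and
+// value literals are hypothetical:
+//
+//	val, err := client.Reserve(ctx, "leader", "core-1", 10*time.Second)
+//	if err == nil && isEqual(val, "core-1") {
+//		// we own the key; renew it periodically before the TTL lapses
+//		_ = client.RenewReservation(ctx, "leader")
+//		// and release it on shutdown
+//		defer func() { _ = client.ReleaseReservation(context.Background(), "leader") }()
+//	}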
+
+// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
+
+	for key, leaseID := range c.keyReservations {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// ReleaseReservation releases reservation for a specific key.
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+	// Get the leaseid using the key
+	logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
+	if leaseID, ok = c.keyReservations[key]; !ok {
+		return nil
+	}
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Error(ctx, err)
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// RenewReservation renews a reservation.  A reservation goes stale after the TTL
+// (Time To Live) specified when the key was reserved
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+	// Get the leaseid using the key
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.keyReservationsLock.RLock()
+	leaseID, ok = c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+
+	if !ok {
+		return errors.New("key-not-reserved")
+	}
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
+			return err
+		}
+	} else {
+		return errors.New("lease-expired")
+	}
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel on which the caller
+// needs to listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+	w := v3Client.NewWatcher(c.ectdAPI)
+	ctx, cancel := context.WithCancel(ctx)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Log only the number of channels (rather than channelMaps itself), as the
+	// underlying logger cannot format a map of channels as JSON
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
+
+	return ch
+
+}
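+// Usage sketch (illustrative): consume events until the channel is closed, then
+// tear the watch down via CloseWatch; the key is hypothetical:
+//
+//	ch := client.Watch(ctx, "service/voltha/devices", true)
+//	go func() {
+//		for event := range ch {
+//			switch event.EventType {
+//			case PUT:
+//				// handle create/update
+//			case DELETE:
+//				// handle removal
+//			}
+//		}
+//	}()
+//	// later: client.CloseWatch(ctx, "service/voltha/devices", ch)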
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug(ctx, "channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes the KV store client
+func (c *EtcdClient) Close(ctx context.Context) {
+	if err := c.ectdAPI.Close(); err != nil {
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
+	}
+}
+
+func (c *EtcdClient) addLockName(lockName string, lock *v3Concurrency.Mutex, session *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	c.lockToMutexMap[lockName] = lock
+	c.lockToSessionMap[lockName] = session
+}
+
+func (c *EtcdClient) deleteLockName(lockName string) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	delete(c.lockToMutexMap, lockName)
+	delete(c.lockToSessionMap, lockName)
+}
+
+func (c *EtcdClient) getLock(lockName string) (*v3Concurrency.Mutex, *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	var lock *v3Concurrency.Mutex
+	var session *v3Concurrency.Session
+	if l, exist := c.lockToMutexMap[lockName]; exist {
+		lock = l
+	}
+	if s, exist := c.lockToSessionMap[lockName]; exist {
+		session = s
+	}
+	return lock, session
+}
+
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+	// Fail fast if the session cannot be created; a nil session would panic
+	// when the mutex is used
+	session, err := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
+	if err != nil {
+		return err
+	}
+	mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
+	if err := mu.Lock(context.Background()); err != nil {
+		return err
+	}
+	c.addLockName(lockName, mu, session)
+	return nil
+}
+
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	lock, session := c.getLock(lockName)
+	var err error
+	if lock != nil {
+		if e := lock.Unlock(context.Background()); e != nil {
+			err = e
+		}
+	}
+	if session != nil {
+		if e := session.Close(); e != nil {
+			err = e
+		}
+	}
+	c.deleteLockName(lockName)
+
+	return err
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
new file mode 100644
index 0000000..946dbf2
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+const (
+	minRetryInterval  = 100
+	maxRetryInterval  = 5000
+	incrementalFactor = 1.2
+	jitter            = 0.2
+)
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// isEqual verifies whether the contents of two interface values are the same,
+// focusing on the []byte and string types
+func isEqual(val1 interface{}, val2 interface{}) bool {
+	b1, err := ToByte(val1)
+	b2, er := ToByte(val2)
+	if err == nil && er == nil {
+		return bytes.Equal(b1, b2)
+	}
+	return val1 == val2
+}
+
+// backoff waits an amount of time that grows exponentially with the attempt value,
+// bounded between minRetryInterval and maxRetryInterval, with random jitter applied.
+func backoff(ctx context.Context, attempt int) error {
+	if attempt == 0 {
+		return nil
+	}
+	backoff := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
+	// apply the +/-jitter in floating point; an integer multiplier would
+	// truncate the fractional factor to zero and disable jitter entirely
+	backoff = int(float64(backoff) * (1 + jitter*(rand.Float64()*2-1)))
+	if backoff > maxRetryInterval {
+		backoff = maxRetryInterval
+	}
+	ticker := time.NewTicker(time.Duration(backoff) * time.Millisecond)
+	defer ticker.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-ticker.C:
+	}
+	return nil
+}
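+// Worked example (approximate, ignoring jitter): attempt 1 waits about
+// 100 + 1.2*e^1 = 103ms, attempt 5 about 100 + 1.2*e^5 = 278ms, and from
+// attempt 9 onward the computed wait (about 9824ms) exceeds maxRetryInterval,
+// so it is capped at 5000ms.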
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
new file mode 100644
index 0000000..df3e839
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package events
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
new file mode 100644
index 0000000..35f821f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package eventif
+
+import (
+	"context"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// EventProxy interface for eventproxy
+type EventProxy interface {
+	SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+	SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+	SendRPCEvent(ctx context.Context, id string, deviceEvent *voltha.RPCEvent, category EventCategory,
+		subCategory *EventSubCategory, raisedTs int64) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
+}
+
+const (
+	EventTypeVersion = "0.1"
+)
+
+type (
+	EventType        = voltha.EventType_Types
+	EventCategory    = voltha.EventCategory_Types
+	EventSubCategory = voltha.EventSubCategory_Types
+)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
new file mode 100644
index 0000000..19a4f26
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package events
+
+import (
+	"container/ring"
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// TODO: Make configurable through helm chart
+const EVENT_THRESHOLD = 1000
+
+type lastEvent struct{}
+
+type EventProxy struct {
+	kafkaClient    kafka.Client
+	eventTopic     kafka.Topic
+	eventQueue     *EventQueue
+	queueCtx       context.Context
+	queueCancelCtx context.CancelFunc
+}
+
+func NewEventProxy(opts ...EventProxyOption) *EventProxy {
+	var proxy EventProxy
+	for _, option := range opts {
+		option(&proxy)
+	}
+	proxy.eventQueue = newEventQueue()
+	proxy.queueCtx, proxy.queueCancelCtx = context.WithCancel(context.Background())
+	return &proxy
+}
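+// Construction sketch (illustrative): the proxy is configured through
+// functional options; the topic name is hypothetical:
+//
+//	ep := NewEventProxy(
+//		MsgClient(kafkaClient),
+//		MsgTopic(kafka.Topic{Name: "voltha.events"}))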
+
+type EventProxyOption func(*EventProxy)
+
+func MsgClient(client kafka.Client) EventProxyOption {
+	return func(args *EventProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func MsgTopic(topic kafka.Topic) EventProxyOption {
+	return func(args *EventProxy) {
+		args.eventTopic = topic
+	}
+}
+
+func (ep *EventProxy) formatId(eventName string) string {
+	return fmt.Sprintf("Voltha.openolt.%s.%s", eventName, strconv.FormatInt(time.Now().UnixNano(), 10))
+}
+
+func (ep *EventProxy) getEventHeader(eventName string,
+	category eventif.EventCategory,
+	subCategory *eventif.EventSubCategory,
+	eventType eventif.EventType,
+	raisedTs int64) (*voltha.EventHeader, error) {
+	var header voltha.EventHeader
+	if strings.Contains(eventName, "_") {
+		eventName = strings.Join(strings.Split(eventName, "_")[:len(strings.Split(eventName, "_"))-2], "_")
+	} else {
+		eventName = "UNKNOWN_EVENT"
+	}
+	/* Populating event header */
+	header.Id = ep.formatId(eventName)
+	header.Category = category
+	if subCategory != nil {
+		header.SubCategory = *subCategory
+	} else {
+		header.SubCategory = voltha.EventSubCategory_NONE
+	}
+	header.Type = eventType
+	header.TypeVersion = eventif.EventTypeVersion
+
+	// raisedTs is in seconds
+	timestamp, err := ptypes.TimestampProto(time.Unix(raisedTs, 0))
+	if err != nil {
+		return nil, err
+	}
+	header.RaisedTs = timestamp
+
+	timestamp, err = ptypes.TimestampProto(time.Now())
+	if err != nil {
+		return nil, err
+	}
+	header.ReportedTs = timestamp
+
+	return &header, nil
+}
+
+/* Send out rpc events*/
+func (ep *EventProxy) SendRPCEvent(ctx context.Context, id string, rpcEvent *voltha.RPCEvent, category eventif.EventCategory, subCategory *eventif.EventSubCategory, raisedTs int64) error {
+	if rpcEvent == nil {
+		logger.Error(ctx, "Received empty rpc event")
+		return errors.New("rpc event nil")
+	}
+	var event voltha.Event
+	var err error
+	if event.Header, err = ep.getEventHeader(id, category, subCategory, voltha.EventType_RPC_EVENT, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &voltha.Event_RpcEvent{RpcEvent: rpcEvent}
+	ep.eventQueue.push(&event)
+	return nil
+
+}
+
+/* Send out device events*/
+func (ep *EventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64) error {
+	if deviceEvent == nil {
+		logger.Error(ctx, "Recieved empty device event")
+		return errors.New("Device event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_DeviceEvent
+	var err error
+	de.DeviceEvent = deviceEvent
+	if event.Header, err = ep.getEventHeader(deviceEvent.DeviceEventName, category, &subCategory, voltha.EventType_DEVICE_EVENT, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &de
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+		return err
+	}
+	logger.Infow(ctx, "Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
+		"DeviceEventName": deviceEvent.DeviceEventName})
+
+	return nil
+
+}
+
+// SendKpiEvent is to send kpi events to voltha.event topic
+func (ep *EventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64) error {
+	if kpiEvent == nil {
+		logger.Error(ctx, "Recieved empty kpi event")
+		return errors.New("KPI event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_KpiEvent2
+	var err error
+	de.KpiEvent2 = kpiEvent
+	if event.Header, err = ep.getEventHeader(id, category, &subCategory, voltha.EventType_KPI_EVENT2, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &de
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+		return err
+	}
+	logger.Infow(ctx, "Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
+
+	return nil
+
+}
+
+func (ep *EventProxy) sendEvent(ctx context.Context, event *voltha.Event) error {
+	logger.Debugw(ctx, "Send event to kafka", log.Fields{"event": event})
+	if err := ep.kafkaClient.Send(ctx, event, &ep.eventTopic); err != nil {
+		return err
+	}
+	logger.Debugw(ctx, "Sent event to kafka", log.Fields{"event": event})
+
+	return nil
+}
+
+func (ep *EventProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return ep.kafkaClient.EnableLivenessChannel(ctx, enable)
+}
+
+func (ep *EventProxy) SendLiveness(ctx context.Context) error {
+	return ep.kafkaClient.SendLiveness(ctx)
+}
+
+// Start the event proxy
+func (ep *EventProxy) Start() {
+	eq := ep.eventQueue
+	go eq.start(ep.queueCtx)
+	logger.Debugw(context.Background(), "event-proxy-starting...", log.Fields{"events-threashold": EVENT_THRESHOLD})
+	for {
+		// Notify the queue I am ready
+		eq.readyToSendToKafkaCh <- struct{}{}
+		// Wait for an event
+		elem, ok := <-eq.eventChannel
+		if !ok {
+			logger.Debug(context.Background(), "event-channel-closed-exiting")
+			break
+		}
+		// Check for last event
+		if _, ok := elem.(*lastEvent); ok {
+			// close the queuing loop
+			logger.Info(context.Background(), "received-last-event")
+			ep.queueCancelCtx()
+			break
+		}
+		ctx := context.Background()
+		event, ok := elem.(*voltha.Event)
+		if !ok {
+			logger.Warnw(ctx, "invalid-event", log.Fields{"element": elem})
+			continue
+		}
+		if err := ep.sendEvent(ctx, event); err != nil {
+			logger.Errorw(ctx, "failed-to-send-event-to-kafka-bus", log.Fields{"event": event})
+		} else {
+			logger.Debugw(ctx, "successfully-sent-rpc-event-to-kafka-bus", log.Fields{"id": event.Header.Id, "category": event.Header.Category,
+				"sub-category": event.Header.SubCategory, "type": event.Header.Type, "type-version": event.Header.TypeVersion,
+				"reported-ts": event.Header.ReportedTs, "event-type": event.EventType})
+		}
+	}
+}
+
+func (ep *EventProxy) Stop() {
+	ep.eventQueue.stop()
+}
+
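+// Illustrative lifecycle sketch (not part of the library): Start blocks while
+// draining the queue, so it typically runs in its own goroutine; Stop flushes
+// the queue via a lastEvent marker and then closes the internal channels. The
+// NewEventProxy options shown are assumptions about this package's
+// constructor.
+//
+//	ep := NewEventProxy(MsgClient(kafkaClient), MsgTopic(kafka.Topic{Name: "voltha.events"}))
+//	go ep.Start()
+//	// ... send events ...
+//	ep.Stop()
+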
+type EventQueue struct {
+	mutex                sync.RWMutex
+	eventChannel         chan interface{}
+	insertPosition       *ring.Ring
+	popPosition          *ring.Ring
+	dataToSendAvailable  chan struct{}
+	readyToSendToKafkaCh chan struct{}
+	eventQueueStopped    chan struct{}
+}
+
+func newEventQueue() *EventQueue {
+	ev := &EventQueue{
+		eventChannel:         make(chan interface{}),
+		insertPosition:       ring.New(EVENT_THRESHOLD),
+		dataToSendAvailable:  make(chan struct{}),
+		readyToSendToKafkaCh: make(chan struct{}),
+		eventQueueStopped:    make(chan struct{}),
+	}
+	ev.popPosition = ev.insertPosition
+	return ev
+}
+
+// push adds an event to the back of the queue
+func (eq *EventQueue) push(event interface{}) {
+	eq.mutex.Lock()
+
+	if eq.insertPosition != nil {
+		// Handle the case where the queue is full.
+		// TODO: The current default is to overwrite old data if the queue is full. Is there a need to
+		// block the caller if the max threshold is reached?
+		if eq.insertPosition.Value != nil && eq.insertPosition == eq.popPosition {
+			eq.popPosition = eq.popPosition.Next()
+		}
+
+		// Insert data and move pointer to next empty position
+		eq.insertPosition.Value = event
+		eq.insertPosition = eq.insertPosition.Next()
+
+		// Check for last event
+		if _, ok := event.(*lastEvent); ok {
+			eq.insertPosition = nil
+		}
+		eq.mutex.Unlock()
+		// Notify waiting thread of data availability
+		eq.dataToSendAvailable <- struct{}{}
+
+	} else {
+		logger.Debug(context.Background(), "event-queue-is-closed-as-insert-position-is-cleared")
+		eq.mutex.Unlock()
+	}
+}
+
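+// Illustrative sketch of the overwrite-on-full behaviour (not part of the
+// library): with EVENT_THRESHOLD slots and start(ctx) already consuming in a
+// goroutine, pushing more events than the ring can hold advances the pop
+// position and silently drops the oldest event. burstOfEvents is a
+// hypothetical []*voltha.Event slice.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	eq := newEventQueue()
+//	go eq.start(ctx)
+//	for _, ev := range burstOfEvents {
+//		eq.push(ev) // oldest entries are overwritten once the ring is full
+//	}
+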
+// start starts the routine that extracts an element from the event queue and
+// sends it to the kafka sending routine for processing.
+func (eq *EventQueue) start(ctx context.Context) {
+	logger.Info(ctx, "starting-event-queue")
+loop:
+	for {
+		select {
+		case <-eq.dataToSendAvailable:
+		// Do nothing - used to prevent a caller pushing data from blocking
+		case <-eq.readyToSendToKafkaCh:
+			{
+				// Kafka sending routine is ready to process an event
+				eq.mutex.Lock()
+				element := eq.popPosition.Value
+				if element == nil {
+					// No events to send. Wait
+					eq.mutex.Unlock()
+					select {
+					case _, ok := <-eq.dataToSendAvailable:
+						if !ok {
+							// channel closed
+							eq.eventQueueStopped <- struct{}{}
+							return
+						}
+					case <-ctx.Done():
+						logger.Info(ctx, "event-queue-context-done")
+						eq.eventQueueStopped <- struct{}{}
+						return
+					}
+					eq.mutex.Lock()
+					element = eq.popPosition.Value
+				}
+				eq.popPosition.Value = nil
+				eq.popPosition = eq.popPosition.Next()
+				eq.mutex.Unlock()
+				eq.eventChannel <- element
+			}
+		case <-ctx.Done():
+			logger.Info(ctx, "event-queue-context-done")
+			eq.eventQueueStopped <- struct{}{}
+			break loop
+		}
+	}
+	logger.Info(ctx, "event-queue-stopped")
+
+}
+
+func (eq *EventQueue) stop() {
+	// Flush all
+	eq.push(&lastEvent{})
+	<-eq.eventQueueStopped
+	eq.mutex.Lock()
+	close(eq.readyToSendToKafkaCh)
+	close(eq.dataToSendAvailable)
+	close(eq.eventChannel)
+	eq.mutex.Unlock()
+
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
new file mode 100644
index 0000000..fe3a017
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package events
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+type ContextType string
+
+const (
+	// ContextAdminState is for the admin state of the Device in the context of the event
+	ContextAdminState ContextType = "admin-state"
+	// ContextConnectState is for the connect state of the Device in the context of the event
+	ContextConnectState ContextType = "connect-state"
+	// ContextOperState is for the operational state of the Device in the context of the event
+	ContextOperState ContextType = "oper-state"
+	// ContextPrevAdminState is for the previous admin state of the Device in the context of the event
+	ContextPrevAdminState ContextType = "prev-admin-state"
+	// ContextPrevConnectState is for the previous connect state of the Device in the context of the event
+	ContextPrevConnectState ContextType = "prev-connect-state"
+	// ContextPrevOperState is for the previous operational state of the Device in the context of the event
+	ContextPrevOperState ContextType = "prev-oper-state"
+	// ContextDeviceID is for the device ID in the context of the event
+	ContextDeviceID ContextType = "id"
+	// ContextParentID is for the parent id in the context of the event
+	ContextParentID ContextType = "parent-id"
+	// ContextSerialNumber is for the serial number of the Device in the context of the event
+	ContextSerialNumber ContextType = "serial-number"
+	// ContextIsRoot is for the root flag of Device in the context of the event
+	ContextIsRoot ContextType = "is-root"
+	// ContextParentPort is for the parent interface id of child in the context of the event
+	ContextParentPort ContextType = "parent-port"
+)
+
+type EventName string
+
+const (
+	DeviceStateChangeEvent EventName = "DEVICE_STATE_CHANGE"
+)
+
+type EventAction string
+
+const (
+	Raise EventAction = "RAISE_EVENT"
+	Clear EventAction = "CLEAR_EVENT"
+)
+
+//CreateDeviceStateChangeEvent forms and returns a new DeviceStateChange Event
+func CreateDeviceStateChangeEvent(serialNumber string, deviceID string, parentID string,
+	prevOperStatus voltha.OperStatus_Types, prevConnStatus voltha.ConnectStatus_Types, prevAdminStatus voltha.AdminState_Types,
+	operStatus voltha.OperStatus_Types, connStatus voltha.ConnectStatus_Types, adminStatus voltha.AdminState_Types,
+	parentPort uint32, isRoot bool) *voltha.DeviceEvent {
+
+	context := make(map[string]string)
+	/* Populating event context */
+	context[string(ContextSerialNumber)] = serialNumber
+	context[string(ContextDeviceID)] = deviceID
+	context[string(ContextParentID)] = parentID
+	context[string(ContextPrevOperState)] = prevOperStatus.String()
+	context[string(ContextPrevConnectState)] = prevConnStatus.String()
+	context[string(ContextPrevAdminState)] = prevAdminStatus.String()
+	context[string(ContextOperState)] = operStatus.String()
+	context[string(ContextConnectState)] = connStatus.String()
+	context[string(ContextAdminState)] = adminStatus.String()
+	context[string(ContextIsRoot)] = strconv.FormatBool(isRoot)
+	context[string(ContextParentPort)] = strconv.FormatUint(uint64(parentPort), 10)
+
+	return &voltha.DeviceEvent{
+		Context:         context,
+		ResourceId:      deviceID,
+		DeviceEventName: fmt.Sprintf("%s_%s", string(DeviceStateChangeEvent), string(Raise)),
+	}
+}
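+
+// Illustrative note (not part of the library): for a call with serialNumber
+// "SN1", deviceID "dev-1" and isRoot true, the returned event carries
+// DeviceEventName "DEVICE_STATE_CHANGE_RAISE_EVENT", ResourceId "dev-1", and
+// a context map with keys such as "serial-number", "id", "is-root" ("true")
+// and "parent-port".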
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
new file mode 100644
index 0000000..beb0574
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
new file mode 100644
index 0000000..ff6aaf0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
@@ -0,0 +1,1608 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"sort"
+
+	"github.com/cevaris/ordered_map"
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
+)
+
+var (
+	// Instructions shortcut
+	APPLY_ACTIONS  = ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS
+	WRITE_METADATA = ofp.OfpInstructionType_OFPIT_WRITE_METADATA
+	METER_ACTION   = ofp.OfpInstructionType_OFPIT_METER
+
+	//OFPAT_* shortcuts
+	OUTPUT       = ofp.OfpActionType_OFPAT_OUTPUT
+	COPY_TTL_OUT = ofp.OfpActionType_OFPAT_COPY_TTL_OUT
+	COPY_TTL_IN  = ofp.OfpActionType_OFPAT_COPY_TTL_IN
+	SET_MPLS_TTL = ofp.OfpActionType_OFPAT_SET_MPLS_TTL
+	DEC_MPLS_TTL = ofp.OfpActionType_OFPAT_DEC_MPLS_TTL
+	PUSH_VLAN    = ofp.OfpActionType_OFPAT_PUSH_VLAN
+	POP_VLAN     = ofp.OfpActionType_OFPAT_POP_VLAN
+	PUSH_MPLS    = ofp.OfpActionType_OFPAT_PUSH_MPLS
+	POP_MPLS     = ofp.OfpActionType_OFPAT_POP_MPLS
+	SET_QUEUE    = ofp.OfpActionType_OFPAT_SET_QUEUE
+	GROUP        = ofp.OfpActionType_OFPAT_GROUP
+	SET_NW_TTL   = ofp.OfpActionType_OFPAT_SET_NW_TTL
+	NW_TTL       = ofp.OfpActionType_OFPAT_DEC_NW_TTL
+	SET_FIELD    = ofp.OfpActionType_OFPAT_SET_FIELD
+	PUSH_PBB     = ofp.OfpActionType_OFPAT_PUSH_PBB
+	POP_PBB      = ofp.OfpActionType_OFPAT_POP_PBB
+	EXPERIMENTER = ofp.OfpActionType_OFPAT_EXPERIMENTER
+
+	//OFPXMT_OFB_* shortcuts (incomplete)
+	IN_PORT         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+	IN_PHY_PORT     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT
+	METADATA        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_METADATA
+	ETH_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST
+	ETH_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC
+	ETH_TYPE        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE
+	VLAN_VID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID
+	VLAN_PCP        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP
+	IP_DSCP         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP
+	IP_ECN          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN
+	IP_PROTO        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO
+	IPV4_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC
+	IPV4_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST
+	TCP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC
+	TCP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST
+	UDP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC
+	UDP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST
+	SCTP_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC
+	SCTP_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST
+	ICMPV4_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE
+	ICMPV4_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE
+	ARP_OP          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP
+	ARP_SPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA
+	ARP_TPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA
+	ARP_SHA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA
+	ARP_THA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA
+	IPV6_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC
+	IPV6_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST
+	IPV6_FLABEL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL
+	ICMPV6_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE
+	ICMPV6_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE
+	IPV6_ND_TARGET  = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET
+	OFB_IPV6_ND_SLL = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL
+	IPV6_ND_TLL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL
+	MPLS_LABEL      = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL
+	MPLS_TC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC
+	MPLS_BOS        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS
+	PBB_ISID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID
+	TUNNEL_ID       = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID
+	IPV6_EXTHDR     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR
+)
+
+//ofp_action_* shortcuts
+
+func Output(port uint32, maxLen ...ofp.OfpControllerMaxLen) *ofp.OfpAction {
+	maxLength := ofp.OfpControllerMaxLen_OFPCML_MAX
+	if len(maxLen) > 0 {
+		maxLength = maxLen[0]
+	}
+	return &ofp.OfpAction{Type: OUTPUT, Action: &ofp.OfpAction_Output{Output: &ofp.OfpActionOutput{Port: port, MaxLen: uint32(maxLength)}}}
+}
+
+func MplsTtl(ttl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: SET_MPLS_TTL, Action: &ofp.OfpAction_MplsTtl{MplsTtl: &ofp.OfpActionMplsTtl{MplsTtl: ttl}}}
+}
+
+func PushVlan(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: PUSH_VLAN, Action: &ofp.OfpAction_Push{Push: &ofp.OfpActionPush{Ethertype: ethType}}}
+}
+
+func PopVlan() *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_VLAN}
+}
+
+func PopMpls(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_MPLS, Action: &ofp.OfpAction_PopMpls{PopMpls: &ofp.OfpActionPopMpls{Ethertype: ethType}}}
+}
+
+func Group(groupId uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: GROUP, Action: &ofp.OfpAction_Group{Group: &ofp.OfpActionGroup{GroupId: groupId}}}
+}
+
+func NwTtl(nwTtl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: NW_TTL, Action: &ofp.OfpAction_NwTtl{NwTtl: &ofp.OfpActionNwTtl{NwTtl: nwTtl}}}
+}
+
+func SetField(field *ofp.OfpOxmOfbField) *ofp.OfpAction {
+	actionSetField := &ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: field}}
+	return &ofp.OfpAction{Type: SET_FIELD, Action: &ofp.OfpAction_SetField{SetField: &ofp.OfpActionSetField{Field: actionSetField}}}
+}
+
+func Experimenter(experimenter uint32, data []byte) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: EXPERIMENTER, Action: &ofp.OfpAction_Experimenter{Experimenter: &ofp.OfpActionExperimenter{Experimenter: experimenter, Data: data}}}
+}
+
+//ofb_field generators (incomplete set)
+
+func InPort(inPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPort}}
+}
+
+func InPhyPort(inPhyPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PHY_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPhyPort}}
+}
+
+func Metadata_ofp(tableMetadata uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: METADATA, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: tableMetadata}}
+}
+
+// TODO: should Metadata_ofp be used here?
+func EthDst(ethDst uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_DST, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethDst}}
+}
+
+// TODO: should Metadata_ofp be used here?
+func EthSrc(ethSrc uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_SRC, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethSrc}}
+}
+
+func EthType(ethType uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_TYPE, Value: &ofp.OfpOxmOfbField_EthType{EthType: ethType}}
+}
+
+func VlanVid(vlanVid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_VID, Value: &ofp.OfpOxmOfbField_VlanVid{VlanVid: vlanVid}}
+}
+
+func VlanPcp(vlanPcp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_PCP, Value: &ofp.OfpOxmOfbField_VlanPcp{VlanPcp: vlanPcp}}
+}
+
+func IpDscp(ipDscp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_DSCP, Value: &ofp.OfpOxmOfbField_IpDscp{IpDscp: ipDscp}}
+}
+
+func IpEcn(ipEcn uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_ECN, Value: &ofp.OfpOxmOfbField_IpEcn{IpEcn: ipEcn}}
+}
+
+func IpProto(ipProto uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_PROTO, Value: &ofp.OfpOxmOfbField_IpProto{IpProto: ipProto}}
+}
+
+func Ipv4Src(ipv4Src uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_SRC, Value: &ofp.OfpOxmOfbField_Ipv4Src{Ipv4Src: ipv4Src}}
+}
+
+func Ipv4Dst(ipv4Dst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_DST, Value: &ofp.OfpOxmOfbField_Ipv4Dst{Ipv4Dst: ipv4Dst}}
+}
+
+func TcpSrc(tcpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_SRC, Value: &ofp.OfpOxmOfbField_TcpSrc{TcpSrc: tcpSrc}}
+}
+
+func TcpDst(tcpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_DST, Value: &ofp.OfpOxmOfbField_TcpDst{TcpDst: tcpDst}}
+}
+
+func UdpSrc(udpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_SRC, Value: &ofp.OfpOxmOfbField_UdpSrc{UdpSrc: udpSrc}}
+}
+
+func UdpDst(udpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_DST, Value: &ofp.OfpOxmOfbField_UdpDst{UdpDst: udpDst}}
+}
+
+func SctpSrc(sctpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_SRC, Value: &ofp.OfpOxmOfbField_SctpSrc{SctpSrc: sctpSrc}}
+}
+
+func SctpDst(sctpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_DST, Value: &ofp.OfpOxmOfbField_SctpDst{SctpDst: sctpDst}}
+}
+
+func Icmpv4Type(icmpv4Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv4Type{Icmpv4Type: icmpv4Type}}
+}
+
+func Icmpv4Code(icmpv4Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_CODE, Value: &ofp.OfpOxmOfbField_Icmpv4Code{Icmpv4Code: icmpv4Code}}
+}
+
+func ArpOp(arpOp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_OP, Value: &ofp.OfpOxmOfbField_ArpOp{ArpOp: arpOp}}
+}
+
+func ArpSpa(arpSpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SPA, Value: &ofp.OfpOxmOfbField_ArpSpa{ArpSpa: arpSpa}}
+}
+
+func ArpTpa(arpTpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_TPA, Value: &ofp.OfpOxmOfbField_ArpTpa{ArpTpa: arpTpa}}
+}
+
+func ArpSha(arpSha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SHA, Value: &ofp.OfpOxmOfbField_ArpSha{ArpSha: arpSha}}
+}
+
+func ArpTha(arpTha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_THA, Value: &ofp.OfpOxmOfbField_ArpTha{ArpTha: arpTha}}
+}
+
+func Ipv6Src(ipv6Src []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_SRC, Value: &ofp.OfpOxmOfbField_Ipv6Src{Ipv6Src: ipv6Src}}
+}
+
+func Ipv6Dst(ipv6Dst []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_DST, Value: &ofp.OfpOxmOfbField_Ipv6Dst{Ipv6Dst: ipv6Dst}}
+}
+
+func Ipv6Flabel(ipv6Flabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_FLABEL, Value: &ofp.OfpOxmOfbField_Ipv6Flabel{Ipv6Flabel: ipv6Flabel}}
+}
+
+func Icmpv6Type(icmpv6Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv6Type{Icmpv6Type: icmpv6Type}}
+}
+
+func Icmpv6Code(icmpv6Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_CODE, Value: &ofp.OfpOxmOfbField_Icmpv6Code{Icmpv6Code: icmpv6Code}}
+}
+
+func Ipv6NdTarget(ipv6NdTarget []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TARGET, Value: &ofp.OfpOxmOfbField_Ipv6NdTarget{Ipv6NdTarget: ipv6NdTarget}}
+}
+
+func OfbIpv6NdSll(ofbIpv6NdSll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: OFB_IPV6_ND_SLL, Value: &ofp.OfpOxmOfbField_Ipv6NdSsl{Ipv6NdSsl: ofbIpv6NdSll}}
+}
+
+func Ipv6NdTll(ipv6NdTll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TLL, Value: &ofp.OfpOxmOfbField_Ipv6NdTll{Ipv6NdTll: ipv6NdTll}}
+}
+
+func MplsLabel(mplsLabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_LABEL, Value: &ofp.OfpOxmOfbField_MplsLabel{MplsLabel: mplsLabel}}
+}
+
+func MplsTc(mplsTc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_TC, Value: &ofp.OfpOxmOfbField_MplsTc{MplsTc: mplsTc}}
+}
+
+func MplsBos(mplsBos uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_BOS, Value: &ofp.OfpOxmOfbField_MplsBos{MplsBos: mplsBos}}
+}
+
+func PbbIsid(pbbIsid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: PBB_ISID, Value: &ofp.OfpOxmOfbField_PbbIsid{PbbIsid: pbbIsid}}
+}
+
+func TunnelId(tunnelId uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TUNNEL_ID, Value: &ofp.OfpOxmOfbField_TunnelId{TunnelId: tunnelId}}
+}
+
+func Ipv6Exthdr(ipv6Exthdr uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_EXTHDR, Value: &ofp.OfpOxmOfbField_Ipv6Exthdr{Ipv6Exthdr: ipv6Exthdr}}
+}
+
+//frequently used extractors
+
+func excludeAction(action *ofp.OfpAction, exclude ...ofp.OfpActionType) bool {
+	for _, actionToExclude := range exclude {
+		if action.Type == actionToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetActions(flow *ofp.OfpFlowStats, exclude ...ofp.OfpActionType) []*ofp.OfpAction {
+	if flow == nil {
+		return nil
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			if len(exclude) == 0 {
+				return instActions.Actions
+			} else {
+				filteredAction := make([]*ofp.OfpAction, 0)
+				for _, action := range instActions.Actions {
+					if !excludeAction(action, exclude...) {
+						filteredAction = append(filteredAction, action)
+					}
+				}
+				return filteredAction
+			}
+		}
+	}
+	return nil
+}
+
+func UpdateOutputPortByActionType(flow *ofp.OfpFlowStats, actionType uint32, toPort uint32) *ofp.OfpFlowStats {
+	if flow == nil {
+		return nil
+	}
+	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	nFlow.Instructions = nil
+	nInsts := make([]*ofp.OfpInstruction, 0)
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == actionType {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			nActions := make([]*ofp.OfpAction, 0)
+			for _, action := range instActions.Actions {
+				if action.GetOutput() != nil {
+					nActions = append(nActions, Output(toPort))
+				} else {
+					nActions = append(nActions, action)
+				}
+			}
+			instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: nActions}}
+			nInsts = append(nInsts, &ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction})
+		} else {
+			nInsts = append(nInsts, instruction)
+		}
+	}
+	nFlow.Instructions = nInsts
+	return nFlow
+}
+
+func excludeOxmOfbField(field *ofp.OfpOxmOfbField, exclude ...ofp.OxmOfbFieldTypes) bool {
+	for _, fieldToExclude := range exclude {
+		if field.Type == fieldToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetOfbFields(flow *ofp.OfpFlowStats, exclude ...ofp.OxmOfbFieldTypes) []*ofp.OfpOxmOfbField {
+	if flow == nil || flow.Match == nil || flow.Match.Type != ofp.OfpMatchType_OFPMT_OXM {
+		return nil
+	}
+	ofbFields := make([]*ofp.OfpOxmOfbField, 0)
+	for _, field := range flow.Match.OxmFields {
+		if field.OxmClass == ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+			ofbFields = append(ofbFields, field.GetOfbField())
+		}
+	}
+	if len(exclude) == 0 {
+		return ofbFields
+	} else {
+		filteredFields := make([]*ofp.OfpOxmOfbField, 0)
+		for _, ofbField := range ofbFields {
+			if !excludeOxmOfbField(ofbField, exclude...) {
+				filteredFields = append(filteredFields, ofbField)
+			}
+		}
+		return filteredFields
+	}
+}
+
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
+func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == OUTPUT {
+			out := action.GetOutput()
+			if out == nil {
+				return 0
+			}
+			return out.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetInPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == IN_PORT {
+			return field.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetGotoTableId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE) {
+			gotoTable := instruction.GetGotoTable()
+			if gotoTable == nil {
+				return 0
+			}
+			return gotoTable.GetTableId()
+		}
+	}
+	return 0
+}
+
+func GetMeterId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_METER) {
+			MeterInstruction := instruction.GetMeter()
+			if MeterInstruction == nil {
+				return 0
+			}
+			return MeterInstruction.GetMeterId()
+		}
+	}
+	return 0
+}
+
+func GetVlanVid(flow *ofp.OfpFlowStats) *uint32 {
+	if flow == nil {
+		return nil
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == VLAN_VID {
+			ret := field.GetVlanVid()
+			return &ret
+		}
+	}
+	// Don't return 0 if the field is missing, as VLAN ID 0 has meaning and cannot be overloaded to mean "not found"
+	return nil
+}
+
+func GetSetActionField(ctx context.Context, flow *ofp.OfpFlowStats, ofbType ofp.OxmOfbFieldTypes) (uint32, bool) {
+	if flow == nil {
+		return 0, false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(APPLY_ACTIONS) {
+			actions := instruction.GetActions()
+			for _, action := range actions.GetActions() {
+				if action.Type == SET_FIELD {
+					setField := action.GetSetField()
+					if setField.Field.GetOfbField().Type == ofbType {
+						switch ofbType {
+						case VLAN_PCP:
+							return setField.Field.GetOfbField().GetVlanPcp(), true
+						case VLAN_VID:
+							return setField.Field.GetOfbField().GetVlanVid(), true
+						default:
+							logger.Errorw(ctx, "unsupported-ofb-field-type", log.Fields{"ofbType": ofbType})
+							return 0, false
+						}
+					}
+				}
+			}
+			return 0, false
+		}
+	}
+	return 0, false
+}
+
+func GetTunnelId(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == TUNNEL_ID {
+			return field.GetTunnelId()
+		}
+	}
+	return 0
+}
+
+//GetMetaData - legacy get method (only returns the lower 32 bits)
+func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return field.GetTableMetadata()
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+// GetMetadataFromWriteMetadataAction returns the metadata value from the write_metadata instruction, if present
+func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(WRITE_METADATA) {
+				if writeMetadata := instruction.GetWriteMetadata(); writeMetadata != nil {
+					return writeMetadata.GetMetadata()
+				}
+			}
+		}
+	}
+	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
+	return 0
+}
+
+func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var tpId uint16 = 0
+	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	if metadata != 0 {
+		tpId = uint16((metadata >> 32) & 0xFFFF)
+		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+	}
+	return tpId
+}
+
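+// Worked example (illustrative, not part of the library): for
+// metadata = 0x0064004000000010 the layout above decodes as
+//
+//	cTag = (metadata >> 48) & 0xFFFF   // 0x0064 (100)
+//	tpID = (metadata >> 32) & 0xFFFF   // 0x0040 (64)
+//	port = metadata & 0xFFFFFFFF       // 0x00000010 (16)
+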
+func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var uniPort uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	if md != 0 {
+		uniPort = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+	}
+	return uniPort
+
+}
+
+func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var innerTag uint16 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	if md != 0 {
+		innerTag = uint16((md >> 48) & 0xFFFF)
+		logger.Debugw(ctx, "Found CVLAN from write metadata action", log.Fields{"c_vlan": innerTag})
+	}
+	return innerTag
+}
+
+//GetInnerTagFromMetaData (legacy variant, commented out below) retrieves the inner tag from the Metadata_ofp. The port number (UNI on ONU) is in the
+// lower 32-bits of Metadata_ofp and the inner_tag is in the upper 32-bits. This is set in the ONOS OltPipeline as
+// a Metadata_ofp field
+/*func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		logger.Debugw(ctx, "onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return (md >> 32) & 0xffffffff
+}*/
+
+// GetChildPortFromTunnelId extracts the child device port from a flow that contains the parent device peer port.  Typically the UNI port of an
+// ONU child device.  Per TST agreement this will be the lower 32 bits of the tunnel id, reserving the upper 32 bits for
+// later use
+func GetChildPortFromTunnelId(flow *ofp.OfpFlowStats) uint32 {
+	tid := GetTunnelId(flow)
+	if tid == 0 {
+		return 0
+	}
+	// Per TST agreement we are keeping any child port id (uni port id) in the lower 32 bits
+	return uint32(tid & 0xffffffff)
+}
+
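+// Worked example (illustrative): a tunnel id of 0x0000000200000003 yields a
+// child (UNI) port of 3 from its lower 32 bits; the upper 32 bits (2 here)
+// are reserved and ignored by this helper.
+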
+func HasNextTable(flow *ofp.OfpFlowStats) bool {
+	if flow == nil {
+		return false
+	}
+	return GetGotoTableId(flow) != 0
+}
+
+func GetGroup(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == GROUP {
+			grp := action.GetGroup()
+			if grp == nil {
+				return 0
+			}
+			return grp.GetGroupId()
+		}
+	}
+	return 0
+}
+
+func HasGroup(flow *ofp.OfpFlowStats) bool {
+	return GetGroup(flow) != 0
+}
+
+// GetNextTableId returns the next table ID if "table_id" is present in the map, otherwise returns nil
+func GetNextTableId(kw OfpFlowModArgs) *uint32 {
+	if val, exist := kw["table_id"]; exist {
+		ret := uint32(val)
+		return &ret
+	}
+	return nil
+}
+
+// GetMeterIdFlowModArgs returns the meterId if "meter_id" is present in the map, otherwise returns 0
+func GetMeterIdFlowModArgs(kw OfpFlowModArgs) uint32 {
+	if val, exist := kw["meter_id"]; exist {
+		return uint32(val)
+	}
+	return 0
+}
+
+// GetMetadataFlowModArgs returns the metadata if "write_metadata" is present in the map, otherwise returns 0
+func GetMetadataFlowModArgs(kw OfpFlowModArgs) uint64 {
+	if val, exist := kw["write_metadata"]; exist {
+		ret := uint64(val)
+		return ret
+	}
+	return 0
+}
+
+// HashFlowStats returns a unique 64-bit integer hash of 'table_id', 'priority', and 'match'
+// The OF spec states that:
+// A flow table entry is identified by its match fields and priority: the match fields
+// and priority taken together identify a unique flow entry in the flow table.
+func HashFlowStats(flow *ofp.OfpFlowStats) (uint64, error) {
+	// first we need to make sure the oxm fields are in a predictable order (the specific order doesn't matter)
+	sort.Slice(flow.Match.OxmFields, func(a, b int) bool {
+		fieldsA, fieldsB := flow.Match.OxmFields[a], flow.Match.OxmFields[b]
+		if fieldsA.OxmClass < fieldsB.OxmClass {
+			return true
+		}
+		switch fieldA := fieldsA.Field.(type) {
+		case *ofp.OfpOxmField_OfbField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_ExperimenterField:
+				return true // ofp < experimenter
+			case *ofp.OfpOxmField_OfbField:
+				return fieldA.OfbField.Type < fieldB.OfbField.Type
+			}
+		case *ofp.OfpOxmField_ExperimenterField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_OfbField:
+				return false // ofp < experimenter
+			case *ofp.OfpOxmField_ExperimenterField:
+				eFieldA, eFieldB := fieldA.ExperimenterField, fieldB.ExperimenterField
+				if eFieldA.Experimenter != eFieldB.Experimenter {
+					return eFieldA.Experimenter < eFieldB.Experimenter
+				}
+				return eFieldA.OxmHeader < eFieldB.OxmHeader
+			}
+		}
+		return false
+	})
+
+	md5Hash := md5.New() // note that write errors will never occur with md5 hashing
+	var tmp [12]byte
+
+	binary.BigEndian.PutUint32(tmp[0:4], flow.TableId)             // tableId
+	binary.BigEndian.PutUint32(tmp[4:8], flow.Priority)            // priority
+	binary.BigEndian.PutUint32(tmp[8:12], uint32(flow.Match.Type)) // match type
+	_, _ = md5Hash.Write(tmp[:12])
+
+	for _, field := range flow.Match.OxmFields { // for all match fields
+		binary.BigEndian.PutUint32(tmp[:4], uint32(field.OxmClass)) // match class
+		_, _ = md5Hash.Write(tmp[:4])
+
+		switch oxmField := field.Field.(type) {
+		case *ofp.OfpOxmField_ExperimenterField:
+			binary.BigEndian.PutUint32(tmp[0:4], oxmField.ExperimenterField.Experimenter)
+			binary.BigEndian.PutUint32(tmp[4:8], oxmField.ExperimenterField.OxmHeader)
+			_, _ = md5Hash.Write(tmp[:8])
+
+		case *ofp.OfpOxmField_OfbField:
+			if err := hashWriteOfbField(md5Hash, oxmField.OfbField); err != nil {
+				return 0, err
+			}
+
+		default:
+			return 0, fmt.Errorf("unknown OfpOxmField type: %T", field.Field)
+		}
+	}
+
+	ret := md5Hash.Sum(nil)
+	return binary.BigEndian.Uint64(ret[0:8]), nil
+}
+
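+// Illustrative property (not part of the library): since the OXM fields are
+// sorted before hashing, two flows that differ only in match-field order get
+// the same ID. A minimal sketch, using the MkFlowStat helper defined later in
+// this file:
+//
+//	f1, _ := MkFlowStat(&FlowArgs{MatchFields: []*ofp.OfpOxmOfbField{InPort(1), EthType(0x888e)}})
+//	f2, _ := MkFlowStat(&FlowArgs{MatchFields: []*ofp.OfpOxmOfbField{EthType(0x888e), InPort(1)}})
+//	// f1.Id == f2.Id
+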
+func hashWriteOfbField(md5Hash hash.Hash, field *ofp.OfpOxmOfbField) error {
+	var tmp [8]byte
+	binary.BigEndian.PutUint32(tmp[:4], uint32(field.Type)) // type
+	_, _ = md5Hash.Write(tmp[:4])
+
+	// value
+	valType, val32, val64, valSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+	switch val := field.Value.(type) {
+	case *ofp.OfpOxmOfbField_Port:
+		valType, val32 = 4, val.Port
+	case *ofp.OfpOxmOfbField_PhysicalPort:
+		valType, val32 = 4, val.PhysicalPort
+	case *ofp.OfpOxmOfbField_TableMetadata:
+		valType, val64 = 8, val.TableMetadata
+	case *ofp.OfpOxmOfbField_EthDst:
+		valType, valSlice = 1, val.EthDst
+	case *ofp.OfpOxmOfbField_EthSrc:
+		valType, valSlice = 1, val.EthSrc
+	case *ofp.OfpOxmOfbField_EthType:
+		valType, val32 = 4, val.EthType
+	case *ofp.OfpOxmOfbField_VlanVid:
+		valType, val32 = 4, val.VlanVid
+	case *ofp.OfpOxmOfbField_VlanPcp:
+		valType, val32 = 4, val.VlanPcp
+	case *ofp.OfpOxmOfbField_IpDscp:
+		valType, val32 = 4, val.IpDscp
+	case *ofp.OfpOxmOfbField_IpEcn:
+		valType, val32 = 4, val.IpEcn
+	case *ofp.OfpOxmOfbField_IpProto:
+		valType, val32 = 4, val.IpProto
+	case *ofp.OfpOxmOfbField_Ipv4Src:
+		valType, val32 = 4, val.Ipv4Src
+	case *ofp.OfpOxmOfbField_Ipv4Dst:
+		valType, val32 = 4, val.Ipv4Dst
+	case *ofp.OfpOxmOfbField_TcpSrc:
+		valType, val32 = 4, val.TcpSrc
+	case *ofp.OfpOxmOfbField_TcpDst:
+		valType, val32 = 4, val.TcpDst
+	case *ofp.OfpOxmOfbField_UdpSrc:
+		valType, val32 = 4, val.UdpSrc
+	case *ofp.OfpOxmOfbField_UdpDst:
+		valType, val32 = 4, val.UdpDst
+	case *ofp.OfpOxmOfbField_SctpSrc:
+		valType, val32 = 4, val.SctpSrc
+	case *ofp.OfpOxmOfbField_SctpDst:
+		valType, val32 = 4, val.SctpDst
+	case *ofp.OfpOxmOfbField_Icmpv4Type:
+		valType, val32 = 4, val.Icmpv4Type
+	case *ofp.OfpOxmOfbField_Icmpv4Code:
+		valType, val32 = 4, val.Icmpv4Code
+	case *ofp.OfpOxmOfbField_ArpOp:
+		valType, val32 = 4, val.ArpOp
+	case *ofp.OfpOxmOfbField_ArpSpa:
+		valType, val32 = 4, val.ArpSpa
+	case *ofp.OfpOxmOfbField_ArpTpa:
+		valType, val32 = 4, val.ArpTpa
+	case *ofp.OfpOxmOfbField_ArpSha:
+		valType, valSlice = 1, val.ArpSha
+	case *ofp.OfpOxmOfbField_ArpTha:
+		valType, valSlice = 1, val.ArpTha
+	case *ofp.OfpOxmOfbField_Ipv6Src:
+		valType, valSlice = 1, val.Ipv6Src
+	case *ofp.OfpOxmOfbField_Ipv6Dst:
+		valType, valSlice = 1, val.Ipv6Dst
+	case *ofp.OfpOxmOfbField_Ipv6Flabel:
+		valType, val32 = 4, val.Ipv6Flabel
+	case *ofp.OfpOxmOfbField_Icmpv6Type:
+		valType, val32 = 4, val.Icmpv6Type
+	case *ofp.OfpOxmOfbField_Icmpv6Code:
+		valType, val32 = 4, val.Icmpv6Code
+	case *ofp.OfpOxmOfbField_Ipv6NdTarget:
+		valType, valSlice = 1, val.Ipv6NdTarget
+	case *ofp.OfpOxmOfbField_Ipv6NdSsl:
+		valType, valSlice = 1, val.Ipv6NdSsl
+	case *ofp.OfpOxmOfbField_Ipv6NdTll:
+		valType, valSlice = 1, val.Ipv6NdTll
+	case *ofp.OfpOxmOfbField_MplsLabel:
+		valType, val32 = 4, val.MplsLabel
+	case *ofp.OfpOxmOfbField_MplsTc:
+		valType, val32 = 4, val.MplsTc
+	case *ofp.OfpOxmOfbField_MplsBos:
+		valType, val32 = 4, val.MplsBos
+	case *ofp.OfpOxmOfbField_PbbIsid:
+		valType, val32 = 4, val.PbbIsid
+	case *ofp.OfpOxmOfbField_TunnelId:
+		valType, val64 = 8, val.TunnelId
+	case *ofp.OfpOxmOfbField_Ipv6Exthdr:
+		valType, val32 = 4, val.Ipv6Exthdr
+	default:
+		return fmt.Errorf("unknown OfpOxmField value type: %T", val)
+	}
+	switch valType {
+	case 1: // slice
+		_, _ = md5Hash.Write(valSlice)
+	case 4: // uint32
+		binary.BigEndian.PutUint32(tmp[:4], val32)
+		_, _ = md5Hash.Write(tmp[:4])
+	case 8: // uint64
+		binary.BigEndian.PutUint64(tmp[:8], val64)
+		_, _ = md5Hash.Write(tmp[:8])
+	}
+
+	// mask
+	if !field.HasMask {
+		tmp[0] = 0x00
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = false
+	} else {
+		tmp[0] = 0x01
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = true
+
+		maskType, mask32, mask64, maskSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+		switch mask := field.Mask.(type) {
+		case *ofp.OfpOxmOfbField_TableMetadataMask:
+			maskType, mask64 = 8, mask.TableMetadataMask
+		case *ofp.OfpOxmOfbField_EthDstMask:
+			maskType, maskSlice = 1, mask.EthDstMask
+		case *ofp.OfpOxmOfbField_EthSrcMask:
+			maskType, maskSlice = 1, mask.EthSrcMask
+		case *ofp.OfpOxmOfbField_VlanVidMask:
+			maskType, mask32 = 4, mask.VlanVidMask
+		case *ofp.OfpOxmOfbField_Ipv4SrcMask:
+			maskType, mask32 = 4, mask.Ipv4SrcMask
+		case *ofp.OfpOxmOfbField_Ipv4DstMask:
+			maskType, mask32 = 4, mask.Ipv4DstMask
+		case *ofp.OfpOxmOfbField_ArpSpaMask:
+			maskType, mask32 = 4, mask.ArpSpaMask
+		case *ofp.OfpOxmOfbField_ArpTpaMask:
+			maskType, mask32 = 4, mask.ArpTpaMask
+		case *ofp.OfpOxmOfbField_Ipv6SrcMask:
+			maskType, maskSlice = 1, mask.Ipv6SrcMask
+		case *ofp.OfpOxmOfbField_Ipv6DstMask:
+			maskType, maskSlice = 1, mask.Ipv6DstMask
+		case *ofp.OfpOxmOfbField_Ipv6FlabelMask:
+			maskType, mask32 = 4, mask.Ipv6FlabelMask
+		case *ofp.OfpOxmOfbField_PbbIsidMask:
+			maskType, mask32 = 4, mask.PbbIsidMask
+		case *ofp.OfpOxmOfbField_TunnelIdMask:
+			maskType, mask64 = 8, mask.TunnelIdMask
+		case *ofp.OfpOxmOfbField_Ipv6ExthdrMask:
+			maskType, mask32 = 4, mask.Ipv6ExthdrMask
+		case nil:
+			return fmt.Errorf("hasMask set to true, but no mask present")
+		default:
+			return fmt.Errorf("unknown OfpOxmField mask type: %T", mask)
+		}
+		switch maskType {
+		case 1: // slice
+			_, _ = md5Hash.Write(maskSlice)
+		case 4: // uint32
+			binary.BigEndian.PutUint32(tmp[:4], mask32)
+			_, _ = md5Hash.Write(tmp[:4])
+		case 8: // uint64
+			binary.BigEndian.PutUint64(tmp[:8], mask64)
+			_, _ = md5Hash.Write(tmp[:8])
+		}
+	}
+	return nil
+}
+
+// FlowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
+func FlowStatsEntryFromFlowModMessage(mod *ofp.OfpFlowMod) (*ofp.OfpFlowStats, error) {
+	flow := &ofp.OfpFlowStats{}
+	if mod == nil {
+		return flow, nil
+	}
+	flow.TableId = mod.TableId
+	flow.Priority = mod.Priority
+	flow.IdleTimeout = mod.IdleTimeout
+	flow.HardTimeout = mod.HardTimeout
+	flow.Flags = mod.Flags
+	flow.Cookie = mod.Cookie
+	flow.Match = mod.Match
+	flow.Instructions = mod.Instructions
+	var err error
+	if flow.Id, err = HashFlowStats(flow); err != nil {
+		return nil, err
+	}
+
+	return flow, nil
+}
+
+func GroupEntryFromGroupMod(mod *ofp.OfpGroupMod) *ofp.OfpGroupEntry {
+	group := &ofp.OfpGroupEntry{}
+	if mod == nil {
+		return group
+	}
+	group.Desc = &ofp.OfpGroupDesc{Type: mod.Type, GroupId: mod.GroupId, Buckets: mod.Buckets}
+	group.Stats = &ofp.OfpGroupStats{GroupId: mod.GroupId}
+	//TODO do we need to instantiate bucket bins?
+	return group
+}
+
+// MeterEntryFromMeterMod creates an ofp_meter_entry from an ofp_meter_mod message
+func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+	bandStats := make([]*ofp.OfpMeterBandStats, 0)
+	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
+		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
+	if meterMod == nil {
+		logger.Error(ctx, "Invalid meter mod command")
+		return meter
+	}
+	// config init
+	meter.Config.MeterId = meterMod.MeterId
+	meter.Config.Flags = meterMod.Flags
+	meter.Config.Bands = meterMod.Bands
+	// meter stats init
+	meter.Stats.MeterId = meterMod.MeterId
+	meter.Stats.FlowCount = 0
+	meter.Stats.PacketInCount = 0
+	meter.Stats.ByteInCount = 0
+	meter.Stats.DurationSec = 0
+	meter.Stats.DurationNsec = 0
+	// band stats init
+	for range meterMod.Bands {
+		band := &ofp.OfpMeterBandStats{}
+		band.PacketBandCount = 0
+		band.ByteBandCount = 0
+		bandStats = append(bandStats, band)
+	}
+	meter.Stats.BandStats = bandStats
+	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
+	return meter
+
+}
+
+func GetMeterIdFromFlow(flow *ofp.OfpFlowStats) uint32 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					return meterInst.GetMeterId()
+				}
+			}
+		}
+	}
+
+	return uint32(0)
+}
+
+func MkOxmFields(matchFields []ofp.OfpOxmField) []*ofp.OfpOxmField {
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	return oxmFields
+}
+
+func MkInstructionsFromActions(actions []*ofp.OfpAction) []*ofp.OfpInstruction {
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+	return instructions
+}
+
+// MkSimpleFlowMod is a convenience function to generate an ofp_flow_mod message with an OXM BASIC match composed from the match_fields, and a
+// single APPLY_ACTIONS instruction with a list of ofp_action objects.
+func MkSimpleFlowMod(matchFields []*ofp.OfpOxmField, actions []*ofp.OfpAction, command *ofp.OfpFlowModCommand, kw OfpFlowModArgs) *ofp.OfpFlowMod {
+
+	// Process actions instructions
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+
+	// Process next table
+	if tableId := GetNextTableId(kw); tableId != nil {
+		var instGotoTable ofp.OfpInstruction_GotoTable
+		instGotoTable.GotoTable = &ofp.OfpInstructionGotoTable{TableId: *tableId}
+		inst := ofp.OfpInstruction{Type: uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE), Data: &instGotoTable}
+		instructions = append(instructions, &inst)
+	}
+	// Process meter action
+	if meterId := GetMeterIdFlowModArgs(kw); meterId != 0 {
+		var instMeter ofp.OfpInstruction_Meter
+		instMeter.Meter = &ofp.OfpInstructionMeter{MeterId: meterId}
+		inst := ofp.OfpInstruction{Type: uint32(METER_ACTION), Data: &instMeter}
+		instructions = append(instructions, &inst)
+	}
+	//process write_metadata action
+	if metadata := GetMetadataFlowModArgs(kw); metadata != 0 {
+		var instWriteMetadata ofp.OfpInstruction_WriteMetadata
+		instWriteMetadata.WriteMetadata = &ofp.OfpInstructionWriteMetadata{Metadata: metadata}
+		inst := ofp.OfpInstruction{Type: uint32(WRITE_METADATA), Data: &instWriteMetadata}
+		instructions = append(instructions, &inst)
+	}
+
+	// Process match fields
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	var match ofp.OfpMatch
+	match.Type = ofp.OfpMatchType_OFPMT_OXM
+	match.OxmFields = oxmFields
+
+	// Create ofp_flow_message
+	msg := &ofp.OfpFlowMod{}
+	if command == nil {
+		msg.Command = ofp.OfpFlowModCommand_OFPFC_ADD
+	} else {
+		msg.Command = *command
+	}
+	msg.Instructions = instructions
+	msg.Match = &match
+
+	// Set the variadic argument values
+	msg = setVariadicModAttributes(msg, kw)
+
+	return msg
+}
+
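+// Illustrative usage sketch (not part of the library): an EAPOL trap-to-
+// controller flow_mod with a meter attached through the "meter_id" argument
+// (the mechanism used when the rw-core sets the meter id on trap flows). The
+// port number, priority and meter id are assumptions for illustration.
+//
+//	matchFields := []*ofp.OfpOxmField{
+//		{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: InPort(16)}},
+//		{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: EthType(0x888e)}},
+//	}
+//	actions := []*ofp.OfpAction{Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER))}
+//	mod := MkSimpleFlowMod(matchFields, actions, nil, OfpFlowModArgs{"priority": 10000, "meter_id": 1})
+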
+func MkMulticastGroupMod(groupId uint32, buckets []*ofp.OfpBucket, command *ofp.OfpGroupModCommand) *ofp.OfpGroupMod {
+	group := &ofp.OfpGroupMod{}
+	if command == nil {
+		group.Command = ofp.OfpGroupModCommand_OFPGC_ADD
+	} else {
+		group.Command = *command
+	}
+	group.Type = ofp.OfpGroupType_OFPGT_ALL
+	group.GroupId = groupId
+	group.Buckets = buckets
+	return group
+}
+
+//setVariadicModAttributes sets only the uint64 or uint32 fields of the ofp_flow_mod message
+func setVariadicModAttributes(mod *ofp.OfpFlowMod, args OfpFlowModArgs) *ofp.OfpFlowMod {
+	if args == nil {
+		return mod
+	}
+	for key, val := range args {
+		switch key {
+		case "cookie":
+			mod.Cookie = val
+		case "cookie_mask":
+			mod.CookieMask = val
+		case "table_id":
+			mod.TableId = uint32(val)
+		case "idle_timeout":
+			mod.IdleTimeout = uint32(val)
+		case "hard_timeout":
+			mod.HardTimeout = uint32(val)
+		case "priority":
+			mod.Priority = uint32(val)
+		case "buffer_id":
+			mod.BufferId = uint32(val)
+		case "out_port":
+			mod.OutPort = uint32(val)
+		case "out_group":
+			mod.OutGroup = uint32(val)
+		case "flags":
+			mod.Flags = uint32(val)
+		}
+	}
+	return mod
+}
+
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
+// MkFlowStat is a helper method to build flows
+func MkFlowStat(fa *FlowArgs) (*ofp.OfpFlowStats, error) {
+	//Build the match-fields
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range fa.MatchFields {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return FlowStatsEntryFromFlowModMessage(MkSimpleFlowMod(matchFields, fa.Actions, fa.Command, fa.KV))
+}
+
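+// Illustrative usage sketch (not part of the library): building a flow entry
+// straight from FlowArgs; the resulting Id comes from HashFlowStats. The
+// match and action values are assumptions for illustration.
+//
+//	flow, err := MkFlowStat(&FlowArgs{
+//		MatchFields: []*ofp.OfpOxmOfbField{InPort(1), VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 100)},
+//		Actions:     []*ofp.OfpAction{PopVlan(), Output(2)},
+//		KV:          OfpFlowModArgs{"table_id": 0, "priority": 1000},
+//	})
+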
+func MkGroupStat(ga *GroupArgs) *ofp.OfpGroupEntry {
+	return GroupEntryFromGroupMod(MkMulticastGroupMod(ga.GroupId, ga.Buckets, ga.Command))
+}
+
+type OfpFlowModArgs map[string]uint64
+
+type FlowArgs struct {
+	MatchFields []*ofp.OfpOxmOfbField
+	Actions     []*ofp.OfpAction
+	Command     *ofp.OfpFlowModCommand
+	Priority    uint32
+	KV          OfpFlowModArgs
+}
+
+type GroupArgs struct {
+	GroupId uint32
+	Buckets []*ofp.OfpBucket
+	Command *ofp.OfpGroupModCommand
+}
+
+type FlowsAndGroups struct {
+	Flows  *ordered_map.OrderedMap
+	Groups *ordered_map.OrderedMap
+}
+
+func NewFlowsAndGroups() *FlowsAndGroups {
+	var fg FlowsAndGroups
+	fg.Flows = ordered_map.NewOrderedMap()
+	fg.Groups = ordered_map.NewOrderedMap()
+	return &fg
+}
+
+func (fg *FlowsAndGroups) Copy() *FlowsAndGroups {
+	copyFG := NewFlowsAndGroups()
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			copyFG.Flows.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			copyFG.Groups.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	return copyFG
+}
+
+func (fg *FlowsAndGroups) GetFlow(index int) *ofp.OfpFlowStats {
+	iter := fg.Flows.IterFunc()
+	pos := 0
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if pos == index {
+			if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+				return protoMsg
+			}
+			return nil
+		}
+		pos += 1
+	}
+	return nil
+}
+
+func (fg *FlowsAndGroups) ListFlows() []*ofp.OfpFlowStats {
+	flows := make([]*ofp.OfpFlowStats, 0)
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			flows = append(flows, protoMsg)
+		}
+	}
+	return flows
+}
+
+func (fg *FlowsAndGroups) ListGroups() []*ofp.OfpGroupEntry {
+	groups := make([]*ofp.OfpGroupEntry, 0)
+	iter := fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			groups = append(groups, protoMsg)
+		}
+	}
+	return groups
+}
+
+func (fg *FlowsAndGroups) String() string {
+	var buffer bytes.Buffer
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			buffer.WriteString("\nFlow:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			buffer.WriteString("\nGroup:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	return buffer.String()
+}
+
+func (fg *FlowsAndGroups) AddFlow(flow *ofp.OfpFlowStats) {
+	if flow == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add flow only if absent
+	if _, exist := fg.Flows.Get(flow.Id); !exist {
+		fg.Flows.Set(flow.Id, flow)
+	}
+}
+
+func (fg *FlowsAndGroups) AddGroup(group *ofp.OfpGroupEntry) {
+	if group == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add group only if absent
+	if _, exist := fg.Groups.Get(group.Desc.GroupId); !exist {
+		fg.Groups.Set(group.Desc.GroupId, group)
+	}
+}
+
+//AddFrom adds flows and groups from the argument into this structure, only if they do not already exist
+func (fg *FlowsAndGroups) AddFrom(from *FlowsAndGroups) {
+	iter := from.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			if _, exist := fg.Flows.Get(protoMsg.Id); !exist {
+				fg.Flows.Set(protoMsg.Id, protoMsg)
+			}
+		}
+	}
+	iter = from.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			if _, exist := fg.Groups.Get(protoMsg.Stats.GroupId); !exist {
+				fg.Groups.Set(protoMsg.Stats.GroupId, protoMsg)
+			}
+		}
+	}
+}
+
+type DeviceRules struct {
+	Rules map[string]*FlowsAndGroups
+}
+
+func NewDeviceRules() *DeviceRules {
+	var dr DeviceRules
+	dr.Rules = make(map[string]*FlowsAndGroups)
+	return &dr
+}
+
+func (dr *DeviceRules) Copy() *DeviceRules {
+	copyDR := NewDeviceRules()
+	if dr != nil {
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
+		}
+	}
+	return copyDR
+}
+
+func (dr *DeviceRules) ClearFlows(deviceId string) {
+	if _, exist := dr.Rules[deviceId]; exist {
+		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
+	}
+}
+
+func (dr *DeviceRules) FilterRules(deviceIds map[string]string) *DeviceRules {
+	filteredDR := NewDeviceRules()
+	for key, val := range dr.Rules {
+		if _, exist := deviceIds[key]; exist {
+			filteredDR.Rules[key] = val.Copy()
+		}
+	}
+	return filteredDR
+}
+
+func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	if _, exist := dr.Rules[deviceId]; !exist {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId].AddFlow(flow)
+}
+
+func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	return dr.Rules
+}
+
+func (dr *DeviceRules) String() string {
+	var buffer bytes.Buffer
+	for key, value := range dr.Rules {
+		buffer.WriteString("DeviceId:")
+		buffer.WriteString(key)
+		buffer.WriteString(value.String())
+		buffer.WriteString("\n\n")
+	}
+	return buffer.String()
+}
+
+func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId] = fg
+}
+
+// CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
+// empty FlowsAndGroups to it.  Otherwise, it does nothing.
+func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+}
+
+/*
+ *  Common flow routines
+ */
+
+//FindOverlappingFlows returns a list of overlapping flow(s), where mod is the flow request
+func FindOverlappingFlows(flows []*ofp.OfpFlowStats, mod *ofp.OfpFlowMod) []*ofp.OfpFlowStats {
+	return nil //TODO - complete implementation
+}
+
+// FindFlowById returns the index of the flow in the flows array if present. Otherwise, it returns -1
+func FindFlowById(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if flow.Id == f.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+// FindFlows returns the index of flow in flows if present.  Otherwise, it returns -1
+func FindFlows(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if f.Id == flow.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+//FlowMatch returns true if the two flows match on the following flow attributes:
+//TableId, Priority, Flags, Cookie, Match
+func FlowMatch(f1 *ofp.OfpFlowStats, f2 *ofp.OfpFlowStats) bool {
+	return f1 != nil && f2 != nil && f1.Id == f2.Id
+}
+
+//FlowMatchesMod returns true if the given flow is "covered" by the wildcard flow_mod, taking into consideration
+//both exact matches as well as mask-based match fields, if any.  Otherwise it returns false
+func FlowMatchesMod(flow *ofp.OfpFlowStats, mod *ofp.OfpFlowMod) bool {
+	if flow == nil || mod == nil {
+		return false
+	}
+	//Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
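+	//(for instance, with an assumed mod.Cookie=0x10 and mod.CookieMask=0xF0,
+	//any cookie whose bits 4-7 equal 0x1, such as 0x10..0x1F, is covered)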
+	if (flow.Cookie & mod.CookieMask) != (mod.Cookie & mod.CookieMask) {
+		return false
+	}
+
+	//Check if flow.table_id is covered by flow_mod.table_id
+	if mod.TableId != uint32(ofp.OfpTable_OFPTT_ALL) && flow.TableId != mod.TableId {
+		return false
+	}
+
+	//Check out_port
+	if (mod.OutPort&0x7fffffff) != uint32(ofp.OfpPortNo_OFPP_ANY) && !FlowHasOutPort(flow, mod.OutPort) {
+		return false
+	}
+
+	//	Check out_group
+	if (mod.OutGroup&0x7fffffff) != uint32(ofp.OfpGroup_OFPG_ANY) && !FlowHasOutGroup(flow, mod.OutGroup) {
+		return false
+	}
+
+	//Priority is ignored
+
+	//Check match condition
+	//If the flow_mod match field is empty, that is a special case and indicates the flow entry matches
+	if (mod.Match == nil) || (mod.Match.OxmFields == nil) || (len(mod.Match.OxmFields) == 0) {
+		//If we got this far and the match is empty in the flow spec, then the flow matches
+		return true
+	} // TODO : implement the flow match analysis
+	return false
+
+}
+
+//FlowHasOutPort returns true if the flow has an output action to the given out_port
+func FlowHasOutPort(flow *ofp.OfpFlowStats, outPort uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+					if (action.GetOutput() != nil) && (action.GetOutput().Port == outPort) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FlowHasOutGroup returns true if the flow has a group action with the given out_group
+func FlowHasOutGroup(flow *ofp.OfpFlowStats, groupID uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_GROUP {
+					if (action.GetGroup() != nil) && (action.GetGroup().GroupId == groupID) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FindGroup returns index of group if found, else returns -1
+func FindGroup(groups []*ofp.OfpGroupEntry, groupId uint32) int {
+	for idx, group := range groups {
+		if group.Desc.GroupId == groupId {
+			return idx
+		}
+	}
+	return -1
+}
+
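+// FlowsDeleteByGroupId removes from flows every entry that outputs to the given
+// group id; it returns whether anything was removed, along with the flows that
+// remain.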
+func FlowsDeleteByGroupId(flows []*ofp.OfpFlowStats, groupId uint32) (bool, []*ofp.OfpFlowStats) {
+	toKeep := make([]*ofp.OfpFlowStats, 0)
+
+	for _, f := range flows {
+		if !FlowHasOutGroup(f, groupId) {
+			toKeep = append(toKeep, f)
+		}
+	}
+	return len(toKeep) < len(flows), toKeep
+}
+
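+// ToOfpOxmField wraps each OFB (OpenFlow basic class) match field into a
+// generic OfpOxmField.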
+func ToOfpOxmField(from []*ofp.OfpOxmOfbField) []*ofp.OfpOxmField {
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range from {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return matchFields
+}
+
+//IsMulticastIp returns true if the ip starts with the bit sequence 1110,
+//i.e. it falls in the IPv4 multicast range 224.0.0.0/4; false otherwise.
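+//For example, 224.0.0.1 is 0xE0000001 and 0xE0000001>>28 == 14 (binary 1110),
+//so it is multicast, while 192.168.0.1 (0xC0A80001) yields 12 and is not.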
+func IsMulticastIp(ip uint32) bool {
+	return ip>>28 == 14
+}
+
+//ConvertToMulticastMacInt returns the equivalent multicast mac address, as an integer, for the given multicast ip address
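+//For example, 224.0.0.1 (0xE0000001) maps to 0x01005E000001, the standard
+//RFC 1112 multicast mac 01:00:5e:00:00:01.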
+func ConvertToMulticastMacInt(ip uint32) uint64 {
+	//get the last 23 bits of the ip address (ip & 0x7FFFFF)
+	theLast23BitsOfIp := ip & 8388607
+	// OR with 0x01005E000000 (1101088686080) to build the mcast mac address
+	return 1101088686080 | uint64(theLast23BitsOfIp)
+}
+
+//ConvertToMulticastMacBytes returns the equivalent multicast mac address of the given multicast ip address as a 6-byte slice
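+//For 224.0.0.1 this yields {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}.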
+func ConvertToMulticastMacBytes(ip uint32) []byte {
+	mac := ConvertToMulticastMacInt(ip)
+	var b bytes.Buffer
+	// catalyze (48 bits) is 0xFF0000000000, i.e. 11111111 followed by 40 zero bits
+	catalyze := uint64(280375465082880)
+	//extract each octet, most significant first
+	for i := 0; i < 6; i++ {
+		if i != 0 {
+			catalyze = catalyze >> 8
+		}
+		octet := mac & catalyze
+		octetDecimal := octet >> uint8(40-i*8)
+		b.WriteByte(byte(octetDecimal))
+	}
+	return b.Bytes()
+}
+
+func GetMeterIdFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+		Write metadata instruction value (metadata) is 8 bytes:
+			MS 2 bytes: C Tag
+			Next 2 bytes: Technology Profile Id
+			Last 4 bytes: Port number (uni or nni) or MeterId
+		This is set in the ONOS OltPipeline as a write metadata instruction.
+	*/
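+	// Worked example (value assumed for illustration): metadata
+	// 0x0064004100000040 decodes as C-Tag 0x0064, TP id 0x0041 and
+	// port/meter 0x00000040, so the meter id extracted below is 64.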
+	var meterID uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "found-metadata-for-egress/uni-port", log.Fields{"metadata": md})
+	if md != 0 {
+		meterID = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "found-meterID-in-write-metadata-action", log.Fields{"meterID": meterID})
+	}
+	return meterID
+}
+
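+// SetMeterIdToFlow overwrites the meter id carried by the flow's METER
+// instruction, if the flow has one.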
+func SetMeterIdToFlow(flow *ofp.OfpFlowStats, meterId uint32) {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					meterInst.MeterId = meterId
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/common.go
new file mode 100644
index 0000000..cb5480b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/security.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/security.go
new file mode 100644
index 0000000..930d2c8
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/security.go
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+type GrpcSecurity struct {
+	KeyFile  string
+	CertFile string
+	CaFile   string
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/server.go
new file mode 100644
index 0000000..38dc308
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/grpc/server.go
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"net"
+
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/reflection"
+	"google.golang.org/grpc/status"
+)
+
+/*
+To add a GRPC server to your existing component simply follow these steps:
+
+1. Create a server instance by passing the host and port where it should run and optionally add certificate information
+
+	e.g.
+	s.server = server.NewGrpcServer(s.config.GrpcAddress, nil, false, nil)
+
+2. Create a function that will register your service with the GRPC server
+
+    e.g.
+	f := func(gs *grpc.Server) {
+		voltha.RegisterVolthaReadOnlyServiceServer(
+			gs,
+			core.NewReadOnlyServiceHandler(s.root),
+		)
+	}
+
+3. Add the service to the server
+
+    e.g.
+	s.server.AddService(f)
+
+4. Start the server
+
+	s.server.Start(ctx)
+*/
+
+// ReadyProbe is the interface a probe must implement to be attached to the server.
+// A probe must support the IsReady() method.
+type ReadyProbe interface {
+	IsReady() bool
+}
+
+type GrpcServer struct {
+	gs       *grpc.Server
+	address  string
+	secure   bool
+	services []func(*grpc.Server)
+	probe    ReadyProbe // optional
+
+	*GrpcSecurity
+}
+
+/*
+Instantiate a GRPC server data structure
+*/
+func NewGrpcServer(
+	address string,
+	certs *GrpcSecurity,
+	secure bool,
+	probe ReadyProbe,
+) *GrpcServer {
+	server := &GrpcServer{
+		address:      address,
+		secure:       secure,
+		GrpcSecurity: certs,
+		probe:        probe,
+	}
+	return server
+}
+
+/*
+Start prepares the GRPC server and starts servicing requests
+*/
+func (s *GrpcServer) Start(ctx context.Context) {
+
+	lis, err := net.Listen("tcp", s.address)
+	if err != nil {
+		logger.Fatalf(ctx, "failed to listen: %v", err)
+	}
+
+	// Use Interceptors to automatically inject and publish Open Tracing Spans by this GRPC server
+	serverOptions := []grpc.ServerOption{
+		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+			grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		)),
+		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+			grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+			mkServerInterceptor(s),
+		))}
+
+	if s.secure && s.GrpcSecurity != nil {
+		creds, err := credentials.NewServerTLSFromFile(s.CertFile, s.KeyFile)
+		if err != nil {
+			logger.Fatalf(ctx, "could not load TLS keys: %s", err)
+		}
+
+		serverOptions = append(serverOptions, grpc.Creds(creds))
+		s.gs = grpc.NewServer(serverOptions...)
+	} else {
+		logger.Info(ctx, "starting-insecure-grpc-server")
+		s.gs = grpc.NewServer(serverOptions...)
+	}
+
+	// Register all required services
+	for _, service := range s.services {
+		service(s.gs)
+	}
+	reflection.Register(s.gs)
+
+	if err := s.gs.Serve(lis); err != nil {
+		logger.Fatalf(ctx, "failed to serve: %v\n", err)
+	}
+}
+
+// Make a serverInterceptor for the given GrpcServer
+// This interceptor will check whether there is an attached probe,
+// and if that probe indicates NotReady, then an UNAVAILABLE
+// response will be returned.
+func mkServerInterceptor(s *GrpcServer) func(ctx context.Context,
+	req interface{},
+	info *grpc.UnaryServerInfo,
+	handler grpc.UnaryHandler) (interface{}, error) {
+
+	return func(ctx context.Context,
+		req interface{},
+		info *grpc.UnaryServerInfo,
+		handler grpc.UnaryHandler) (interface{}, error) {
+
+		if (s.probe != nil) && (!s.probe.IsReady()) {
+			logger.Warnf(ctx, "Grpc request received while not ready %v", req)
+			return nil, status.Error(codes.Unavailable, "system is not ready")
+		}
+
+		// Call the handler
+		return handler(ctx, req)
+	}
+}
+
+/*
+Stop servicing GRPC requests
+*/
+func (s *GrpcServer) Stop() {
+	if s.gs != nil {
+		s.gs.Stop()
+	}
+}
+
+/*
+AddService appends a generic service request function
+*/
+func (s *GrpcServer) AddService(
+	registerFunction func(*grpc.Server),
+) {
+	s.services = append(s.services, registerFunction)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
new file mode 100644
index 0000000..eee9631
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"time"
+
+	ca "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+const (
+	PartitionConsumer = iota
+	GroupCustomer     = iota
+)
+
+const (
+	OffsetNewest = -1
+	OffsetOldest = -2
+)
+
+const (
+	GroupIdKey = "groupId"
+	Offset     = "offset"
+)
+
+const (
+	DefaultKafkaAddress             = "127.0.0.1:9092"
+	DefaultGroupName                = "voltha"
+	DefaultSleepOnError             = 1
+	DefaultProducerFlushFrequency   = 10
+	DefaultProducerFlushMessages    = 10
+	DefaultProducerFlushMaxmessages = 100
+	DefaultProducerReturnSuccess    = true
+	DefaultProducerReturnErrors     = true
+	DefaultProducerRetryMax         = 3
+	DefaultProducerRetryBackoff     = time.Millisecond * 100
+	DefaultConsumerMaxwait          = 100
+	DefaultMaxProcessingTime        = 100
+	DefaultConsumerType             = PartitionConsumer
+	DefaultNumberPartitions         = 3
+	DefaultNumberReplicas           = 1
+	DefaultAutoCreateTopic          = false
+	DefaultMetadataMaxRetry         = 3
+	DefaultLivenessChannelInterval  = time.Second * 30
+)
+
+// Client represents the set of APIs a Kafka client must implement
+type Client interface {
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(ctx context.Context, topic *Topic) error
+	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
+	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness(ctx context.Context) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	EnableHealthinessChannel(ctx context.Context, enable bool) chan bool
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
new file mode 100644
index 0000000..f4d7661
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
new file mode 100644
index 0000000..962b932
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"github.com/buraksezer/consistent"
+	"github.com/cespare/xxhash"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"sync"
+)
+
+const (
+	// All the values below can be tuned to get optimal data distribution.  The numbers below seem to work well when
+	// supporting 1000-10000 devices and 1-20 replicas of a service
+
+	// Keys are distributed among partitions. Prime numbers are good to distribute keys uniformly.
+	DefaultPartitionCount = 1117
+
+	// Represents how many times a node is replicated on the consistent ring.
+	DefaultReplicationFactor = 117
+
+	// Load is used to calculate average load.
+	DefaultLoad = 1.1
+)
+
+type Endpoint string // Endpoint of a service instance.  When using kafka, this is the topic name of a service
+type ReplicaID int32 // The replication ID of a service instance
+
+type EndpointManager interface {
+
+	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
+	// now this will return the topic name
+	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
+
+	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
+	// devices owned by that service need to be reconciled
+	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
+
+	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by
+	// tests only
+	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
+}
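+
+// A typical lookup (ids assumed for illustration):
+//
+//	ep := NewEndpointManager(backend)
+//	endpoint, err := ep.GetEndpoint(ctx, "device-1234", "openolt")
+//
+// endpoint is then the kafka topic name of the adapter replica that owns
+// device-1234 on the consistent hash ring.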
+
+type service struct {
+	id             string // Id of the service.  The same id is used for all replicas
+	totalReplicas  int32
+	replicas       map[ReplicaID]Endpoint
+	consistentRing *consistent.Consistent
+}
+
+type endpointManager struct {
+	partitionCount           int
+	replicationFactor        int
+	load                     float64
+	backend                  *db.Backend
+	services                 map[string]*service
+	servicesLock             sync.RWMutex
+	deviceTypeServiceMap     map[string]string
+	deviceTypeServiceMapLock sync.RWMutex
+}
+
+type EndpointManagerOption func(*endpointManager)
+
+func PartitionCount(count int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.partitionCount = count
+	}
+}
+
+func ReplicationFactor(replicas int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.replicationFactor = replicas
+	}
+}
+
+func Load(load float64) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.load = load
+	}
+}
+
+func newEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	tm := &endpointManager{
+		partitionCount:       DefaultPartitionCount,
+		replicationFactor:    DefaultReplicationFactor,
+		load:                 DefaultLoad,
+		backend:              backend,
+		services:             make(map[string]*service),
+		deviceTypeServiceMap: make(map[string]string),
+	}
+
+	for _, option := range opts {
+		option(tm)
+	}
+	return tm
+}
+
+func NewEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	return newEndpointManager(backend, opts...)
+}
+
+func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return "", err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return "", status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	endpoint := m.getEndPoint()
+	if endpoint == "" {
+		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
+	}
+	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	return endpoint, nil
+}
+
+func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return false, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return false, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica() == ReplicaID(replicaNumber), nil
+}
+
+func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return 0, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return 0, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica(), nil
+}
+
+func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
+	if err != nil {
+		return nil, err
+	}
+	key := ep.makeKey(deviceID, dType, serviceType)
+	return serv.consistentRing.LocateKey(key), nil
+}
+
+func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
+	// Check whether the service exists
+	ep.servicesLock.RLock()
+	serv, serviceExist := ep.services[serviceType]
+	ep.servicesLock.RUnlock()
+
+	// Load the service and device types if needed
+	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+		if err := ep.loadServices(ctx); err != nil {
+			return nil, "", err
+		}
+
+		// Check whether the service exists now
+		ep.servicesLock.RLock()
+		serv, serviceExist = ep.services[serviceType]
+		ep.servicesLock.RUnlock()
+		if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+			return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+		}
+	}
+
+	ep.deviceTypeServiceMapLock.RLock()
+	defer ep.deviceTypeServiceMapLock.RUnlock()
+	for dType, sType := range ep.deviceTypeServiceMap {
+		if sType == serviceType {
+			return serv, dType, nil
+		}
+	}
+	return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+}
+
+func (ep *endpointManager) getConsistentConfig() consistent.Config {
+	return consistent.Config{
+		PartitionCount:    ep.partitionCount,
+		ReplicationFactor: ep.replicationFactor,
+		Load:              ep.load,
+		Hasher:            hasher{},
+	}
+}
+
+// loadServices loads the services (adapters) and device types in memory. Because the data set is small and is stored
+// in the dB as binary protobuf, it is simpler to reload all the data whenever an inconsistency is detected than to
+// watch for updates in the dB and act on them.
+func (ep *endpointManager) loadServices(ctx context.Context) error {
+	ep.servicesLock.Lock()
+	defer ep.servicesLock.Unlock()
+	ep.deviceTypeServiceMapLock.Lock()
+	defer ep.deviceTypeServiceMapLock.Unlock()
+
+	if ep.backend == nil {
+		return status.Error(codes.Aborted, "backend-not-set")
+	}
+	ep.services = make(map[string]*service)
+	ep.deviceTypeServiceMap = make(map[string]string)
+
+	// Load the adapters
+	blobs, err := ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "adapters")
+	if err != nil {
+		return err
+	}
+
+	// Data is marshalled as proto bytes in the data store
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		adapter := &voltha.Adapter{}
+		if err := proto.Unmarshal(data, adapter); err != nil {
+			return err
+		}
+		// A valid adapter should have the vendor field set
+		if adapter.Vendor != "" {
+			if _, ok := ep.services[adapter.Type]; !ok {
+				ep.services[adapter.Type] = &service{
+					id:             adapter.Type,
+					totalReplicas:  adapter.TotalReplicas,
+					replicas:       make(map[ReplicaID]Endpoint),
+					consistentRing: consistent.New(nil, ep.getConsistentConfig()),
+				}
+
+			}
+			currentReplica := ReplicaID(adapter.CurrentReplica)
+			endpoint := Endpoint(adapter.Endpoint)
+			ep.services[adapter.Type].replicas[currentReplica] = endpoint
+			ep.services[adapter.Type].consistentRing.Add(newMember(adapter.Id, adapter.Type, adapter.Vendor, endpoint, adapter.Version, currentReplica))
+		}
+	}
+	// Load the device types
+	blobs, err = ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "device_types")
+	if err != nil {
+		return err
+	}
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		deviceType := &voltha.DeviceType{}
+		if err := proto.Unmarshal(data, deviceType); err != nil {
+			return err
+		}
+		if _, ok := ep.deviceTypeServiceMap[deviceType.Id]; !ok {
+			ep.deviceTypeServiceMap[deviceType.Id] = deviceType.Adapter
+		}
+	}
+
+	// Log the loaded data in debug mode to facilitate troubleshooting
+	if logger.V(log.DebugLevel) {
+		for key, val := range ep.services {
+			members := val.consistentRing.GetMembers()
+			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			for _, m := range members {
+				n := m.(Member)
+				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+			}
+		}
+		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+	}
+	return nil
+}
+
+// makeKey creates the string that the hash function uses to create the hash
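+// e.g. makeKey("device-1234", "olt", "openolt") (ids assumed for illustration)
+// yields []byte("openolt_olt_device-1234")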
+func (ep *endpointManager) makeKey(deviceID string, deviceType string, serviceType string) []byte {
+	return []byte(fmt.Sprintf("%s_%s_%s", serviceType, deviceType, deviceID))
+}
+
+// The consistent package requires a hasher function
+type hasher struct{}
+
+// Sum64 provides the hasher function.  Based upon numerous testing scenarios, the xxhash package seems to provide the
+// best distribution compared to other hash packages
+func (h hasher) Sum64(data []byte) uint64 {
+	return xxhash.Sum64(data)
+}
+
+// Member represents a member on the consistent ring
+type Member interface {
+	String() string
+	getReplica() ReplicaID
+	getEndPoint() Endpoint
+	getID() string
+	getServiceType() string
+}
+
+// member implements the Member interface
+type member struct {
+	id          string
+	serviceType string
+	vendor      string
+	version     string
+	replica     ReplicaID
+	endpoint    Endpoint
+}
+
+func newMember(ID string, serviceType string, vendor string, endPoint Endpoint, version string, replica ReplicaID) Member {
+	return &member{
+		id:          ID,
+		serviceType: serviceType,
+		vendor:      vendor,
+		version:     version,
+		replica:     replica,
+		endpoint:    endPoint,
+	}
+}
+
+func (m *member) String() string {
+	return string(m.endpoint)
+}
+
+func (m *member) getReplica() ReplicaID {
+	return m.replica
+}
+
+func (m *member) getEndPoint() Endpoint {
+	return m.endpoint
+}
+
+func (m *member) getID() string {
+	return m.id
+}
+
+func (m *member) getServiceType() string {
+	return m.serviceType
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
new file mode 100644
index 0000000..b149e7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
@@ -0,0 +1,1085 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opentracing/opentracing-go"
+)
+
+const (
+	DefaultMaxRetries     = 3
+	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
+)
+
+const (
+	TransactionKey = "transactionID"
+	FromTopic      = "fromTopic"
+)
+
+var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
+var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
+
+// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
+// obtained from that channel, this interface is invoked.  This is used to handle
+// async requests into the Core via the kafka messaging bus
+type requestHandlerChannel struct {
+	requesthandlerInterface interface{}
+	ch                      <-chan *ic.InterContainerMessage
+}
+
+// transactionChannel represents a combination of a topic and a channel to which a response received
+// on the kafka bus will be forwarded
+type transactionChannel struct {
+	topic *Topic
+	ch    chan *ic.InterContainerMessage
+}
+
+type InterContainerProxy interface {
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	GetDefaultTopic() *Topic
+	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
+	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
+	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
+	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
+	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
+	DeleteTopic(ctx context.Context, topic Topic) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
+}
+
+// interContainerProxy represents the messaging proxy
+type interContainerProxy struct {
+	kafkaAddress                   string
+	defaultTopic                   *Topic
+	defaultRequestHandlerInterface interface{}
+	kafkaClient                    Client
+	doneCh                         chan struct{}
+	doneOnce                       sync.Once
+
+	// This map is used to map a topic to an interface and channel.   When a request is received
+	// on that channel (registered to the topic) then that interface is invoked.
+	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
+	lockTopicRequestHandlerChannelMap sync.RWMutex
+
+	// This map is used to map a response topic to a channel.  All responses received on that
+	// topic are handled by that channel and forwarded to the appropriate consumer's channel, using the
+	// transactionIdToChannelMap.
+	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
+	lockTopicResponseChannelMap sync.RWMutex
+
+	// This map is used to map a transaction to a consumer's channel.  This is used whenever a request has been
+	// sent out and we are waiting for a response.
+	transactionIdToChannelMap     map[string]*transactionChannel
+	lockTransactionIdToChannelMap sync.RWMutex
+}
+
+type InterContainerProxyOption func(*interContainerProxy)
+
+func InterContainerAddress(address string) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaAddress = address
+	}
+}
+
+func DefaultTopic(topic *Topic) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultTopic = topic
+	}
+}
+
+func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultRequestHandlerInterface = handler
+	}
+}
+
+func MsgClient(client Client) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func newInterContainerProxy(opts ...InterContainerProxyOption) *interContainerProxy {
+	proxy := &interContainerProxy{
+		kafkaAddress: DefaultKafkaAddress,
+		doneCh:       make(chan struct{}),
+	}
+
+	for _, option := range opts {
+		option(proxy)
+	}
+
+	return proxy
+}
+
+func NewInterContainerProxy(opts ...InterContainerProxyOption) InterContainerProxy {
+	return newInterContainerProxy(opts...)
+}
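+
+// A typical construction and start-up (values assumed for illustration),
+// where kafkaClient is any implementation of the kafka Client interface:
+//
+//	proxy := NewInterContainerProxy(
+//		InterContainerAddress("kafka:9092"),
+//		DefaultTopic(&Topic{Name: "my-service"}),
+//		MsgClient(kafkaClient))
+//	if err := proxy.Start(ctx); err != nil {
+//		logger.Errorw(ctx, "proxy-start-failed", log.Fields{"error": err})
+//	}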
+
+func (kp *interContainerProxy) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-Proxy")
+
+	// Kafka MsgClient should already have been created.  If not, output fatal error
+	if kp.kafkaClient == nil {
+		logger.Fatal(ctx, "kafka-client-not-set")
+	}
+
+	// Start the kafka client
+	if err := kp.kafkaClient.Start(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the topic to response channel map
+	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
+	// Create the transactionId to Channel Map
+	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
+
+	// Create the topic to request channel map
+	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
+
+	return nil
+}
+
+func (kp *interContainerProxy) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-intercontainer-proxy")
+	kp.doneOnce.Do(func() { close(kp.doneCh) })
+	// TODO : Perform cleanup
+	kp.kafkaClient.Stop(ctx)
+	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+	}
+	err = kp.deleteAllTopicResponseChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+	}
+	kp.deleteAllTransactionIdToChannelMap(ctx)
+}
+
+func (kp *interContainerProxy) GetDefaultTopic() *Topic {
+	return kp.defaultTopic
+}
+
+// InvokeAsyncRPC is used to make an RPC request asynchronously
+func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
+
+	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, !waitForResponse)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise we just use the default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.GetDefaultTopic()
+	}
+
+	chnl := make(chan *RpcResponse)
+
+	go func() {
+
+		// once we're done,
+		// close the response channel
+		defer close(chnl)
+
+		var err error
+		var protoRequest *ic.InterContainerMessage
+
+		// Encode the request
+		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+		if err != nil {
+			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+			chnl <- NewResponse(RpcFormattingError, err, nil)
+			return
+		}
+
+		// Subscribe for response, if needed, before sending request
+		var ch <-chan *ic.InterContainerMessage
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+			log.MarkSpanError(ctx, errors.New("failed-to-subscribe-for-response"))
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// Send request - if the topic is formatted with a device Id then we will send the request using a
+		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+
+		// if the message cannot be sent over kafka, report a transport error on the channel and close it
+		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// if the client is not waiting for a response send the ack and close the channel
+		chnl <- NewResponse(RpcSent, nil, nil)
+		if !waitForResponse {
+			return
+		}
+
+		defer func() {
+			// Remove the subscription for a response on return
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			}
+		}()
+
+		// Wait for response as well as timeout or cancellation
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(ctx, msg); err != nil {
+				chnl <- NewResponse(RpcReply, err, nil)
+			} else {
+				if responseBody.Success {
+					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
+				} else {
+					// response body contains an error
+					unpackErr := &ic.Error{}
+					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
+						chnl <- NewResponse(RpcReply, err, nil)
+					} else {
+						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
+					}
+				}
+			}
+		case <-ctx.Done():
+			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
+			chnl <- NewResponse(RpcTimeout, err, nil)
+		case <-kp.doneCh:
+			chnl <- NewResponse(RpcSystemClosing, nil, nil)
+			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+		}
+	}()
+	return chnl
+}
+
+// embedSpanAsArg extracts the Open-tracing Span from the Context and serializes it for transport over Kafka,
+// embedded as an additional argument.  The additional argument is injected using the key "span", with the
+// Span marshalled into a byte slice as the value.
+//
+// The span name is automatically constructed using the RPC name with following convention (<rpc-name> represents name of invoked method):
+// - RPC invoked in Sync manner (WaitForResponse=true) : kafka-rpc-<rpc-name>
+// - RPC invoked in Async manner (WaitForResponse=false) : kafka-async-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Sync manner (WaitForResponse=true) : kafka-inter-adapter-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Async manner (WaitForResponse=false) : kafka-inter-adapter-async-rpc-<rpc-name>
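+//
+// For instance, an async invocation of an rpc named "get_device" (name assumed
+// for illustration) is traced under the span name "kafka-async-rpc-get_device".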
+func (kp *interContainerProxy) embedSpanAsArg(ctx context.Context, rpc string, isAsync bool) ([]KVArg, opentracing.Span, context.Context) {
+	var err error
+	var newCtx context.Context
+	var spanToInject opentracing.Span
+
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpc == "process_inter_adapter_message" {
+		if msgType, ok := ctx.Value("inter-adapter-msg-type").(ic.InterAdapterMessageType_Types); ok {
+			spanName.WriteString("inter-adapter-")
+			rpc = msgType.String()
+		}
+	}
+
+	if isAsync {
+		spanName.WriteString("async-rpc-")
+	} else {
+		spanName.WriteString("rpc-")
+	}
+	spanName.WriteString(rpc)
+
+	if isAsync {
+		spanToInject, newCtx = log.CreateAsyncSpan(ctx, spanName.String())
+	} else {
+		spanToInject, newCtx = log.CreateChildSpan(ctx, spanName.String())
+	}
+
+	spanToInject.SetBaggageItem("rpc-span-name", spanName.String())
+
+	textMapCarrier := opentracing.TextMapCarrier(make(map[string]string))
+	if err = opentracing.GlobalTracer().Inject(spanToInject.Context(), opentracing.TextMap, textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-serialize-span-to-textmap", log.Fields{"span": spanToInject, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	var textMapJson []byte
+	if textMapJson, err = json.Marshal(textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-marshal-textmap-to-json-string", log.Fields{"textMap": textMapCarrier, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	spanArg := make([]KVArg, 1)
+	spanArg[0] = KVArg{Key: "span", Value: &ic.StrType{Val: string(textMapJson)}}
+	return spanArg, spanToInject, newCtx
+}
+
+// InvokeRPC is used to send a request to a given topic
+func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, false)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise we just use the default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.defaultTopic
+	}
+
+	// Encode the request
+	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+	if err != nil {
+		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+		return false, nil
+	}
+
+	// Subscribe for response, if needed, before sending request
+	var ch <-chan *ic.InterContainerMessage
+	if waitForResponse {
+		var err error
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		}
+	}
+
+	// Send request - if the topic is formatted with a device Id then we will send the request using a
+	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+	//key := GetDeviceIdFromTopic(*toTopic)
+	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	go func() {
+		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			log.MarkSpanError(ctx, errors.New("send-failed"))
+			logger.Errorw(ctx, "send-failed", log.Fields{
+				"topic": toTopic,
+				"key":   key,
+				"error": err})
+		}
+	}()
+
+	if waitForResponse {
+		// Create a child context based on the parent context, if any
+		var cancel context.CancelFunc
+		childCtx := context.Background()
+		if ctx == nil {
+			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
+		} else {
+			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
+		}
+		defer cancel()
+
+		// Wait for response as well as timeout or cancellation
+		// Remove the subscription for a response on return
+		defer func() {
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
+					"id":    protoRequest.Header.Id,
+					"error": err})
+			}
+		}()
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				protoError := &ic.Error{Reason: "channel-closed"}
+				var marshalledArg *any.Any
+				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+					return false, nil // Should never happen
+				}
+				return false, marshalledArg
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			var responseBody *ic.InterContainerResponseBody
+			var err error
+			if responseBody, err = decodeResponse(ctx, msg); err != nil {
+				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
+				// avoid dereferencing a nil responseBody on decode failure
+				return false, nil
+			}
+			return responseBody.Success, responseBody.Result
+		case <-ctx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-childCtx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-kp.doneCh:
+			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			return true, nil
+		}
+	}
+	return true, nil
+}
+
+// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
+// when a message is received on a given topic
+func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
+
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+
+	kp.defaultRequestHandlerInterface = handler
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, handler)
+
+	return nil
+}
+
+// SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
+// when a message is received on a given topic.  So far there is only 1 target registered per microservice
+func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
+
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
+
+	return nil
+}
+
+func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
+}
+
+func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		var err error
+		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
+		}
+		delete(kp.topicToResponseChannelMap, topic)
+		return err
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-response-channel")
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToResponseChannelMap {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToResponseChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
+		kp.topicToRequestHandlerChannelMap[topic] = arg
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			return err
+		}
+		delete(kp.topicToRequestHandlerChannelMap, topic)
+		return nil
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-request-channel")
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToRequestHandlerChannelMap {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToRequestHandlerChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
+		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
+		// Close the channel first
+		close(transChannel.ch)
+		delete(kp.transactionIdToChannelMap, id)
+	}
+}
+
+func (kp *interContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		if value.topic.Name == id {
+			close(value.ch)
+			delete(kp.transactionIdToChannelMap, key)
+		}
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
+	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		close(value.ch)
+		delete(kp.transactionIdToChannelMap, key)
+	}
+}
+
+func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
+	// If we have any consumers on that topic we need to close them
+	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	}
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	}
+	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
+
+	return kp.kafkaClient.DeleteTopic(ctx, &topic)
+}
+
+func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
+	// Encode the response argument - needs to be a proto message
+	if returnedVal == nil {
+		return nil, nil
+	}
+	protoValue, ok := returnedVal.(proto.Message)
+	if !ok {
+		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		err := errors.New("response-value-not-proto-message")
+		return nil, err
+	}
+
+	// Marshal the returned value, if any
+	var marshalledReturnedVal *any.Any
+	var err error
+	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
+		return nil, err
+	}
+	return marshalledReturnedVal, nil
+}
+
+func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	responseBody := &ic.InterContainerResponseBody{
+		Success: false,
+		Result:  nil,
+	}
+	var marshalledResponseBody *any.Any
+	var err error
+	// Error should never happen here
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}
+
+}
+
+//encodeResponse encodes a response to send over kafka and returns an InterContainerMessage message on success
+//or an error on failure
+func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		KeyTopic:  request.Header.KeyTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+
+	// Go over all returned values
+	var marshalledReturnedVal *any.Any
+	var err error
+
+	// for now we support only 1 returned value - (excluding the error)
+	if len(returnedValues) > 0 {
+		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		}
+	}
+
+	responseBody := &ic.InterContainerResponseBody{
+		Success: success,
+		Result:  marshalledReturnedVal,
+	}
+
+	// Marshal the response body
+	var marshalledResponseBody *any.Any
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		return nil, err
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}, nil
+}
+
+func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+	myClassValue := reflect.ValueOf(myClass)
+	// Capitalize the first letter of funcName, since only exported (capitalized)
+	// methods can be looked up and invoked from a different package
+	funcName = strings.Title(funcName)
+	m := myClassValue.MethodByName(funcName)
+	if !m.IsValid() {
+		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
+	}
+	in := make([]reflect.Value, len(params)+1)
+	in[0] = reflect.ValueOf(ctx)
+	for i, param := range params {
+		in[i+1] = reflect.ValueOf(param)
+	}
+	out = m.Call(in)
+	return
+}
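+
+// Illustrative sketch (not part of this library): dispatching a request handler
+// via CallFuncByName. The handler type and RPC name below are hypothetical.
+//
+//	type pingHandler struct{}
+//
+//	func (h *pingHandler) Ping(ctx context.Context, args []*ic.Argument) (*ic.StrType, error) {
+//		return &ic.StrType{Val: "pong"}, nil
+//	}
+//
+//	// out, err := CallFuncByName(ctx, &pingHandler{}, "ping", args)
+//	// "ping" is capitalized to "Ping" before the reflective lookup, and ctx is
+//	// prepended to the argument list before the call.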
+
+func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+	arg := &KVArg{
+		Key:   TransactionKey,
+		Value: &ic.StrType{Val: transactionId},
+	}
+
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   arg.Key,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   FromTopic,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+// enrichContextWithSpan extracts the Span embedded in a Kafka RPC request on the receiver side. If a span is found
+// embedded in the KV args (under the key "span"), it is de-serialized and injected into the Context to be carried
+// forward by the RPC request processor thread.
+// If no span is found embedded, a span named "kafka-rpc-<rpc-name>" is still created to enrich the Context for RPC
+// calls coming from components that do not yet send a span (e.g. openonu adapter)
+func (kp *interContainerProxy) enrichContextWithSpan(ctx context.Context, rpcName string, args []*ic.Argument) (opentracing.Span, context.Context) {
+
+	for _, arg := range args {
+		if arg.Key == "span" {
+			var err error
+			var textMapString ic.StrType
+			if err = ptypes.UnmarshalAny(arg.Value, &textMapString); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-kvarg-to-textmap-string", log.Fields{"value": arg.Value})
+				break
+			}
+
+			spanTextMap := make(map[string]string)
+			if err = json.Unmarshal([]byte(textMapString.Val), &spanTextMap); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-textmap-from-json-string", log.Fields{"textMapString": textMapString, "error": err})
+				break
+			}
+
+			var spanContext opentracing.SpanContext
+			if spanContext, err = opentracing.GlobalTracer().Extract(opentracing.TextMap, opentracing.TextMapCarrier(spanTextMap)); err != nil {
+				logger.Warnw(ctx, "unable-to-deserialize-textmap-to-span", log.Fields{"textMap": spanTextMap, "error": err})
+				break
+			}
+
+			var receivedRpcName string
+			extractBaggage := func(k, v string) bool {
+				if k == "rpc-span-name" {
+					receivedRpcName = v
+					return false
+				}
+				return true
+			}
+
+			spanContext.ForeachBaggageItem(extractBaggage)
+
+			return opentracing.StartSpanFromContext(ctx, receivedRpcName, opentracing.FollowsFrom(spanContext))
+		}
+	}
+
+	// Create new Child Span with rpc as name if no span details were received in kafka arguments
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpcName == "process_inter_adapter_message" {
+		for _, arg := range args {
+			if arg.Key == "msg" {
+				iamsg := ic.InterAdapterMessage{}
+				if err := ptypes.UnmarshalAny(arg.Value, &iamsg); err == nil {
+					spanName.WriteString("inter-adapter-")
+					rpcName = iamsg.Header.Type.String()
+				}
+			}
+		}
+	}
+
+	spanName.WriteString("rpc-")
+	spanName.WriteString(rpcName)
+
+	return opentracing.StartSpanFromContext(ctx, spanName.String())
+}
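+
+// Illustrative sender-side sketch (not part of this library): embedding the active
+// span as the "span" KV argument that enrichContextWithSpan expects. The variable
+// names are hypothetical.
+//
+//	carrier := opentracing.TextMapCarrier(make(map[string]string))
+//	if span := opentracing.SpanFromContext(ctx); span != nil {
+//		_ = opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, carrier)
+//	}
+//	serialized, _ := json.Marshal(carrier)
+//	spanArg := &KVArg{Key: "span", Value: &ic.StrType{Val: string(serialized)}}
+//	_ = spanArg // passed along with the other request arguments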
+
+func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
+
+	// First extract the header to know whether this is a request - responses are handled by a different handler
+	if msg.Header.Type == ic.MessageType_REQUEST {
+		var out []reflect.Value
+		var err error
+
+		// Get the request body
+		requestBody := &ic.InterContainerRequestBody{}
+		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
+		} else {
+			span, ctx := kp.enrichContextWithSpan(ctx, requestBody.Rpc, requestBody.Args)
+			defer span.Finish()
+
+			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			// let the callee unpack the arguments as it's the only one that knows the real proto type
+			// Augment the requestBody with the message Id as it will be used in scenarios where cores
+			// are set in pairs and competing
+			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
+
+			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
+			// needs to send an unsolicited message to the requesting container
+			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
+
+			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
+			if err != nil {
+				logger.Warn(ctx, err)
+			}
+		}
+		// Response required?
+		if requestBody.ResponseRequired {
+			// If we already have an error before then just return that
+			var returnError *ic.Error
+			var returnedValues []interface{}
+			var success bool
+			if err != nil {
+				returnError = &ic.Error{Reason: err.Error()}
+				returnedValues = make([]interface{}, 1)
+				returnedValues[0] = returnError
+			} else {
+				returnedValues = make([]interface{}, 0)
+				// Check for errors first
+				lastIndex := len(out) - 1
+				if out[lastIndex].Interface() != nil { // Error
+					if retError, ok := out[lastIndex].Interface().(error); ok {
+						if retError.Error() == ErrorTransactionNotAcquired.Error() {
+							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							return // Ignore - process is in competing mode and ignored transaction
+						}
+						returnError = &ic.Error{Reason: retError.Error()}
+						returnedValues = append(returnedValues, returnError)
+					} else { // Should never happen
+						returnError = &ic.Error{Reason: "incorrect-error-returns"}
+						returnedValues = append(returnedValues, returnError)
+					}
+				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
+					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					return // Ignore - should not happen
+				} else { // Non-error case
+					success = true
+					for idx, val := range out {
+						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						if idx != lastIndex {
+							returnedValues = append(returnedValues, val.Interface())
+						}
+					}
+				}
+			}
+
+			var icm *ic.InterContainerMessage
+			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
+				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(ctx, msg)
+			}
+			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
+			// by providing a message key.  The key is encoded in the topic name.  If the deviceId is not
+			// present then the key will be empty, and messages for that topic may be spread across
+			// partitions.
+			replyTopic := &Topic{Name: msg.Header.FromTopic}
+			key := msg.Header.KeyTopic
+			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			// TODO: handle error response.
+			go func() {
+				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
+					logger.Errorw(ctx, "send-reply-failed", log.Fields{
+						"topic": replyTopic,
+						"key":   key,
+						"error": err})
+				}
+			}()
+		}
+	} else if msg.Header.Type == ic.MessageType_RESPONSE {
+		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(ctx, msg)
+	} else {
+		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
+	}
+}
+
+func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+	//	Wait for messages
+	for msg := range ch {
+		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(context.Background(), msg, targetInterface)
+	}
+}
+
+func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.RLock()
+	defer kp.lockTransactionIdToChannelMap.RUnlock()
+	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
+		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		return
+	}
+	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
+}
+
+// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
+// This method is built to prevent all subscribers from receiving all messages, as is the case with the Subscribe
+// API. There is one response channel waiting for kafka messages before dispatching each message to the
+// corresponding waiting channel
+func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+
+	// Create a specific channel for this consumer.  We cannot use the channel from the kafkaclient as it will
+	// broadcast any message for this topic to all channels waiting on it.
+	// Set channel size to 1 to prevent deadlock, see VOL-2708
+	ch := make(chan *ic.InterContainerMessage, 1)
+	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
+
+	return ch, nil
+}
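+
+// Illustrative flow (not part of this library): a typical request/response
+// exchange built on subscribeForResponse, as it might appear inside an
+// interContainerProxy method. Identifiers below are hypothetical.
+//
+//	protoRequest, _ := encodeRequest(ctx, "my_rpc", toTopic, replyTopic, key, kvArgs...)
+//	ch, _ := kp.subscribeForResponse(ctx, *replyTopic, protoRequest.Header.Id)
+//	defer func() { _ = kp.unSubscribeForResponse(ctx, protoRequest.Header.Id) }()
+//	if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err == nil {
+//		select {
+//		case response := <-ch:
+//			body, _ := decodeResponse(ctx, response)
+//			_ = body // body.Success / body.Result carry the outcome
+//		case <-ctx.Done():
+//			// caller-imposed timeout or cancellation
+//		}
+//	}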
+
+func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
+	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+	kp.deleteFromTransactionIdToChannelMap(trnsId)
+	return nil
+}
+
+func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
+	return kp.kafkaClient.SendLiveness(ctx)
+}
+
+//encodeRequest formats a request to send over kafka and returns an InterContainerMessage message on success
+//or an error on failure
+func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+	requestHeader := &ic.Header{
+		Id:        uuid.New().String(),
+		Type:      ic.MessageType_REQUEST,
+		FromTopic: replyTopic.Name,
+		ToTopic:   toTopic.Name,
+		KeyTopic:  key,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	requestBody := &ic.InterContainerRequestBody{
+		Rpc:              rpc,
+		ResponseRequired: true,
+		ReplyToTopic:     replyTopic.Name,
+	}
+
+	for _, arg := range kvArgs {
+		if arg == nil {
+			// In case the caller sends an array with empty args
+			continue
+		}
+		var marshalledArg *any.Any
+		var err error
+		// ascertain the value interface type is a proto.Message
+		protoValue, ok := arg.Value.(proto.Message)
+		if !ok {
+			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			err := errors.New("argument-value-not-proto-message")
+			return nil, err
+		}
+		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+			return nil, err
+		}
+		protoArg := &ic.Argument{
+			Key:   arg.Key,
+			Value: marshalledArg,
+		}
+		requestBody.Args = append(requestBody.Args, protoArg)
+	}
+
+	var marshalledData *any.Any
+	var err error
+	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+		return nil, err
+	}
+	request := &ic.InterContainerMessage{
+		Header: requestHeader,
+		Body:   marshalledData,
+	}
+	return request, nil
+}
+
+func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+	//	Extract the message body
+	responseBody := ic.InterContainerResponseBody{}
+	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		return nil, err
+	}
+	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+
+	return &responseBody, nil
+
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
new file mode 100644
index 0000000..3273470
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
@@ -0,0 +1,1157 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Shopify/sarama"
+	scc "github.com/bsm/sarama-cluster"
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+// consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
+// topic, the consumer(s) broadcast the message to all the listening channels.  The consumer can be a partition
+// consumer or a group consumer
+type consumerChannels struct {
+	consumers []interface{}
+	channels  []chan *ic.InterContainerMessage
+}
+
+// static check to ensure SaramaClient implements Client
+var _ Client = &SaramaClient{}
+
+// SaramaClient represents the messaging proxy
+type SaramaClient struct {
+	cAdmin                        sarama.ClusterAdmin
+	KafkaAddress                  string
+	producer                      sarama.AsyncProducer
+	consumer                      sarama.Consumer
+	groupConsumers                map[string]*scc.Consumer
+	lockOfGroupConsumers          sync.RWMutex
+	consumerGroupPrefix           string
+	consumerType                  int
+	consumerGroupName             string
+	producerFlushFrequency        int
+	producerFlushMessages         int
+	producerFlushMaxmessages      int
+	producerRetryMax              int
+	producerRetryBackOff          time.Duration
+	producerReturnSuccess         bool
+	producerReturnErrors          bool
+	consumerMaxwait               int
+	maxProcessingTime             int
+	numPartitions                 int
+	numReplicas                   int
+	autoCreateTopic               bool
+	doneCh                        chan int
+	metadataCallback              func(fromTopic string, timestamp time.Time)
+	topicToConsumerChannelMap     map[string]*consumerChannels
+	lockTopicToConsumerChannelMap sync.RWMutex
+	topicLockMap                  map[string]*sync.RWMutex
+	lockOfTopicLockMap            sync.RWMutex
+	metadataMaxRetry              int
+	alive                         bool
+	livenessMutex                 sync.Mutex
+	liveness                      chan bool
+	livenessChannelInterval       time.Duration
+	lastLivenessTime              time.Time
+	started                       bool
+	healthinessMutex              sync.Mutex
+	healthy                       bool
+	healthiness                   chan bool
+}
+
+type SaramaClientOption func(*SaramaClient)
+
+func Address(address string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.KafkaAddress = address
+	}
+}
+
+func ConsumerGroupPrefix(prefix string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupPrefix = prefix
+	}
+}
+
+func ConsumerGroupName(name string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupName = name
+	}
+}
+
+func ConsumerType(consumer int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerType = consumer
+	}
+}
+
+func ProducerFlushFrequency(frequency int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushFrequency = frequency
+	}
+}
+
+func ProducerFlushMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMessages = num
+	}
+}
+
+func ProducerFlushMaxMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMaxmessages = num
+	}
+}
+
+func ProducerMaxRetries(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryMax = num
+	}
+}
+
+func ProducerRetryBackoff(duration time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryBackOff = duration
+	}
+}
+
+func ProducerReturnOnErrors(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnErrors = opt
+	}
+}
+
+func ProducerReturnOnSuccess(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnSuccess = opt
+	}
+}
+
+func ConsumerMaxWait(wait int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerMaxwait = wait
+	}
+}
+
+func MaxProcessingTime(pTime int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.maxProcessingTime = pTime
+	}
+}
+
+func NumPartitions(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numPartitions = number
+	}
+}
+
+func NumReplicas(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numReplicas = number
+	}
+}
+
+func AutoCreateTopic(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.autoCreateTopic = opt
+	}
+}
+
+func MetadatMaxRetries(retry int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.metadataMaxRetry = retry
+	}
+}
+
+func LivenessChannelInterval(opt time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.livenessChannelInterval = opt
+	}
+}
+
+func NewSaramaClient(opts ...SaramaClientOption) *SaramaClient {
+	client := &SaramaClient{
+		KafkaAddress: DefaultKafkaAddress,
+	}
+	client.consumerType = DefaultConsumerType
+	client.producerFlushFrequency = DefaultProducerFlushFrequency
+	client.producerFlushMessages = DefaultProducerFlushMessages
+	client.producerFlushMaxmessages = DefaultProducerFlushMaxmessages
+	client.producerReturnErrors = DefaultProducerReturnErrors
+	client.producerReturnSuccess = DefaultProducerReturnSuccess
+	client.producerRetryMax = DefaultProducerRetryMax
+	client.producerRetryBackOff = DefaultProducerRetryBackoff
+	client.consumerMaxwait = DefaultConsumerMaxwait
+	client.maxProcessingTime = DefaultMaxProcessingTime
+	client.numPartitions = DefaultNumberPartitions
+	client.numReplicas = DefaultNumberReplicas
+	client.autoCreateTopic = DefaultAutoCreateTopic
+	client.metadataMaxRetry = DefaultMetadataMaxRetry
+	client.livenessChannelInterval = DefaultLivenessChannelInterval
+
+	for _, option := range opts {
+		option(client)
+	}
+
+	client.groupConsumers = make(map[string]*scc.Consumer)
+
+	client.lockTopicToConsumerChannelMap = sync.RWMutex{}
+	client.topicLockMap = make(map[string]*sync.RWMutex)
+	client.lockOfTopicLockMap = sync.RWMutex{}
+	client.lockOfGroupConsumers = sync.RWMutex{}
+
+	// healthy and alive until proven otherwise
+	client.alive = true
+	client.healthy = true
+
+	return client
+}
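+
+// Example (illustrative): constructing a client with functional options. The
+// broker address and group prefix below are hypothetical.
+//
+//	client := NewSaramaClient(
+//		Address("kafka.voltha.svc:9092"),
+//		ConsumerType(GroupCustomer),
+//		ConsumerGroupPrefix("rwcore-"),
+//		NumPartitions(3),
+//	)
+//	// err := client.Start(ctx)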
+
+func (sc *SaramaClient) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-kafka-sarama-client")
+
+	// Create the Done channel
+	sc.doneCh = make(chan int, 1)
+
+	var err error
+
+	// Add a cleanup in case of failure to startup
+	defer func() {
+		if err != nil {
+			sc.Stop(ctx)
+		}
+	}()
+
+	// Create the Cluster Admin
+	if err = sc.createClusterAdmin(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-cluster-admin", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the Publisher
+	if err := sc.createPublisher(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-publisher", log.Fields{"error": err})
+		return err
+	}
+
+	if sc.consumerType == DefaultConsumerType {
+		// Create the master consumers
+		if err := sc.createConsumer(ctx); err != nil {
+			logger.Errorw(ctx, "Cannot-create-kafka-consumers", log.Fields{"error": err})
+			return err
+		}
+	}
+
+	// Create the topic to consumers/channel map
+	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
+
+	logger.Info(ctx, "kafka-sarama-client-started")
+
+	sc.started = true
+
+	return nil
+}
+
+func (sc *SaramaClient) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-sarama-client")
+
+	sc.started = false
+
+	//Send a message over the done channel to close all long running routines
+	sc.doneCh <- 1
+
+	if sc.producer != nil {
+		if err := sc.producer.Close(); err != nil {
+			logger.Errorw(ctx, "closing-producer-failed", log.Fields{"error": err})
+		}
+	}
+
+	if sc.consumer != nil {
+		if err := sc.consumer.Close(); err != nil {
+			logger.Errorw(ctx, "closing-partition-consumer-failed", log.Fields{"error": err})
+		}
+	}
+
+	for key, val := range sc.groupConsumers {
+		logger.Debugw(ctx, "closing-group-consumer", log.Fields{"topic": key})
+		if err := val.Close(); err != nil {
+			logger.Errorw(ctx, "closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+		}
+	}
+
+	if sc.cAdmin != nil {
+		if err := sc.cAdmin.Close(); err != nil {
+			logger.Errorw(ctx, "closing-cluster-admin-failed", log.Fields{"error": err})
+		}
+	}
+
+	//TODO: Clear the consumers map
+	//sc.clearConsumerChannelMap()
+
+	logger.Info(ctx, "sarama-client-stopped")
+}
+
+//createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
+// the invoking function must hold the lock
+func (sc *SaramaClient) createTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+	// Set the topic details
+	topicDetail := &sarama.TopicDetail{}
+	topicDetail.NumPartitions = int32(numPartition)
+	topicDetail.ReplicationFactor = int16(repFactor)
+	topicDetail.ConfigEntries = make(map[string]*string)
+	topicDetails := make(map[string]*sarama.TopicDetail)
+	topicDetails[topic.Name] = topicDetail
+
+	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
+		if err == sarama.ErrTopicAlreadyExists {
+			//	Not an error
+			logger.Debugw(ctx, "topic-already-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err})
+		return err
+	}
+	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
+	// do so.
+	logger.Debugw(ctx, "topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	return nil
+}
+
+//CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
+// ensure no two go routines are performing operations on the same topic
+func (sc *SaramaClient) CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	return sc.createTopic(ctx, topic, numPartition, repFactor)
+}
+
+//DeleteTopic removes a topic from the kafka Broker
+func (sc *SaramaClient) DeleteTopic(ctx context.Context, topic *Topic) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	// Remove the topic from the broker
+	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
+		if err == sarama.ErrUnknownTopicOrPartition {
+			//	Not an error as does not exist
+			logger.Debugw(ctx, "topic-not-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw(ctx, "delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+
+	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
+	if err := sc.clearTopicFromConsumerChannelMap(ctx, *topic); err != nil {
+		logger.Errorw(ctx, "failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+	return nil
+}
+
+// Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
+// messages from that topic
+func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw(ctx, "subscribe", log.Fields{"topic": topic.Name})
+
+	// If a consumer already exists for that topic then reuse it
+	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
+		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
+		// Create a channel specific to that consumer and add it to the consumer channel map
+		ch := make(chan *ic.InterContainerMessage)
+		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
+		return ch, nil
+	}
+
+	// Register for the topic and set it up
+	var consumerListeningChannel chan *ic.InterContainerMessage
+	var err error
+
+	// Use the consumerType option to figure out the type of consumer to launch
+	if sc.consumerType == PartitionConsumer {
+		if sc.autoCreateTopic {
+			if err = sc.createTopic(ctx, topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+				return nil, err
+			}
+		}
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(ctx, topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+	} else if sc.consumerType == GroupCustomer {
+		// TODO: create topic if auto create is on.  There is an issue with the sarama cluster library that
+		// does not consume from a precreated topic in some scenarios
+		//if sc.autoCreateTopic {
+		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
+		//		logger.Errorw(ctx, "create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		return nil, err
+		//	}
+		//}
+		//groupId := sc.consumerGroupName
+		groupId := getGroupId(kvArgs...)
+		// Include the group prefix
+		if groupId != "" {
+			groupId = sc.consumerGroupPrefix + groupId
+		} else {
+			// Need to use a unique group Id per topic
+			groupId = sc.consumerGroupPrefix + topic.Name
+		}
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(ctx, topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+			return nil, err
+		}
+
+	} else {
+		logger.Warnw(ctx, "unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		return nil, errors.New("unknown-consumer-type")
+	}
+
+	return consumerListeningChannel, nil
+}
+
+//UnSubscribe unsubscribes a consumer from a given topic
+func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw(ctx, "unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	var err error
+	if err = sc.removeChannelFromConsumerChannelMap(ctx, *topic, ch); err != nil {
+		logger.Errorw(ctx, "failed-removing-channel", log.Fields{"error": err})
+	}
+	if err = sc.deleteFromGroupConsumers(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "failed-deleting-group-consumer", log.Fields{"error": err})
+	}
+	return err
+}
+
+func (sc *SaramaClient) SubscribeForMetadata(ctx context.Context, callback func(fromTopic string, timestamp time.Time)) {
+	sc.metadataCallback = callback
+}
+
+func (sc *SaramaClient) updateLiveness(ctx context.Context, alive bool) {
+	// Post a consistent stream of liveness data to the channel,
+	// so that in a live state, the core does not time out and
+	// send a forced liveness message. Production of liveness
+	// events to the channel is rate-limited by livenessChannelInterval.
+	sc.livenessMutex.Lock()
+	defer sc.livenessMutex.Unlock()
+	if sc.liveness != nil {
+		if sc.alive != alive {
+			logger.Info(ctx, "update-liveness-channel-because-change")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
+			logger.Info(ctx, "update-liveness-channel-because-interval")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Only emit a log message when the state changes
+	if sc.alive != alive {
+		logger.Info(ctx, "set-client-alive", log.Fields{"alive": alive})
+		sc.alive = alive
+	}
+}
+
+// Once unhealthy, we never go back
+func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
+	sc.healthy = false
+	sc.healthinessMutex.Lock()
+	defer sc.healthinessMutex.Unlock()
+	if sc.healthiness != nil {
+		logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		sc.healthiness <- sc.healthy
+	}
+}
+
+func (sc *SaramaClient) isLivenessError(ctx context.Context, err error) bool {
+	// Sarama producers and consumers encapsulate the error inside
+	// a ProducerError or ConsumerError struct.
+	if prodError, ok := err.(*sarama.ProducerError); ok {
+		err = prodError.Err
+	} else if consumerError, ok := err.(*sarama.ConsumerError); ok {
+		err = consumerError.Err
+	}
+
+	// Sarama-Cluster composes the error into a ClusterError struct, which we
+	// cannot compare by reference. To handle that, the best we can do is
+	// compare the error strings.
+
+	switch err.Error() {
+	case context.DeadlineExceeded.Error():
+		logger.Info(ctx, "is-liveness-error-timeout")
+		return true
+	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
+		logger.Info(ctx, "is-liveness-error-no-brokers")
+		return true
+	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
+		logger.Info(ctx, "is-liveness-error-shutting-down")
+		return true
+	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
+		logger.Info(ctx, "is-liveness-error-not-available")
+		return true
+	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
+		logger.Info(ctx, "is-liveness-error-circuit-breaker-open")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
+		logger.Info(ctx, "is-liveness-error-connection-refused")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
+		logger.Info(ctx, "is-liveness-error-io-timeout")
+		return true
+	}
+
+	// Other errors shouldn't trigger a loss of liveness
+
+	logger.Infow(ctx, "is-liveness-error-ignored", log.Fields{"err": err})
+
+	return false
+}
+
+// Send formats and sends the request onto the kafka messaging bus.
+func (sc *SaramaClient) Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error {
+
+	// Assert message is a proto message
+	var protoMsg proto.Message
+	var ok bool
+	// ascertain the value interface type is a proto.Message
+	if protoMsg, ok = msg.(proto.Message); !ok {
+		logger.Warnw(ctx, "message-not-proto-message", log.Fields{"msg": msg})
+		return fmt.Errorf("not-a-proto-msg-%s", msg)
+	}
+
+	var marshalled []byte
+	var err error
+	//	Create the Sarama producer message
+	if marshalled, err = proto.Marshal(protoMsg); err != nil {
+		logger.Errorw(ctx, "marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		return err
+	}
+	key := ""
+	if len(keys) > 0 {
+		key = keys[0] // Only the first key is relevant
+	}
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: topic.Name,
+		Key:   sarama.StringEncoder(key),
+		Value: sarama.ByteEncoder(marshalled),
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw(ctx, "message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw(ctx, "error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
+		}
+		return notOk
+	}
+	return nil
+}
+
+// EnableLivenessChannel enables the liveness monitor channel. This channel will
+// report a "true" or "false" on every publish, which indicates whether or not
+// the channel is still live. This channel is then picked up by the service
+// (i.e. rw_core / ro_core) to update readiness status and/or take other actions.
+func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
+	if enable {
+		sc.livenessMutex.Lock()
+		defer sc.livenessMutex.Unlock()
+		if sc.liveness == nil {
+			logger.Info(ctx, "kafka-create-liveness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.liveness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.liveness <- sc.alive
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// liveness monitoring
+		panic("Turning off liveness reporting is not supported")
+	}
+	return sc.liveness
+}
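+
+// Illustrative usage (not part of this library): a monitor goroutine draining the
+// liveness channel and reacting to state changes.
+//
+//	liveness := client.EnableLivenessChannel(ctx, true)
+//	go func() {
+//		for alive := range liveness {
+//			_ = alive // e.g. update a readiness probe or metric
+//		}
+//	}()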
+
+// EnableHealthinessChannel enables the healthiness monitor channel. This channel
+// will report "false" if the kafka consumers die, or some other catastrophic
+// problem occurs that would require re-creating the client.
+func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+	if enable {
+		sc.healthinessMutex.Lock()
+		defer sc.healthinessMutex.Unlock()
+		if sc.healthiness == nil {
+			logger.Info(ctx, "kafka-create-healthiness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.healthiness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.healthiness <- sc.healthy
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// healthiness monitoring
+		panic("Turning off healthiness reporting is not supported")
+	}
+	return sc.healthiness
+}
+
+// SendLiveness publishes a timestamped test message to kafka to check whether
+// connectivity has been restored.
+func (sc *SaramaClient) SendLiveness(ctx context.Context) error {
+	if !sc.started {
+		return fmt.Errorf("SendLiveness() called while not started")
+	}
+
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: "_liveness_test",
+		Value: sarama.StringEncoder(time.Now().Format(time.RFC3339)), // for debugging / informative use
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw(ctx, "liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw(ctx, "liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
+		}
+		return notOk
+	}
+	return nil
+}
+
+// getGroupId returns the group id from the key-value args.
+func getGroupId(kvArgs ...*KVArg) string {
+	for _, arg := range kvArgs {
+		if arg.Key == GroupIdKey {
+			return arg.Value.(string)
+		}
+	}
+	return ""
+}
+
+// getOffset returns the offset from the key-value args.
+func getOffset(kvArgs ...*KVArg) int64 {
+	for _, arg := range kvArgs {
+		if arg.Key == Offset {
+			return arg.Value.(int64)
+		}
+	}
+	return sarama.OffsetNewest
+}
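+
+// Example (illustrative): both keys are supplied through Subscribe's variadic
+// KVArg list; the topic and group names below are hypothetical.
+//
+//	ch, err := client.Subscribe(ctx, &Topic{Name: "core-pair-1"},
+//		&KVArg{Key: GroupIdKey, Value: "rwcore-group"},
+//		&KVArg{Key: Offset, Value: sarama.OffsetOldest})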
+
+func (sc *SaramaClient) createClusterAdmin(ctx context.Context) error {
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+
+	// Create a cluster Admin
+	var cAdmin sarama.ClusterAdmin
+	var err error
+	if cAdmin, err = sarama.NewClusterAdmin([]string{sc.KafkaAddress}, config); err != nil {
+		logger.Errorw(ctx, "cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
+		return err
+	}
+	sc.cAdmin = cAdmin
+	return nil
+}
+
+func (sc *SaramaClient) lockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	if _, exist := sc.topicLockMap[topic.Name]; !exist {
+		sc.topicLockMap[topic.Name] = &sync.RWMutex{}
+	}
+	// Capture the per-topic lock while still holding the map lock so the map is
+	// never read concurrently with a write from another goroutine
+	topicLock := sc.topicLockMap[topic.Name]
+	sc.lockOfTopicLockMap.Unlock()
+	topicLock.Lock()
+}
+
+func (sc *SaramaClient) unLockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	defer sc.lockOfTopicLockMap.Unlock()
+	if _, exist := sc.topicLockMap[topic.Name]; exist {
+		sc.topicLockMap[topic.Name].Unlock()
+	}
+}
+
+func (sc *SaramaClient) addTopicToConsumerChannelMap(id string, arg *consumerChannels) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if _, exist := sc.topicToConsumerChannelMap[id]; !exist {
+		sc.topicToConsumerChannelMap[id] = arg
+	}
+}
+
+func (sc *SaramaClient) getConsumerChannel(topic *Topic) *consumerChannels {
+	sc.lockTopicToConsumerChannelMap.RLock()
+	defer sc.lockTopicToConsumerChannelMap.RUnlock()
+
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		return consumerCh
+	}
+	return nil
+}
+
+func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		consumerCh.channels = append(consumerCh.channels, ch)
+		return
+	}
+	logger.Warnw(ctx, "consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+}
+
+//closeConsumers closes a list of sarama consumers.  A consumer can either be a partition consumer or a group consumer
+func closeConsumers(ctx context.Context, consumers []interface{}) error {
+	var err error
+	for _, consumer := range consumers {
+		//	Is it a partition consumer?
+		if partitionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			if errTemp := partitionConsumer.Close(); errTemp != nil {
+				logger.Debugw(ctx, "partition-consumer-close-error", log.Fields{"err": errTemp})
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur on a race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		} else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+			if errTemp := groupConsumer.Close(); errTemp != nil {
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur on a race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		}
+	}
+	return err
+}
+
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		// Channel will be closed in the removeChannel method
+		consumerCh.channels = removeChannel(ctx, consumerCh.channels, ch)
+		// If there are no more channels then we can close the consumers itself
+		if len(consumerCh.channels) == 0 {
+			logger.Debugw(ctx, "closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(ctx, consumerCh.consumers)
+			//err := consumerCh.consumers.Close()
+			delete(sc.topicToConsumerChannelMap, topic.Name)
+			return err
+		}
+		return nil
+	}
+	logger.Warnw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return errors.New("topic-does-not-exist")
+}
+
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(ctx context.Context, topic Topic) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		for _, ch := range consumerCh.channels {
+			// Channel will be closed in the removeChannel method
+			removeChannel(ctx, consumerCh.channels, ch)
+		}
+		err := closeConsumers(ctx, consumerCh.consumers)
+		//if err == sarama.ErrUnknownTopicOrPartition {
+		//	// Not an error
+		//	err = nil
+		//}
+		//err := consumerCh.consumers.Close()
+		delete(sc.topicToConsumerChannelMap, topic.Name)
+		return err
+	}
+	logger.Debugw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return nil
+}
+
+//createPublisher creates the publisher which is used to send a message onto kafka
+func (sc *SaramaClient) createPublisher(ctx context.Context) error {
+	// This Creates the publisher
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.Producer.Partitioner = sarama.NewRandomPartitioner
+	config.Producer.Flush.Frequency = time.Duration(sc.producerFlushFrequency)
+	config.Producer.Flush.Messages = sc.producerFlushMessages
+	config.Producer.Flush.MaxMessages = sc.producerFlushMaxmessages
+	config.Producer.Return.Errors = sc.producerReturnErrors
+	config.Producer.Return.Successes = sc.producerReturnSuccess
+	//config.Producer.RequiredAcks = sarama.WaitForAll
+	config.Producer.RequiredAcks = sarama.WaitForLocal
+
+	brokers := []string{sc.KafkaAddress}
+
+	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
+		logger.Errorw(ctx, "error-starting-publisher", log.Fields{"error": err})
+		return err
+	} else {
+		sc.producer = producer
+	}
+	logger.Info(ctx, "Kafka-publisher-created")
+	return nil
+}
+
+func (sc *SaramaClient) createConsumer(ctx context.Context) error {
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.Consumer.Return.Errors = true
+	config.Consumer.Fetch.Min = 1
+	config.Consumer.MaxWaitTime = time.Duration(sc.consumerMaxwait) * time.Millisecond
+	config.Consumer.MaxProcessingTime = time.Duration(sc.maxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = sarama.OffsetNewest
+	config.Metadata.Retry.Max = sc.metadataMaxRetry
+	brokers := []string{sc.KafkaAddress}
+
+	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
+		logger.Errorw(ctx, "error-starting-consumers", log.Fields{"error": err})
+		return err
+	} else {
+		sc.consumer = consumer
+	}
+	logger.Info(ctx, "Kafka-consumers-created")
+	return nil
+}
+
+// createGroupConsumer creates a consumer group
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+	config := scc.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.ClientID = uuid.New().String()
+	config.Group.Mode = scc.ConsumerModeMultiplex
+	config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
+	config.Consumer.Return.Errors = true
+	//config.Group.Return.Notifications = false
+	//config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
+	//config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = initialOffset
+	//config.Consumer.Offsets.Initial = sarama.OffsetOldest
+	brokers := []string{sc.KafkaAddress}
+
+	topics := []string{topic.Name}
+	var consumer *scc.Consumer
+	var err error
+
+	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
+		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		return nil, err
+	}
+	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+
+	//sc.groupConsumers[topic.Name] = consumer
+	sc.addToGroupConsumers(topic.Name, consumer)
+	return consumer, nil
+}
+
+// dispatchToConsumers sends the InterContainerMessage received on a given topic to all subscribers for that
+// topic via the unique channel each subscriber received during subscription
+func (sc *SaramaClient) dispatchToConsumers(consumerCh *consumerChannels, protoMessage *ic.InterContainerMessage) {
+	// Need to go over all channels and publish messages to them - do we need to copy msg?
+	sc.lockTopicToConsumerChannelMap.RLock()
+	for _, ch := range consumerCh.channels {
+		go func(c chan *ic.InterContainerMessage) {
+			c <- protoMessage
+		}(ch)
+	}
+	sc.lockTopicToConsumerChannelMap.RUnlock()
+
+	if callback := sc.metadataCallback; callback != nil {
+		ts, _ := ptypes.Timestamp(protoMessage.Header.Timestamp)
+		callback(protoMessage.Header.FromTopic, ts)
+	}
+}
+
+func (sc *SaramaClient) consumeFromAPartition(ctx context.Context, topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+					logger.Warnw(ctx, "partition-consumers-error", log.Fields{"error": err})
+				}
+			} else {
+				// Channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			//logger.Debugw(ctx, "message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			if !ok {
+				// channel is closed
+				break startloop
+			}
+			msgBody := msg.Value
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+		case <-sc.doneCh:
+			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow(ctx, "partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
+}
+
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+				}
+				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+			} else {
+				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				// channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			if !ok {
+				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				// Channel closed
+				break startloop
+			}
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			msgBody := msg.Value
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+			consumer.MarkOffset(msg, "")
+		case ntf := <-consumer.Notifications():
+			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
+		case <-sc.doneCh:
+			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
+}
+
+func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
+	logger.Debugw(ctx, "starting-consumers", log.Fields{"topic": topic.Name})
+	var consumerCh *consumerChannels
+	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
+		logger.Errorw(ctx, "consumers-not-exist", log.Fields{"topic": topic.Name})
+		return errors.New("consumers-not-exist")
+	}
+	// For each consumer listening for that topic, start a consumption loop
+	for _, consumer := range consumerCh.consumers {
+		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
+		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
+		} else {
+			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
+			return errors.New("invalid-consumer")
+		}
+	}
+	return nil
+}
+
+// setupPartitionConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels
+// map for that topic.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	var pConsumers []sarama.PartitionConsumer
+	var err error
+
+	if pConsumers, err = sc.createPartitionConsumers(ctx, topic, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	consumersIf := make([]interface{}, 0)
+	for _, pConsumer := range pConsumers {
+		consumersIf = append(consumersIf, pConsumer)
+	}
+
+	// Create the consumers/channel structure, set the consumers, and create a channel on that topic - for now
+	// unbuffered to verify race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: consumersIf,
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start a consumers to listen on that specific topic
+	go func() {
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
+
+	return consumerListeningChannel, nil
+}
+
+// setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
+// for that topic.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	// TODO: Replace this development partition consumer with a group consumer
+	var pConsumer *scc.Consumer
+	var err error
+	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+	// Create the consumers/channel structure, set the consumers, and create a channel on that topic - for now
+	// unbuffered to verify race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: []interface{}{pConsumer},
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start a consumers to listen on that specific topic
+	go func() {
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
+
+	return consumerListeningChannel, nil
+}
+
+func (sc *SaramaClient) createPartitionConsumers(ctx context.Context, topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw(ctx, "creating-partition-consumers", log.Fields{"topic": topic.Name})
+	partitionList, err := sc.consumer.Partitions(topic.Name)
+	if err != nil {
+		logger.Warnw(ctx, "get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	pConsumers := make([]sarama.PartitionConsumer, 0)
+	for _, partition := range partitionList {
+		var pConsumer sarama.PartitionConsumer
+		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
+			logger.Warnw(ctx, "consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+		pConsumers = append(pConsumers, pConsumer)
+	}
+	return pConsumers, nil
+}
+
+func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+	var i int
+	var channel chan *ic.InterContainerMessage
+	for i, channel = range channels {
+		if channel == ch {
+			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
+			close(channel)
+			logger.Debug(ctx, "channel-closed")
+			return channels[:len(channels)-1]
+		}
+	}
+	return channels
+}
+
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; !exist {
+		sc.groupConsumers[topic] = consumer
+	}
+}
+
+func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; exist {
+		consumer := sc.groupConsumers[topic]
+		delete(sc.groupConsumers, topic)
+		if err := consumer.Close(); err != nil {
+			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
new file mode 100644
index 0000000..bdc615f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/golang/protobuf/ptypes/any"
+	"strings"
+)
+
+const (
+	TopicSeparator = "_"
+	DeviceIdLength = 24
+)
+
+// A Topic definition - may be augmented with additional attributes eventually
+type Topic struct {
+	// The name of the topic. It must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`).
+	Name string
+}
+
+type KVArg struct {
+	Key   string
+	Value interface{}
+}
+
+type RpcMType int
+
+const (
+	RpcFormattingError RpcMType = iota
+	RpcSent
+	RpcReply
+	RpcTimeout
+	RpcTransportError
+	RpcSystemClosing
+)
+
+type RpcResponse struct {
+	MType RpcMType
+	Err   error
+	Reply *any.Any
+}
+
+func NewResponse(messageType RpcMType, err error, body *any.Any) *RpcResponse {
+	return &RpcResponse{
+		MType: messageType,
+		Err:   err,
+		Reply: body,
+	}
+}
+
+// TODO:  Remove and provide a better way to get the device id
+// GetDeviceIdFromTopic extracts the deviceId from the topic name.  The topic name is formatted either as:
+//			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
+func GetDeviceIdFromTopic(topic Topic) string {
+	pos := strings.LastIndex(topic.Name, TopicSeparator)
+	if pos == -1 {
+		return ""
+	}
+	adjustedPos := pos + len(TopicSeparator)
+	if adjustedPos >= len(topic.Name) {
+		return ""
+	}
+	deviceId := topic.Name[adjustedPos:]
+	if len(deviceId) != DeviceIdLength {
+		return ""
+	}
+	return deviceId
+}
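
A minimal sketch (not part of the vendored file) of how GetDeviceIdFromTopic behaves, assuming a hypothetical topic name carrying a 24-character device id suffix:

    package main

    import (
        "fmt"

        "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
    )

    func main() {
        // Topic formatted as <any string>_<deviceId>, where the device id is 24 characters long.
        t := kafka.Topic{Name: "openolt_0123456789abcdef01234567"}
        fmt.Println(kafka.GetDeviceIdFromTopic(t)) // "0123456789abcdef01234567"

        // No "_<deviceId>" suffix of the expected length, so an empty string is returned.
        fmt.Println(kafka.GetDeviceIdFromTopic(kafka.Topic{Name: "rwcore"})) // ""
    }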
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
new file mode 100644
index 0000000..b0ce81b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+var logger CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
new file mode 100644
index 0000000..7b1a123
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (however, avoid using it)
+// 5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+// 6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
+//
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file)
+//    The log level can be changed and any number of default fields can be added as well. The log level specifies
+//    the lowest log level that will be in the output while the fields will be automatically added to all log printouts.
+//    However, as voltha components re-initialize the log level of each registered package to default initial loglevel
+//    passed as CLI argument, the log level passed in RegisterPackage call effectively has no effect.
+//
+//    var logger log.CLogger
+//    func init() {
+//              var err error
+//              logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//    }
+//
+// 2. In the calling package, use any of the publicly available functions of the local package-level logger instance created
+//    in the previous step.  Here is an example to write an Info log with additional fields:
+//
+//    logger.Infow(ctx, "An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use
+//          a) SetLogLevel from inside your package or
+//          b) SetPackageLogLevel from anywhere or
+//          c) SetAllLogLevel from anywhere.
+//
+//    The dynamic loglevel configuration feature also uses the SetPackageLogLevel method, based on triggers received when
+//    configured loglevels change
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path"
+	"runtime"
+	"strings"
+
+	zp "go.uber.org/zap"
+	zc "go.uber.org/zap/zapcore"
+)
+
+type LogLevel int8
+
+const (
+	// DebugLevel logs a message at debug level
+	DebugLevel = LogLevel(iota)
+	// InfoLevel logs a message at info level
+	InfoLevel
+	// WarnLevel logs a message at warning level
+	WarnLevel
+	// ErrorLevel logs a message at error level
+	ErrorLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using json format, mostly used by an automated logging system consumption
+const JSON = "json"
+
+// CLogger is a context-aware logger that represents an abstract logging interface.  Any logging implementation used
+// will need to abide by this interface
+type CLogger interface {
+	Debug(context.Context, ...interface{})
+	Debugln(context.Context, ...interface{})
+	Debugf(context.Context, string, ...interface{})
+	Debugw(context.Context, string, Fields)
+
+	Info(context.Context, ...interface{})
+	Infoln(context.Context, ...interface{})
+	Infof(context.Context, string, ...interface{})
+	Infow(context.Context, string, Fields)
+
+	Warn(context.Context, ...interface{})
+	Warnln(context.Context, ...interface{})
+	Warnf(context.Context, string, ...interface{})
+	Warnw(context.Context, string, Fields)
+
+	Error(context.Context, ...interface{})
+	Errorln(context.Context, ...interface{})
+	Errorf(context.Context, string, ...interface{})
+	Errorw(context.Context, string, Fields)
+
+	Fatal(context.Context, ...interface{})
+	Fatalln(context.Context, ...interface{})
+	Fatalf(context.Context, string, ...interface{})
+	Fatalw(context.Context, string, Fields)
+
+	With(Fields) CLogger
+
+	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+	//
+	Warning(context.Context, ...interface{})
+	Warningln(context.Context, ...interface{})
+	Warningf(context.Context, string, ...interface{})
+
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l LogLevel) bool
+
+	//Returns the log level of this specific logger
+	GetLogLevel() LogLevel
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *clogger
+var cfg zp.Config
+
+var loggers map[string]*clogger
+var cfgs map[string]zp.Config
+
+type clogger struct {
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
+}
+
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
+	switch l {
+	case DebugLevel:
+		return zp.NewAtomicLevelAt(zc.DebugLevel)
+	case InfoLevel:
+		return zp.NewAtomicLevelAt(zc.InfoLevel)
+	case WarnLevel:
+		return zp.NewAtomicLevelAt(zc.WarnLevel)
+	case ErrorLevel:
+		return zp.NewAtomicLevelAt(zc.ErrorLevel)
+	case FatalLevel:
+		return zp.NewAtomicLevelAt(zc.FatalLevel)
+	}
+	return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func logLevelToLevel(l LogLevel) zc.Level {
+	switch l {
+	case DebugLevel:
+		return zc.DebugLevel
+	case InfoLevel:
+		return zc.InfoLevel
+	case WarnLevel:
+		return zc.WarnLevel
+	case ErrorLevel:
+		return zc.ErrorLevel
+	case FatalLevel:
+		return zc.FatalLevel
+	}
+	return zc.ErrorLevel
+}
+
+func levelToLogLevel(l zc.Level) LogLevel {
+	switch l {
+	case zc.DebugLevel:
+		return DebugLevel
+	case zc.InfoLevel:
+		return InfoLevel
+	case zc.WarnLevel:
+		return WarnLevel
+	case zc.ErrorLevel:
+		return ErrorLevel
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func StringToLogLevel(l string) (LogLevel, error) {
+	switch strings.ToUpper(l) {
+	case "DEBUG":
+		return DebugLevel, nil
+	case "INFO":
+		return InfoLevel, nil
+	case "WARN":
+		return WarnLevel, nil
+	case "ERROR":
+		return ErrorLevel, nil
+	case "FATAL":
+		return FatalLevel, nil
+	}
+	return 0, errors.New("Given LogLevel is invalid : " + l)
+}
+
+func LogLevelToString(l LogLevel) (string, error) {
+	switch l {
+	case DebugLevel:
+		return "DEBUG", nil
+	case InfoLevel:
+		return "INFO", nil
+	case WarnLevel:
+		return "WARN", nil
+	case ErrorLevel:
+		return "ERROR", nil
+	case FatalLevel:
+		return "FATAL", nil
+	}
+	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+}
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
+	return zp.Config{
+		Level:            logLevelToAtomicLevel(level),
+		Encoding:         outputType,
+		Development:      true,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stderr"},
+		InitialFields:    defaultFields,
+		EncoderConfig: zc.EncoderConfig{
+			LevelKey:       "level",
+			MessageKey:     "msg",
+			TimeKey:        "ts",
+			CallerKey:      "caller",
+			StacktraceKey:  "stacktrace",
+			LineEnding:     zc.DefaultLineEnding,
+			EncodeLevel:    zc.LowercaseLevelEncoder,
+			EncodeTime:     zc.ISO8601TimeEncoder,
+			EncodeDuration: zc.SecondsDurationEncoder,
+			EncodeCaller:   zc.ShortCallerEncoder,
+		},
+	}
+}
+
+func ConstructZapConfig(outputType string, level LogLevel, fields Fields) zp.Config {
+	return getDefaultConfig(outputType, level, fields)
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be invoked.  This function
+// initializes the default logger (zap's sugaredlogger)
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (CLogger, error) {
+	// Build a custom config using zap
+	cfg = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLogger = &clogger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+
+	return defaultLogger, nil
+}
+
+// RegisterPackage registers a package to the log map.  Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: RegisterPackage also returns a reference to the actual logger.  If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// The pkgNames parameter should be used for testing only, as this function detects the caller's package.
+func RegisterPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (CLogger, error) {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*clogger)
+	}
+
+	var pkgName string
+	for _, name := range pkgNames {
+		pkgName = name
+		break
+	}
+	if pkgName == "" {
+		pkgName, _, _, _ = getCallerInfo()
+	}
+
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName], nil
+	}
+
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	loggers[pkgName] = &clogger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+//UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build(zp.AddCallerSkip(1))
+		if err != nil {
+			return err
+		}
+
+		// Update the existing zap logger instance
+		loggers[pkgName].log = l.Sugar()
+		loggers[pkgName].parent = l
+	}
+	return nil
+}
+
+// Return a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+	i := 0
+	keys := make([]string, len(loggers))
+	for k := range loggers {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// UpdateLogger updates the logger associated with a caller's package with supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; !exist {
+		return fmt.Errorf("package-%s-not-registered", pkgName)
+	}
+
+	// Build a new logger
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("config-%s-not-registered", pkgName)
+	}
+
+	cfg := cfgs[pkgName]
+	for k, v := range defaultFields {
+		if cfg.InitialFields == nil {
+			cfg.InitialFields = make(map[string]interface{})
+		}
+		cfg.InitialFields[k] = v
+	}
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return err
+	}
+
+	// Update the existing zap logger instance
+	loggers[pkgName].log = l.Sugar()
+	loggers[pkgName].parent = l
+
+	return nil
+}
+
+func setLevel(cfg zp.Config, level LogLevel) {
+	switch level {
+	case DebugLevel:
+		cfg.Level.SetLevel(zc.DebugLevel)
+	case InfoLevel:
+		cfg.Level.SetLevel(zc.InfoLevel)
+	case WarnLevel:
+		cfg.Level.SetLevel(zc.WarnLevel)
+	case ErrorLevel:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	case FatalLevel:
+		cfg.Level.SetLevel(zc.FatalLevel)
+	default:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	}
+}
+
+//SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level LogLevel) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		setLevel(cfg, level)
+	}
+}
+
+//SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level LogLevel) {
+	// Get proper config
+	for _, cfg := range cfgs {
+		setLevel(cfg, level)
+	}
+}
+
+//GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
+		return levelToLogLevel(cfg.Level.Level()), nil
+	}
+	return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+//GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() LogLevel {
+	return levelToLogLevel(cfg.Level.Level())
+}
+
+//SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level LogLevel) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("unregistered-package-%s", pkgName)
+	}
+	cfg := cfgs[pkgName]
+	setLevel(cfg, level)
+	return nil
+}
+
+//SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level LogLevel) {
+	setLevel(cfg, level)
+}
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	if defaultLogger != nil {
+		if defaultLogger.parent != nil {
+			if err := defaultLogger.parent.Sync(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
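
A minimal shutdown sketch, assuming a hypothetical component main (the log package here is the one added by this diff):

    package main

    import "github.com/opencord/voltha-lib-go/v5/pkg/log"

    func main() {
        // ... run the component ...

        // Flush any buffered log entries before the process exits.
        if err := log.CleanUp(); err != nil {
            panic(err)
        }
    }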
+
+func getCallerInfo() (string, string, string, int) {
+	// Since the caller of a log function is one stack frame above (in terms of stack level) the log.go
+	// frames, first look for the last log.go frame and then grab the caller info one level higher.
+	maxLevel := 3
+	skiplevel := 3 // Level with the most empirical success to see the last log.go stack frame.
+	pc := make([]uintptr, maxLevel)
+	n := runtime.Callers(skiplevel, pc)
+	packageName := ""
+	funcName := ""
+	fileName := ""
+	var line int
+	if n == 0 {
+		return packageName, fileName, funcName, line
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	var foundFrame runtime.Frame
+	more := true
+	for more {
+		frame, more = frames.Next()
+		_, fileName = path.Split(frame.File)
+		if fileName != "log.go" {
+			foundFrame = frame // First frame after log.go in the frame stack
+			break
+		}
+	}
+	parts := strings.Split(foundFrame.Function, ".")
+	pl := len(parts)
+	if pl >= 2 {
+		funcName = parts[pl-1]
+		if parts[pl-2][0] == '(' {
+			packageName = strings.Join(parts[0:pl-2], ".")
+		} else {
+			packageName = strings.Join(parts[0:pl-1], ".")
+		}
+	}
+
+	if strings.HasSuffix(packageName, ".init") {
+		packageName = strings.TrimSuffix(packageName, ".init")
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+
+	return packageName, fileName, funcName, foundFrame.Line
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l clogger) With(keysAndValues Fields) CLogger {
+	// Propagate packageName so that GetLogLevel continues to work on the derived logger
+	return clogger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent, packageName: l.packageName}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l clogger) Debug(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger with a line feed. Default in any case.
+func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(DebugLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l clogger) Info(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger with a line feed. Default in any case.
+func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(InfoLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l clogger) Warn(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger with a line feed. Default in any case.
+func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(WarnLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l clogger) Error(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger with a line feed. Default in any case.
+func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(ErrorLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger with a line feed. Default in any case.
+func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(FatalLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l clogger) Warning(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger with a line feed. Default in any case.
+func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l clogger) V(level LogLevel) bool {
+	return l.parent.Core().Enabled(logLevelToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l clogger) GetLogLevel() LogLevel {
+	return levelToLogLevel(cfgs[l.packageName].Level.Level())
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
new file mode 100644
index 0000000..82c3d7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// File contains utility functions to support Open Tracing in conjunction with
+// Enhanced Logging based on context propagation
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
+	"io"
+	"os"
+	"strings"
+	"sync"
+)
+
+const (
+	RootSpanNameKey = "op-name"
+)
+
+// Global Settings governing the Log Correlation and Tracing features. Should only
+// be updated through the exposed public methods
+type LogFeaturesManager struct {
+	isTracePublishingEnabled bool
+	isLogCorrelationEnabled  bool
+	componentName            string // Name of component extracted from ENV variable
+	activeTraceAgentAddress  string
+	lock                     sync.Mutex
+}
+
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+	return globalLFM
+}
+
+// ActiveTracerProxy is a wrapper to utilize the currently active Tracer instance. The middleware library being used
+// for generating Spans for gRPC API calls does not support dynamically setting the active Tracer in the way the
+// SetGlobalTracer method provided by the OpenTracing API does
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+	return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	return opentracing.GlobalTracer().Extract(format, carrier)
+}
+
+// traceLogger is a Jaeger-compliant Logger instance that redirects logs to the default logger
+type traceLogger struct {
+	logger *clogger
+}
+
+func (tl traceLogger) Error(msg string) {
+	tl.logger.Error(context.Background(), msg)
+}
+
+func (tl traceLogger) Infof(msg string, args ...interface{}) {
+	// Tracing logs should be performed only at Debug Verbosity
+	tl.logger.Debugf(context.Background(), msg, args...)
+}
+
+// traceCloser is a wrapper to handle the correct Closer call at the time of process termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+	currentActiveTracer := opentracing.GlobalTracer()
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+
+	return nil
+}
+
+// InitTracingAndLogCorrelation initializes a Jaeger-based tracing client based on the initial status of trace publishing and log correlation
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+	lfm.componentName = os.Getenv("COMPONENT_NAME")
+	if lfm.componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	// Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+	if !tracePublishEnabled && !logCorrelationEnabled {
+		logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+		lfm.isTracePublishingEnabled = false
+		lfm.isLogCorrelationEnabled = false
+		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+		return traceCloser{}, nil
+	}
+
+	tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize variables representing Active Status
+	opentracing.SetGlobalTracer(tracer)
+	lfm.isTracePublishingEnabled = tracePublishEnabled
+	lfm.activeTraceAgentAddress = traceAgentAddress
+	lfm.isLogCorrelationEnabled = logCorrelationEnabled
+	return traceCloser{}, nil
+}
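
A minimal startup sketch, assuming COMPONENT_NAME is set in the environment and a hypothetical Jaeger agent address:

    package main

    import "github.com/opencord/voltha-lib-go/v5/pkg/log"

    func main() {
        // Enable both trace publishing and log correlation at startup.
        closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(true, "jaeger-agent.default.svc:6831", true)
        if err != nil {
            panic(err)
        }
        defer log.TerminateTracing(closer)
    }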
+
+// replaceActiveTracer replaces the active Tracer and gracefully closes the previous tracer
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+	currentActiveTracer := opentracing.GlobalTracer()
+	opentracing.SetGlobalTracer(tracer)
+
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isLogCorrelationEnabled {
+		logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+		// Continue using the earlier tracer instance in case of any error
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+			tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+				return
+			}
+
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isLogCorrelationEnabled = true
+		logger.Info(context.Background(), "Log Correlation has been enabled")
+
+	} else {
+		// Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		}
+
+		lfm.isLogCorrelationEnabled = false
+		logger.Info(context.Background(), "Log Correlation has been disabled")
+	}
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isTracePublishingEnabled {
+		logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Tracing Publish has been enabled (even if a Jaeger instance is already active)
+		// This is needed to ensure that a fresh lookup of Jaeger Agent address is performed again while performing
+		// Disable-Enable of Tracing
+		tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+		if err != nil {
+			logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+			return
+		}
+		lfm.replaceActiveTracer(tracer)
+
+		lfm.isTracePublishingEnabled = true
+		logger.Info(context.Background(), "Tracing Publishing has been enabled")
+	} else {
+		// Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+		if !lfm.isLogCorrelationEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		} else {
+			// Else construct a new Jaeger Instance with publishing disabled
+			tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+				return
+			}
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isTracePublishingEnabled = false
+		logger.Info(context.Background(), "Tracing Publishing has been disabled")
+	}
+}
+
+// constructJaegerTracer constructs a new Jaeger Tracer instance based on the given Trace Agent address and publish status.
+// The last attribute indicates whether to use the loopback IP for creating the Jaeger client when the DNS lookup
+// of the supplied Trace Agent address has failed. It is fine to fall back during the initialization step, but
+// not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+	cfg := jcfg.Configuration{ServiceName: lfm.componentName}
+
+	var err error
+	var jReporterConfig jcfg.ReporterConfig
+	var jReporterCfgOption jtracing.Reporter
+
+	logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
+
+	// Attempt Trace Agent Address first; will fallback to Loopback IP if it fails
+	jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+	jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+	if err != nil {
+		if !fallbackToLoopbackAllowed {
+			return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
+		}
+
+		logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+			Fields{"error": err, "address": traceAgentAddress})
+		// The Reporter initialization may fail due to an invalid Agent address or a non-existent Agent (DNS lookup failure).
+		// It is essential for the Tracer instance to still start for correct Span propagation needed for log correlation.
+		// Thus, fall back to the loopback IP for Reporter initialization before returning any error
+		tracePublishEnabled = false
+
+		jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
+		jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+		if err != nil {
+			return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+		}
+	}
+
+	// To start with, we are using Constant Sampling type
+	samplerParam := 0 // 0: Do not publish span, 1: Publish
+	if tracePublishEnabled {
+		samplerParam = 1
+	}
+	jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
+	jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
+	if err != nil {
+		return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
+	}
+
+	return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+}
+
+func TerminateTracing(c io.Closer) {
+	err := c.Close()
+	if err != nil {
+		logger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+	}
+}
+
+// ExtractContextAttributes extracts details of the execution context as log fields from the tracing Span injected
+// into the context instance. The following log fields are extracted:
+// 1. Operation Name : key as 'op-name' and value as Span operation name
+// 2. Operation Id : key as 'op-id' and value as 64 bit Span Id in hex digits string
+//
+// Additionally, any tags present in Span are also extracted to use as log fields e.g. device-id.
+//
+// If no Span is found associated with the context, an empty slice is returned without any log fields
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+	if !lfm.isLogCorrelationEnabled {
+		return make([]interface{}, 0)
+	}
+
+	attrMap := make(map[string]interface{})
+
+	if ctx != nil {
+		if span := opentracing.SpanFromContext(ctx); span != nil {
+			if jspan, ok := span.(*jtracing.Span); ok {
+				// Add Log fields for operation identified by Root Level Span (Trace)
+				opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
+				opName := jspan.BaggageItem(RootSpanNameKey)
+
+				taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
+				taskName := jspan.OperationName()
+
+				if opName == "" {
+					span.SetBaggageItem(RootSpanNameKey, taskName)
+					opName = taskName
+				}
+
+				attrMap["op-id"] = opId
+				attrMap["op-name"] = opName
+
+				// Add Log fields for task identified by Current Span, if it is different
+				// than operation
+				if taskId != opId {
+					attrMap["task-id"] = taskId
+					attrMap["task-name"] = taskName
+				}
+
+				for k, v := range jspan.Tags() {
+					// Ignore the special tags added by Jaeger, middleware (sampler.type, span.*) present in the span
+					if strings.HasPrefix(k, "sampler.") || strings.HasPrefix(k, "span.") || k == "component" {
+						continue
+					}
+
+					attrMap[k] = v
+				}
+
+				processBaggageItems := func(k, v string) bool {
+					if k != "rpc-span-name" {
+						attrMap[k] = v
+					}
+					return true
+				}
+
+				jspan.SpanContext().ForeachBaggageItem(processBaggageItems)
+			}
+		}
+	}
+
+	return serializeMap(attrMap)
+}
+
+// EnrichSpan injects additional log fields into the Span, e.g. device-id
+func EnrichSpan(ctx context.Context, keyAndValues ...Fields) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		if jspan, ok := span.(*jtracing.Span); ok {
+			// Inject as a BaggageItem when the Span is the Root Span so that it propagates
+			// across the components along with Root Span (called as Trace)
+			// Else, inject as a Tag so that it is attached to the Child Task
+			isRootSpan := false
+			if jspan.SpanContext().TraceID().String() == jspan.SpanContext().SpanID().String() {
+				isRootSpan = true
+			}
+
+			for _, field := range keyAndValues {
+				for k, v := range field {
+					if isRootSpan {
+						span.SetBaggageItem(k, v.(string))
+					} else {
+						span.SetTag(k, v)
+					}
+				}
+			}
+		}
+	}
+}
+
+// MarkSpanError injects the error into the Span in the event of an operation failure
+func MarkSpanError(ctx context.Context, err error) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		span.SetTag("error", true)
+		span.SetTag("err", err)
+	}
+}
+
+// CreateChildSpan creates a Child Span from the Parent Span embedded in the passed context. Should be used before starting
+// a new major operation in Synchronous or Asynchronous mode (go routine), such as the following:
+// 1. Start of all implemented External API methods unless using an interceptor for auto-injection of Span (Server side impl)
+// 2. Just before calling a Third-Party lib which is invoking an External API (etcd, kafka)
+// 3. At the start of a Go Routine responsible for performing a major task involving significant duration
+// 4. Any method which is suspected to be time consuming...
+func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+	childSpan, newCtx := opentracing.StartSpanFromContext(ctx, taskName)
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		childSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return childSpan, newCtx
+}
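
A minimal usage sketch, assuming a package-level logger registered via RegisterPackage and a hypothetical task name and device id:

    func adoptDevice(ctx context.Context, deviceID string) {
        // Child span for a major operation; the extra fields are attached per EnrichSpan.
        span, ctx := log.CreateChildSpan(ctx, "adopt-device", log.Fields{"device-id": deviceID})
        defer span.Finish()

        // Logs made with the returned ctx carry op-name/op-id (and device-id) when log correlation is enabled.
        logger.Infow(ctx, "adopting-device", log.Fields{"device-id": deviceID})
    }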
+
+// CreateAsyncSpan creates an Async Child Span with a Follows-From relationship from the Parent Span embedded in the passed context.
+// Should be used only in scenarios when
+// a) There is a discontinuation in execution and thus the result of the Child span does not affect the Parent flow at all
+// b) The execution of the Child Span is guaranteed to start after the completion of the Parent Span
+// In case of any confusion, use the CreateChildSpan method
+// Some situations where this method would be suitable includes Kafka Async RPC call, Propagation of Event across
+// a channel etc.
+func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	var asyncSpan opentracing.Span
+	var newCtx context.Context
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+
+	// We should always be creating an Async span from a valid parent span. If not, create a Child span instead
+	if parentSpan == nil {
+		logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
+	} else {
+		// Use the Background context as the base for the Follows-From case; otherwise the new span gets both Child and FollowsFrom relationships
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(context.Background(), taskName, opentracing.FollowsFrom(parentSpan.Context()))
+	}
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		asyncSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return asyncSpan, newCtx
+}
+
+// WithSpanFromContext extracts the span from the source context and injects it into the supplied target context.
+// This should be used in situations wherein we are calling a time-sensitive operation (etcd update) and hence
+// had a context.Background() used earlier to avoid any cancellation/timeout of the operation by the passed context.
+// This will allow propagation of the span with a different base context (and not the original context)
+func WithSpanFromContext(targetCtx, sourceCtx context.Context) context.Context {
+	span := opentracing.SpanFromContext(sourceCtx)
+	return opentracing.ContextWithSpan(targetCtx, span)
+}
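
A brief sketch of the scenario described above (the KV-store client and call are hypothetical): detach a time-sensitive write from the caller's cancellation while keeping its span for correlation:

    // Base the update on context.Background() so the caller's timeout cannot cancel it,
    // but carry the caller's span across for log correlation.
    etcdCtx := log.WithSpanFromContext(context.Background(), ctx)
    err := kvClient.Put(etcdCtx, key, value) // hypothetical etcd client call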
+
+// serializeMap converts log Fields into the array of interfaces expected by zap logger methods
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/common.go
new file mode 100644
index 0000000..a874bd1
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package etcd
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/etcd_server.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/etcd_server.go
new file mode 100644
index 0000000..6113b3a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd/etcd_server.go
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package etcd
+
+import (
+	"context"
+	"fmt"
+	"go.etcd.io/etcd/embed"
+	"net/url"
+	"os"
+	"time"
+)
+
+const (
+	serverStartUpTimeout          = 10 * time.Second // Maximum time allowed to wait for the Etcd server to be ready
+	defaultLocalPersistentStorage = "voltha.test.embed.etcd"
+)
+
+//EtcdServer represents an embedded Etcd server.  It is used for testing only.
+type EtcdServer struct {
+	server *embed.Etcd
+}
+
+func islogLevelValid(logLevel string) bool {
+	valid := []string{"debug", "info", "warn", "error", "panic", "fatal"}
+	for _, l := range valid {
+		if l == logLevel {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+* MKConfig creates an embedded Etcd config
+* :param configName: A name for this config
+* :param clientPort:  The port the etcd client will connect to (do not use 2379 for unit test)
+* :param peerPort:  The port the etcd server will listen for its peers (do not use 2380 for unit test)
+* :param localPersistentStorageDir: The name of a local directory which will hold the Etcd server data
+* :param logLevel: One of debug, info, warn, error, panic, or fatal. Default 'info'.
+ */
+func MKConfig(ctx context.Context, configName string, clientPort, peerPort int, localPersistentStorageDir string, logLevel string) *embed.Config {
+	cfg := embed.NewConfig()
+	cfg.Name = configName
+	cfg.Dir = localPersistentStorageDir
+	cfg.Logger = "zap"
+	if !islogLevelValid(logLevel) {
+		logger.Fatalf(ctx, "Invalid log level -%s", logLevel)
+	}
+	cfg.LogLevel = logLevel
+	acurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", clientPort))
+	if err != nil {
+		logger.Fatalf(ctx, "Invalid client port -%d", clientPort)
+	}
+	cfg.ACUrls = []url.URL{*acurl}
+	cfg.LCUrls = []url.URL{*acurl}
+
+	apurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", peerPort))
+	if err != nil {
+		logger.Fatalf(ctx, "Invalid peer port -%d", peerPort)
+	}
+	cfg.LPUrls = []url.URL{*apurl}
+	cfg.APUrls = []url.URL{*apurl}
+
+	cfg.ClusterState = embed.ClusterStateFlagNew
+	cfg.InitialCluster = cfg.Name + "=" + apurl.String()
+
+	return cfg
+}
+
+//getDefaultCfg specifies the default config
+func getDefaultCfg() *embed.Config {
+	cfg := embed.NewConfig()
+	cfg.Dir = defaultLocalPersistentStorage
+	cfg.Logger = "zap"
+	cfg.LogLevel = "error"
+	return cfg
+}
+
+//StartEtcdServer creates and starts an embedded Etcd server.  A local directory to store data is created for the
+//embedded server lifetime (i.e. for the duration of a unit test).  The server runs at localhost:2379.
+func StartEtcdServer(ctx context.Context, cfg *embed.Config) *EtcdServer {
+	// Use the default config if none is provided
+	if cfg == nil {
+		cfg = getDefaultCfg()
+	}
+	// Remove the local directory as
+	// a safeguard for the case where a prior test failed
+	if err := os.RemoveAll(cfg.Dir); err != nil {
+		logger.Fatalf(ctx, "Failure removing local directory %s", cfg.Dir)
+	}
+	e, err := embed.StartEtcd(cfg)
+	if err != nil {
+		logger.Fatal(ctx, err)
+	}
+	select {
+	case <-e.Server.ReadyNotify():
+		logger.Debug(ctx, "Embedded Etcd server is ready!")
+	case <-time.After(serverStartUpTimeout):
+		e.Server.HardStop() // trigger a shutdown
+		e.Close()
+		logger.Fatal(ctx, "Embedded Etcd server took too long to start!")
+	case err := <-e.Err():
+		e.Server.HardStop() // trigger a shutdown
+		e.Close()
+		logger.Fatalf(ctx, "Embedded Etcd server errored out - %s", err)
+	}
+	return &EtcdServer{server: e}
+}
+
+//Stop closes the embedded Etcd server and removes the local data directory as well
+func (es *EtcdServer) Stop(ctx context.Context) {
+	if es != nil {
+		storage := es.server.Config().Dir
+		es.server.Server.HardStop()
+		es.server.Close()
+		if err := os.RemoveAll(storage); err != nil {
+			logger.Fatalf(ctx, "Failure removing local directory %s", es.server.Config().Dir)
+		}
+	}
+}
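
A minimal unit-test sketch of the intended lifecycle, using non-default ports as the MKConfig comment advises (the package, ports and directory name are arbitrary):

    package mypkg

    import (
        "context"
        "testing"

        "github.com/opencord/voltha-lib-go/v5/pkg/mocks/etcd"
    )

    func TestWithEmbeddedEtcd(t *testing.T) {
        ctx := context.Background()
        cfg := etcd.MKConfig(ctx, "voltha-test", 2479, 2480, "voltha.test.data", "error")
        srv := etcd.StartEtcdServer(ctx, cfg)
        defer srv.Stop(ctx)

        // ... exercise code against the client endpoint http://localhost:2479 ...
    }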
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/common.go
new file mode 100644
index 0000000..313ff9e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/endpoint_manager.go
new file mode 100644
index 0000000..5acec69
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/endpoint_manager.go
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"context"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+)
+
+type EndpointManager struct{}
+
+func NewEndpointManager() kafka.EndpointManager {
+	mock := &EndpointManager{}
+	return mock
+}
+
+func (em *EndpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (kafka.Endpoint, error) {
+	// TODO add mocks call and args
+	return kafka.Endpoint(serviceType), nil
+}
+
+func (em *EndpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	// TODO add mocks call and args
+	return true, nil
+}
+
+func (em *EndpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (kafka.ReplicaID, error) {
+	return kafka.ReplicaID(1), nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_client.go
new file mode 100644
index 0000000..92966a7
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_client.go
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"github.com/golang/protobuf/ptypes"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// static check to ensure KafkaClient implements kafka.Client
+var _ kafka.Client = &KafkaClient{}
+
+type KafkaClient struct {
+	topicsChannelMap map[string][]chan *ic.InterContainerMessage
+	lock             sync.RWMutex
+}
+
+func NewKafkaClient() *KafkaClient {
+	return &KafkaClient{
+		topicsChannelMap: make(map[string][]chan *ic.InterContainerMessage),
+		lock:             sync.RWMutex{},
+	}
+}
+
+func (kc *KafkaClient) Start(ctx context.Context) error {
+	logger.Debug(ctx, "kafka-client-started")
+	return nil
+}
+
+func (kc *KafkaClient) Stop(ctx context.Context) {
+	kc.lock.Lock()
+	defer kc.lock.Unlock()
+	for topic, chnls := range kc.topicsChannelMap {
+		for _, c := range chnls {
+			close(c)
+		}
+		delete(kc.topicsChannelMap, topic)
+	}
+	logger.Debug(ctx, "kafka-client-stopped")
+}
+
+func (kc *KafkaClient) CreateTopic(ctx context.Context, topic *kafka.Topic, numPartition int, repFactor int) error {
+	logger.Debugw(ctx, "CreatingTopic", log.Fields{"topic": topic.Name, "numPartition": numPartition, "replicationFactor": repFactor})
+	kc.lock.Lock()
+	defer kc.lock.Unlock()
+	if _, ok := kc.topicsChannelMap[topic.Name]; ok {
+		return fmt.Errorf("topic %s already exists", topic.Name)
+	}
+	ch := make(chan *ic.InterContainerMessage)
+	kc.topicsChannelMap[topic.Name] = append(kc.topicsChannelMap[topic.Name], ch)
+	return nil
+}
+
+func (kc *KafkaClient) DeleteTopic(ctx context.Context, topic *kafka.Topic) error {
+	logger.Debugw(ctx, "DeleteTopic", log.Fields{"topic": topic.Name})
+	kc.lock.Lock()
+	defer kc.lock.Unlock()
+	delete(kc.topicsChannelMap, topic.Name)
+	return nil
+}
+
+func (kc *KafkaClient) Subscribe(ctx context.Context, topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ic.InterContainerMessage, error) {
+	logger.Debugw(ctx, "Subscribe", log.Fields{"topic": topic.Name, "args": kvArgs})
+	kc.lock.Lock()
+	defer kc.lock.Unlock()
+	ch := make(chan *ic.InterContainerMessage)
+	kc.topicsChannelMap[topic.Name] = append(kc.topicsChannelMap[topic.Name], ch)
+	return ch, nil
+}
+
+func removeChannel(s []chan *ic.InterContainerMessage, i int) []chan *ic.InterContainerMessage {
+	s[i] = s[len(s)-1]
+	return s[:len(s)-1]
+}
+
+func (kc *KafkaClient) UnSubscribe(ctx context.Context, topic *kafka.Topic, ch <-chan *ic.InterContainerMessage) error {
+	logger.Debugw(ctx, "UnSubscribe", log.Fields{"topic": topic.Name})
+	kc.lock.Lock()
+	defer kc.lock.Unlock()
+	if chnls, ok := kc.topicsChannelMap[topic.Name]; ok {
+		idx := -1
+		for i, c := range chnls {
+			if c == ch {
+				close(c)
+				idx = i
+			}
+		}
+		if idx >= 0 {
+			kc.topicsChannelMap[topic.Name] = removeChannel(kc.topicsChannelMap[topic.Name], idx)
+		}
+	}
+	return nil
+}
+
+func (kc *KafkaClient) SubscribeForMetadata(ctx context.Context, _ func(fromTopic string, timestamp time.Time)) {
+	logger.Debug(ctx, "SubscribeForMetadata - unimplemented")
+}
+
+func toIntercontainerMessage(event *voltha.Event) *ic.InterContainerMessage {
+	msg := &ic.InterContainerMessage{
+		Header: &ic.Header{
+			Id:        event.Header.Id,
+			Type:      ic.MessageType_REQUEST,
+			Timestamp: event.Header.RaisedTs,
+		},
+	}
+	// Marshal event
+	if eventBody, err := ptypes.MarshalAny(event); err == nil {
+		msg.Body = eventBody
+	}
+	return msg
+}
+
+func (kc *KafkaClient) Send(ctx context.Context, msg interface{}, topic *kafka.Topic, keys ...string) error {
+	// Ensure the message is a proto.Message; anything else is rejected
+	// before it can be published to the topic channels.
+	if _, ok := msg.(proto.Message); !ok {
+		logger.Warnw(ctx, "message-not-a-proto-message", log.Fields{"msg": msg})
+		return status.Error(codes.InvalidArgument, "msg-not-a-proto-msg")
+	}
+	req, ok := msg.(*ic.InterContainerMessage)
+	if !ok {
+		event, ok := msg.(*voltha.Event) // required because event messages arrive as *voltha.Event
+		if !ok {
+			return status.Error(codes.InvalidArgument, "unexpected-message-type")
+		}
+		req = toIntercontainerMessage(event)
+	}
+	if req == nil {
+		return status.Error(codes.InvalidArgument, "msg-nil")
+	}
+	kc.lock.RLock()
+	defer kc.lock.RUnlock()
+	for _, ch := range kc.topicsChannelMap[topic.Name] {
+		logger.Debugw(ctx, "Publishing", log.Fields{"fromTopic": req.Header.FromTopic, "toTopic": topic.Name, "id": req.Header.Id})
+		ch <- req
+	}
+	return nil
+}
+
+func (kc *KafkaClient) SendLiveness(ctx context.Context) error {
+	return status.Error(codes.Unimplemented, "SendLiveness")
+}
+
+func (kc *KafkaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Debug(ctx, "EnableLivenessChannel - unimplemented")
+	return nil
+}
+
+func (kc *KafkaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Debug(ctx, "EnableHealthinessChannel - unimplemented")
+	return nil
+}
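
The mock client fans each Send out to every channel subscribed on the topic. The channels are unbuffered, so a receiver must already be draining before Send is called or the publisher will block. A minimal sketch of driving it from a test (topic name and message ID are illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
	mocks "github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka"
	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
)

func demo() error {
	ctx := context.Background()
	kc := mocks.NewKafkaClient()
	_ = kc.Start(ctx)
	defer kc.Stop(ctx)

	topic := &kafka.Topic{Name: "rwcore"}
	ch, err := kc.Subscribe(ctx, topic)
	if err != nil {
		return err
	}

	// Consume in a goroutine first; Send blocks on the unbuffered channel.
	done := make(chan struct{})
	go func() {
		msg := <-ch
		fmt.Println("received", msg.Header.Id)
		close(done)
	}()

	// The header must be populated; Send logs FromTopic and Id.
	msg := &ic.InterContainerMessage{Header: &ic.Header{Id: "msg-1", FromTopic: "test"}}
	if err := kc.Send(ctx, msg, topic); err != nil {
		return err
	}
	<-done
	return nil
}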
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_inter_container_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_inter_container_proxy.go
new file mode 100644
index 0000000..a028aa9
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka/kafka_inter_container_proxy.go
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"context"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+type InvokeRpcArgs struct {
+	Rpc             string
+	ToTopic         *kafka.Topic
+	ReplyToTopic    *kafka.Topic
+	WaitForResponse bool
+	Key             string
+	ParentDeviceId  string
+	KvArgs          map[int]interface{}
+}
+
+type InvokeRpcSpy struct {
+	CallCount int
+	Calls     map[int]InvokeRpcArgs
+	Timeout   bool
+	Response  proto.Message
+}
+
+type InvokeAsyncRpcSpy struct {
+	CallCount int
+	Calls     map[int]InvokeRpcArgs
+	Timeout   bool
+	Response  proto.Message
+}
+
+type MockKafkaICProxy struct {
+	InvokeRpcSpy InvokeRpcSpy
+}
+
+func (s *MockKafkaICProxy) Start(ctx context.Context) error { return nil }
+func (s *MockKafkaICProxy) GetDefaultTopic() *kafka.Topic {
+	t := kafka.Topic{
+		Name: "test-topic",
+	}
+	return &t
+}
+
+func (s *MockKafkaICProxy) DeleteTopic(ctx context.Context, topic kafka.Topic) error { return nil }
+
+func (s *MockKafkaICProxy) Stop(ctx context.Context) {}
+
+func (s *MockKafkaICProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *kafka.Topic, replyToTopic *kafka.Topic,
+	waitForResponse bool, key string, kvArgs ...*kafka.KVArg) chan *kafka.RpcResponse {
+	s.InvokeRpcSpy.CallCount++
+	args := make(map[int]interface{}, 4)
+	for k, v := range kvArgs {
+		args[k] = v
+	}
+
+	s.InvokeRpcSpy.Calls[s.InvokeRpcSpy.CallCount] = InvokeRpcArgs{
+		Rpc:             rpc,
+		ToTopic:         toTopic,
+		ReplyToTopic:    replyToTopic,
+		WaitForResponse: waitForResponse,
+		Key:             key,
+		KvArgs:          args,
+	}
+
+	chnl := make(chan *kafka.RpcResponse)
+
+	return chnl
+}
+
+func (s *MockKafkaICProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *kafka.Topic, replyToTopic *kafka.Topic, waitForResponse bool, key string, kvArgs ...*kafka.KVArg) (bool, *any.Any) {
+	s.InvokeRpcSpy.CallCount++
+
+	success := true
+
+	args := make(map[int]interface{}, 4)
+	for k, v := range kvArgs {
+		args[k] = v
+	}
+
+	s.InvokeRpcSpy.Calls[s.InvokeRpcSpy.CallCount] = InvokeRpcArgs{
+		Rpc:             rpc,
+		ToTopic:         toTopic,
+		ReplyToTopic:    replyToTopic,
+		WaitForResponse: waitForResponse,
+		Key:             key,
+		KvArgs:          args,
+	}
+
+	var response any.Any
+	if s.InvokeRpcSpy.Timeout {
+
+		success = false
+
+		err := &ic.Error{Reason: "context deadline exceeded", Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+		res, _ := ptypes.MarshalAny(err)
+		response = *res
+	} else {
+		res, _ := ptypes.MarshalAny(s.InvokeRpcSpy.Response)
+		response = *res
+	}
+
+	return success, &response
+}
+func (s *MockKafkaICProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic kafka.Topic, handler interface{}) error {
+	return nil
+}
+func (s *MockKafkaICProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic kafka.Topic, initialOffset int64) error {
+	return nil
+}
+func (s *MockKafkaICProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic kafka.Topic) error {
+	return nil
+}
+func (s *MockKafkaICProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return nil
+}
+func (s *MockKafkaICProxy) SendLiveness(ctx context.Context) error { return nil }
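
MockKafkaICProxy records every InvokeRPC invocation in InvokeRpcSpy and replies with the canned Response (or a DEADLINE_EXCEEDED error when Timeout is set). Note that the Calls map must be initialized by the test, since the mock writes into it, and the first call lands under key 1 because CallCount is incremented first. A sketch of a test using the spy (the RPC name and device ID are hypothetical):

package example

import (
	"context"
	"testing"

	mocks "github.com/opencord/voltha-lib-go/v5/pkg/mocks/kafka"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

func TestRPCSpy(t *testing.T) {
	proxy := &mocks.MockKafkaICProxy{
		InvokeRpcSpy: mocks.InvokeRpcSpy{
			// Must be initialized; the mock writes into this map.
			Calls:    make(map[int]mocks.InvokeRpcArgs),
			Response: &voltha.Device{Id: "device-1234"},
		},
	}

	toTopic := proxy.GetDefaultTopic()
	success, _ := proxy.InvokeRPC(context.Background(), "GetDevice", toTopic, toTopic, true, "device-1234")
	if !success {
		t.Fatal("expected RPC to succeed")
	}
	if proxy.InvokeRpcSpy.CallCount != 1 {
		t.Fatalf("expected 1 call, got %d", proxy.InvokeRpcSpy.CallCount)
	}
	if proxy.InvokeRpcSpy.Calls[1].Rpc != "GetDevice" {
		t.Fatalf("unexpected rpc recorded: %s", proxy.InvokeRpcSpy.Calls[1].Rpc)
	}
}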
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
new file mode 100644
index 0000000..119d78e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
new file mode 100644
index 0000000..b66f398
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"net/http"
+	"sync"
+)
+
+// ProbeContextKeyType is the type of the context key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus enumerates the possible states of a service
+type ServiceStatus int
+
+const (
+	// ServiceStatusUnknown initial state of services
+	ServiceStatusUnknown ServiceStatus = iota
+
+	// ServiceStatusPreparing to optionally be used for prep, such as connecting
+	ServiceStatusPreparing
+
+	// ServiceStatusPrepared to optionally be used when prep is complete, but before run
+	ServiceStatusPrepared
+
+	// ServiceStatusRunning service is functional
+	ServiceStatusRunning
+
+	// ServiceStatusStopped service has stopped, but not because of error
+	ServiceStatusStopped
+
+	// ServiceStatusFailed service has stopped because of an error
+	ServiceStatusFailed
+
+	// ServiceStatusNotReady service has started but is unable to accept requests
+	ServiceStatusNotReady
+)
+
+const (
+	// ProbeContextKey is the context key under which the Probe is stored
+	ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String converts a ServiceStatus value to its string representation
+func (s ServiceStatus) String() string {
+	switch s {
+	default:
+		fallthrough
+	case ServiceStatusUnknown:
+		return "Unknown"
+	case ServiceStatusPreparing:
+		return "Preparing"
+	case ServiceStatusPrepared:
+		return "Prepared"
+	case ServiceStatusRunning:
+		return "Running"
+	case ServiceStatusStopped:
+		return "Stopped"
+	case ServiceStatusFailed:
+		return "Failed"
+	case ServiceStatusNotReady:
+		return "NotReady"
+	}
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+	Name   string
+	Status ServiceStatus
+}
+
+// Probe is the receiver on which probe capabilities are implemented
+type Probe struct {
+	readyFunc  func(map[string]ServiceStatus) bool
+	healthFunc func(map[string]ServiceStatus) bool
+
+	mutex     sync.RWMutex
+	status    map[string]ServiceStatus
+	isReady   bool
+	isHealthy bool
+}
+
+// WithReadyFunc overrides the default readiness calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.readyFunc = readyFunc
+	return p
+}
+
+// WithHealthFunc overrides the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.healthFunc = healthFunc
+	return p
+}
+
+// RegisterService registers one or more service names with the probe; status is tracked per service name
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+	for _, name := range names {
+		if _, ok := p.status[name]; !ok {
+			p.status[name] = ServiceStatusUnknown
+			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
+		}
+	}
+
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+}
+
+// UpdateStatus records a service status update on the probe and recomputes readiness and health
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	// if status hasn't changed, avoid doing useless work
+	existingStatus, ok := p.status[name]
+	if ok && (existingStatus == status) {
+		return
+	}
+
+	p.status[name] = status
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+	logger.Debugw(ctx, "probe-service-status-updated",
+		log.Fields{
+			"service-name": name,
+			"status":       status.String(),
+			"ready":        p.isReady,
+			"health":       p.isHealthy,
+		})
+}
+
+func (p *Probe) GetStatus(name string) ServiceStatus {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	currentStatus, ok := p.status[name]
+	if ok {
+		return currentStatus
+	}
+
+	return ServiceStatusUnknown
+}
+
+func GetProbeFromContext(ctx context.Context) *Probe {
+	if ctx != nil {
+		if value := ctx.Value(ProbeContextKey); value != nil {
+			if p, ok := value.(*Probe); ok {
+				return p
+			}
+		}
+	}
+	return nil
+}
+
+// UpdateStatusFromContext is a convenience function that pulls the Probe
+// reference from the Context, if one exists, and calls UpdateStatus on it.
+// If the Context is nil, or no Probe is associated with the ProbeContextKey,
+// nothing happens.
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+	p := GetProbeFromContext(ctx)
+	if p != nil {
+		p.UpdateStatus(ctx, name, status)
+	}
+}
+
+// pulled out into a separate function to make unit testing easier
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isReady {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isHealthy {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	ctx := context.Background()
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	if _, err := w.Write([]byte("{")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	comma := ""
+	for c, s := range p.status {
+		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		comma = ", "
+	}
+	if _, err := w.Write([]byte("}")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+// ListenAndServe serves three HTTP endpoints (/readz, /healthz, /detailz) on the given address. It returns only on error
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
+	mux := http.NewServeMux()
+
+	// Returns the result of the readyFunc calculation
+	mux.HandleFunc("/readz", p.readzFunc)
+
+	// Returns the result of the healthFunc calculation
+	mux.HandleFunc("/healthz", p.healthzFunc)
+
+	// Returns the details of the services and their status as JSON
+	mux.HandleFunc("/detailz", p.detailzFunc)
+	s := &http.Server{
+		Addr:    address,
+		Handler: mux,
+	}
+	logger.Fatal(ctx, s.ListenAndServe())
+}
+
+func (p *Probe) IsReady() bool {
+	return p.isReady
+}
+
+// defaultReadyFunc reports ready only when every registered service is running
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status != ServiceStatusRunning {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultHealthFunc reports healthy as long as no service is stopped or failed.
+// Services start in the Unknown state, which still counts as healthy.
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status == ServiceStatusStopped || status == ServiceStatusFailed {
+			return false
+		}
+	}
+	return true
+}
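
Putting the pieces together: a service registers its components with a Probe, serves the probe endpoints, and pushes status transitions through the context. Readiness flips to true only once every registered service reports Running, while health degrades only on Stopped or Failed. A minimal wiring sketch (the address and service names are illustrative):

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v5/pkg/probe"
)

func run() {
	p := &probe.Probe{}
	ctx := context.WithValue(context.Background(), probe.ProbeContextKey, p)

	// Expose /readz, /healthz, and /detailz; readiness stays false until
	// every registered service reports Running.
	go p.ListenAndServe(ctx, ":8080")

	p.RegisterService(ctx, "kafka", "kv-store")

	// ...later, once the components actually connect, possibly deep in the
	// stack where only the context is available:
	probe.UpdateStatusFromContext(ctx, "kafka", probe.ServiceStatusRunning)
	probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)
}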
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
new file mode 100644
index 0000000..49c0b10
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package version
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Default build-time variables.
+// These values can (and should) be overridden via ldflags when building
+// with `make`.
+var (
+	version   = "unknown-version"
+	goVersion = "unknown-goversion"
+	vcsRef    = "unknown-vcsref"
+	vcsDirty  = "unknown-vcsdirty"
+	buildTime = "unknown-buildtime"
+	os        = "unknown-os"
+	arch      = "unknown-arch"
+)
+
+type VersionInfoType struct {
+	Version   string `json:"version"`
+	GoVersion string `json:"goversion"`
+	VcsRef    string `json:"vcsref"`
+	VcsDirty  string `json:"vcsdirty"`
+	BuildTime string `json:"buildtime"`
+	Os        string `json:"os"`
+	Arch      string `json:"arch"`
+}
+
+var VersionInfo VersionInfoType
+
+func init() {
+	VersionInfo = VersionInfoType{
+		Version:   version,
+		VcsRef:    vcsRef,
+		VcsDirty:  vcsDirty,
+		GoVersion: goVersion,
+		Os:        os,
+		Arch:      arch,
+		BuildTime: buildTime,
+	}
+}
+
+func (v VersionInfoType) String(indent string) string {
+	builder := strings.Builder{}
+
+	builder.WriteString(fmt.Sprintf("%sVersion:      %s\n", indent, v.Version))
+	builder.WriteString(fmt.Sprintf("%sGoVersion:    %s\n", indent, v.GoVersion))
+	builder.WriteString(fmt.Sprintf("%sVCS Ref:      %s\n", indent, v.VcsRef))
+	builder.WriteString(fmt.Sprintf("%sVCS Dirty:    %s\n", indent, v.VcsDirty))
+	builder.WriteString(fmt.Sprintf("%sBuilt:        %s\n", indent, v.BuildTime))
+	builder.WriteString(fmt.Sprintf("%sOS/Arch:      %s/%s\n", indent, v.Os, v.Arch))
+	return builder.String()
+}
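
The unexported defaults above are meant to be replaced at link time with -X flags; a hypothetical build invocation and a consumer printing the result might look like the following (the version values shown are made up):

package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v5/pkg/version"
)

// Built with (hypothetical values):
//   go build -ldflags "-X github.com/opencord/voltha-lib-go/v5/pkg/version.version=2.7.0 \
//                      -X github.com/opencord/voltha-lib-go/v5/pkg/version.vcsRef=abc1234"
func main() {
	// Prints the injected build metadata, one indented line per field.
	fmt.Print(version.VersionInfo.String("  "))
}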