VOL-4077: Improve storage usage on etcd
- Avoid storing data on etcd when it can be reconciled on
  adapter restart
- For data that must be stored, use a smaller footprint where
  possible
- Use a write-through cache for all data stored on etcd via the
  resource manager module
- Use a ResourceManager instance per interface to localize lock
  contention to each PON port

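As a rough sketch, the write-through pattern the resource manager
follows looks like this (the backend interface and all names below are
illustrative, not the actual module API):

    package sketch // illustrative only

    import (
        "context"
        "sync"
    )

    // kvBackend stands in for the etcd-backed KV client (assumed shape).
    type kvBackend interface {
        Put(ctx context.Context, key string, val []byte) error
    }

    // writeThroughCache serves reads from memory and pushes every write
    // down to etcd before updating the cache, so the two never diverge.
    type writeThroughCache struct {
        mu    sync.RWMutex
        kv    kvBackend
        cache map[string][]byte
    }

    func newWriteThroughCache(kv kvBackend) *writeThroughCache {
        return &writeThroughCache{kv: kv, cache: make(map[string][]byte)}
    }

    func (c *writeThroughCache) Put(ctx context.Context, key string, val []byte) error {
        c.mu.Lock()
        defer c.mu.Unlock()
        // Write-through: persist to etcd first, then update the cache.
        if err := c.kv.Put(ctx, key, val); err != nil {
            return err // etcd write failed; leave the cache unchanged
        }
        c.cache[key] = val
        return nil
    }

    func (c *writeThroughCache) Get(key string) ([]byte, bool) {
        c.mu.RLock()
        defer c.mu.RUnlock()
        val, ok := c.cache[key]
        return val, ok // authoritative once warmed from etcd on restart
    }

One such cache per PON interface keeps the mutex local to that port,
which is what localizes the lock contention mentioned above.
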
Change-Id: I21d38216fab195d738a446b3f96a00251569e38b
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
new file mode 100644
index 0000000..c514d6d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/golang/protobuf/proto"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+// AdapterProxy defines the interface that an AdapterProxy implementation must satisfy.
+type AdapterProxy interface {
+	SendInterAdapterMessage(ctx context.Context,
+		msg proto.Message,
+		msgType ic.InterAdapterMessageType_Types,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string,
+		messageID string) error
+	TechProfileInstanceRequest(ctx context.Context,
+		tpPath string,
+		ponIntfID uint32,
+		onuID uint32,
+		uniID uint32,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string) (*ic.InterAdapterTechProfileDownloadMessage, error)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
new file mode 100644
index 0000000..36939bd
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// CoreProxy defines the interface for communication with the voltha-go core.
+type CoreProxy interface {
+	UpdateCoreReference(deviceID string, coreReference string)
+	DeleteCoreReference(deviceID string)
+	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
+	DeviceUpdate(ctx context.Context, device *voltha.Device) error
+	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
+	PortsStateUpdate(ctx context.Context, deviceID string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error
+	DeleteAllPorts(ctx context.Context, deviceID string) error
+	GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error)
+	ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error)
+	DeviceStateUpdate(ctx context.Context, deviceID string,
+		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
+
+	DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error
+	ChildDeviceDetected(ctx context.Context, parentDeviceID string, parentPortNo int,
+		childDeviceType string, channelID int, vendorID string, serialNumber string, onuID int64) (*voltha.Device, error)
+
+	ChildDevicesLost(ctx context.Context, parentDeviceID string) error
+	ChildDevicesDetected(ctx context.Context, parentDeviceID string) error
+	GetDevice(ctx context.Context, parentDeviceID string, deviceID string) (*voltha.Device, error)
+	GetChildDevice(ctx context.Context, parentDeviceID string, kwargs map[string]interface{}) (*voltha.Device, error)
+	GetChildDevices(ctx context.Context, parentDeviceID string) (*voltha.Devices, error)
+	SendPacketIn(ctx context.Context, deviceID string, port uint32, pktPayload []byte) error
+	DeviceReasonUpdate(ctx context.Context, deviceID string, deviceReason string) error
+	PortStateUpdate(ctx context.Context, deviceID string, pType voltha.Port_PortType, portNo uint32,
+		operStatus voltha.OperStatus_Types) error
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
new file mode 100644
index 0000000..fc31041
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"google.golang.org/grpc/status"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+type AdapterProxy struct {
+	kafkaICProxy kafka.InterContainerProxy
+	coreTopic    string
+	endpointMgr  kafka.EndpointManager
+}
+
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
+	proxy := AdapterProxy{
+		kafkaICProxy: kafkaProxy,
+		coreTopic:    coreTopic,
+		endpointMgr:  kafka.NewEndpointManager(backend),
+	}
+	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
+	return &proxy
+}
+
+func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
+	msg proto.Message,
+	msgType ic.InterAdapterMessageType_Types,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string,
+	messageId string) error {
+	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	//Marshal the message
+	var marshalledMsg *any.Any
+	var err error
+	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+		return err
+	}
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return err
+	}
+
+	//Build the inter adapter message
+	header := &ic.InterAdapterHeader{
+		Type:          msgType,
+		FromTopic:     fromAdapter,
+		ToTopic:       string(endpoint),
+		ToDeviceId:    toDeviceId,
+		ProxyDeviceId: proxyDeviceId,
+	}
+	if messageId != "" {
+		header.Id = messageId
+	} else {
+		header.Id = uuid.New().String()
+	}
+	header.Timestamp = ptypes.TimestampNow()
+	iaMsg := &ic.InterAdapterMessage{
+		Header: header,
+		Body:   marshalledMsg,
+	}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: iaMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_inter_adapter_message"
+
+	// Add an indication in the context to differentiate this inter-adapter message during span processing in the Kafka IC proxy
+	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *AdapterProxy) TechProfileInstanceRequest(ctx context.Context,
+	tpPath string,
+	parentPonPort uint32,
+	onuID uint32,
+	uniID uint32,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	logger.Debugw(ctx, "sending-tech-profile-instance-request-message", log.Fields{"from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return nil, err
+	}
+
+	//Build the inter adapter message
+	tpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{
+		TpInstancePath: tpPath,
+		ParentDeviceId: toDeviceId,
+		ParentPonPort:  parentPonPort,
+		OnuId:          onuID,
+		UniId:          uniID,
+	}
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: tpReqMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_tech_profile_instance_request"
+
+	ctx = context.WithValue(ctx, "inter-adapter-tp-req-msg", tpPath)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	if success {
+		tpDwnldMsg := &ic.InterAdapterTechProfileDownloadMessage{}
+		if err := ptypes.UnmarshalAny(result, tpDwnldMsg); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, err
+		}
+		return tpDwnldMsg, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "TechProfileInstanceRequest-return", log.Fields{"tpPath": tpPath, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
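A typical caller-side use of the proxy above is an ONU adapter fetching a
tech profile instance from its parent OLT adapter instead of reading it
from etcd (a sketch; the topics, IDs and tpPath value are illustrative):

    // ap is the *AdapterProxy returned by NewAdapterProxy.
    tpDownload, err := ap.TechProfileInstanceRequest(ctx,
        "XGS-PON/64/uni-1",  // tpPath
        ponIntfID,           // parentPonPort
        onuID,
        uniID,
        "brcm_openomci_onu", // fromAdapter, also the reply topic
        "openolt",           // toAdapter
        oltDeviceID,         // toDeviceId
        onuDeviceID)         // proxyDeviceId
    if err != nil {
        return err // carries the gRPC code mapped from the ic.Error
    }
    _ = tpDownload // the downloaded tech profile instance to apply
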
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
new file mode 100644
index 0000000..98085bb
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
new file mode 100644
index 0000000..589d951
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
@@ -0,0 +1,689 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"sync"
+
+	"github.com/golang/protobuf/ptypes"
+	a "github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type CoreProxy struct {
+	kafkaICProxy        kafka.InterContainerProxy
+	adapterTopic        string
+	coreTopic           string
+	deviceIdCoreMap     map[string]string
+	lockDeviceIdCoreMap sync.RWMutex
+}
+
+func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+	var proxy CoreProxy
+	proxy.kafkaICProxy = kafkaProxy
+	proxy.adapterTopic = adapterTopic
+	proxy.coreTopic = coreTopic
+	proxy.deviceIdCoreMap = make(map[string]string)
+	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
+	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+
+	return &proxy
+}
+
+func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
+	if success {
+		return nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "device-id": deviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
+	}
+}
+
+// UpdateCoreReference adds or updates a core reference (really the topic name) for a given device ID
+func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	ap.deviceIdCoreMap[deviceId] = coreReference
+}
+
+// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	delete(ap.deviceIdCoreMap, deviceId)
+}
+
+func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+
+	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
+		return kafka.Topic{Name: t}
+	}
+
+	return kafka.Topic{Name: ap.coreTopic}
+}
+
+func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
+	return kafka.Topic{Name: ap.adapterTopic}
+}
+
+func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
+	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	rpc := "Register"
+	topic := kafka.Topic{Name: ap.coreTopic}
+	replyToTopic := ap.getAdapterTopic()
+	args := make([]*kafka.KVArg, 2)
+
+	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
+		logger.Fatal(ctx, "totalReplicas can't be 0; since you're here you have at least one")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
+		logger.Fatal(ctx, "currentReplica can't be 0; it has to start from 1")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
+		// If the adapter does not set these fields they default to 0; in that
+		// case the adapter is not ready to be scaled, so it defaults to a
+		// single instance.
+		adapter.CurrentReplica = 1
+		adapter.TotalReplicas = 1
+	}
+
+	if adapter.CurrentReplica > adapter.TotalReplicas {
+		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+			adapter.CurrentReplica, adapter.TotalReplicas)
+	}
+
+	args[0] = &kafka.KVArg{
+		Key:   "adapter",
+		Value: adapter,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "deviceTypes",
+		Value: deviceTypes,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
+	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
+	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"device-id": device.Id})
+	rpc := "DeviceUpdate"
+	toTopic := ap.getCoreTopic(device.Id)
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"device-id": device.Id, "success": success})
+	return unPackResponse(ctx, rpc, device.Id, success, result)
+}
+
+func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
+	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
+	rpc := "PortCreated"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: port,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortCreated-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "PortsStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceId},
+	}, {
+		Key:   "port_type_filter",
+		Value: &ic.IntType{Val: int64(portTypeFilter)},
+	}, {
+		Key:   "oper_status",
+		Value: &ic.IntType{Val: int64(operStatus)},
+	}}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"device-id": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error) {
+	logger.Debugw(ctx, "GetDevicePort", log.Fields{"device-id": deviceID})
+	rpc := "GetDevicePort"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}, {
+		Key:   "port_no",
+		Value: &ic.IntType{Val: int64(portNo)},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "GetDevicePort-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		port := &voltha.Port{}
+		if err := ptypes.UnmarshalAny(result, port); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return port, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevicePort-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error) {
+	logger.Debugw(ctx, "ListDevicePorts", log.Fields{"device-id": deviceID})
+	rpc := "ListDevicePorts"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "ListDevicePorts-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		ports := &voltha.Ports{}
+		if err := ptypes.UnmarshalAny(result, ports); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return ports.Items, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ListDevicePorts-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
+	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "DeviceStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+	cStatus := &ic.IntType{Val: int64(connStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "connect_status",
+		Value: cStatus,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
+	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
+	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"parent-device-id": parentDeviceId, "channelId": channelId})
+	rpc := "ChildDeviceDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 7)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+	ppn := &ic.IntType{Val: int64(parentPortNo)}
+	args[1] = &kafka.KVArg{
+		Key:   "parent_port_no",
+		Value: ppn,
+	}
+	cdt := &ic.StrType{Val: childDeviceType}
+	args[2] = &kafka.KVArg{
+		Key:   "child_device_type",
+		Value: cdt,
+	}
+	channel := &ic.IntType{Val: int64(channelId)}
+	args[3] = &kafka.KVArg{
+		Key:   "channel_id",
+		Value: channel,
+	}
+	vId := &ic.StrType{Val: vendorId}
+	args[4] = &kafka.KVArg{
+		Key:   "vendor_id",
+		Value: vId,
+	}
+	sNo := &ic.StrType{Val: serialNumber}
+	args[5] = &kafka.KVArg{
+		Key:   "serial_number",
+		Value: sNo,
+	}
+	oId := &ic.IntType{Val: int64(onuId)}
+	args[6] = &kafka.KVArg{
+		Key:   "onu_id",
+		Value: oId,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+
+}
+
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetDevice", log.Fields{"device-id": deviceId})
+	rpc := "GetDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parent-device-id": parentDeviceId, "kwargs": kwargs})
+	rpc := "GetChildDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 4)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	var cnt uint8 = 0
+	for k, v := range kwargs {
+		cnt += 1
+		if k == "serial_number" {
+			val := &ic.StrType{Val: v.(string)}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "onu_id" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "parent_port_no" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		}
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
+	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "GetChildDevices"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevices := &voltha.Devices{}
+		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevices, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	logger.Debugw(ctx, "SendPacketIn", log.Fields{"device-id": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
+	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"device-id": deviceId, "deviceReason": deviceReason})
+	rpc := "DeviceReasonUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	reason := &ic.StrType{Val: deviceReason}
+	args[1] = &kafka.KVArg{
+		Key:   "device_reason",
+		Value: reason,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
+	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	rpc := "DevicePMConfigUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(pmConfigs.Id)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device_pm_config",
+		Value: pmConfigs,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pmconfig-device-id": pmConfigs.Id, "success": success})
+	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
+}
+
+func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ReconcileChildDevices"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{
+		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
+	operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"device-id": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	rpc := "PortStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 4)
+	deviceID := &voltha.ID{Id: deviceId}
+	portNo := &ic.IntType{Val: int64(portNum)}
+	portType := &ic.IntType{Val: int64(pType)}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: deviceID,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "port_type",
+		Value: portType,
+	}
+	args[3] = &kafka.KVArg{
+		Key:   "port_no",
+		Value: portNo,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
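For context, an OLT adapter reports a newly discovered ONU through the
proxy above roughly as follows (a sketch; the serial number, vendor and
IDs are placeholders):

    onuDevice, err := cp.ChildDeviceDetected(ctx,
        oltDeviceID,         // parent device
        1,                   // parent (PON) port number
        "brcm_openomci_onu", // child device type
        1,                   // channel id
        "BRCM",              // vendor id
        "BRCM12345678",      // serial number
        int64(onuID))
    if err != nil {
        return err
    }
    logger.Debugw(ctx, "onu-created", log.Fields{"child-device-id": onuDevice.Id})
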
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
new file mode 100644
index 0000000..3b6d4f9
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+type PmMetrics struct {
+	deviceId          string
+	frequency         uint32
+	grouped           bool
+	frequencyOverride bool
+	metrics           map[string]*voltha.PmConfig
+}
+
+type PmMetricsOption func(*PmMetrics)
+
+func Frequency(frequency uint32) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequency = frequency
+	}
+}
+
+func Grouped(grouped bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.grouped = grouped
+	}
+}
+
+func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequencyOverride = frequencyOverride
+	}
+}
+
+// UpdateFrequency will update the frequency.
+func (pm *PmMetrics) UpdateFrequency(frequency uint32) {
+	pm.frequency = frequency
+}
+
+func Metrics(pmNames []string) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.metrics = make(map[string]*voltha.PmConfig)
+		for _, name := range pmNames {
+			args.metrics[name] = &voltha.PmConfig{
+				Name:    name,
+				Type:    voltha.PmConfig_COUNTER,
+				Enabled: true,
+			}
+		}
+	}
+}
+
+func NewPmMetrics(deviceId string, opts ...PmMetricsOption) *PmMetrics {
+	pm := &PmMetrics{deviceId: deviceId}
+	for _, option := range opts {
+		option(pm)
+	}
+	return pm
+}
+
+func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
+	pmConfigs := &voltha.PmConfigs{
+		Id:           pm.deviceId,
+		DefaultFreq:  pm.frequency,
+		Grouped:      pm.grouped,
+		FreqOverride: pm.frequencyOverride,
+	}
+	for _, v := range pm.metrics {
+		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
+	}
+	return pmConfigs
+}
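The functional-options constructor above is typically used along these
lines (a sketch; the frequency and metric names are made up):

    pm := common.NewPmMetrics(deviceID,
        common.Frequency(150),
        common.Grouped(false),
        common.FrequencyOverride(false),
        common.Metrics([]string{"rx_bytes", "tx_bytes"}),
    )
    // Convert to the proto form expected by CoreProxy.DevicePMConfigUpdate.
    pmConfigs := pm.ToPmConfigs()
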
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
new file mode 100644
index 0000000..90f575b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
@@ -0,0 +1,1011 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"errors"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/extension"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type RequestHandlerProxy struct {
+	TestMode       bool
+	coreInstanceId string
+	adapter        adapters.IAdapter
+	coreProxy      adapterif.CoreProxy
+}
+
+func NewRequestHandlerProxy(coreInstanceId string, iadapter adapters.IAdapter, cProxy adapterif.CoreProxy) *RequestHandlerProxy {
+	var proxy RequestHandlerProxy
+	proxy.coreInstanceId = coreInstanceId
+	proxy.adapter = iadapter
+	proxy.coreProxy = cProxy
+	return &proxy
+}
+
+func (rhp *RequestHandlerProxy) Adapter_descriptor() (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Device_types() (*voltha.DeviceTypes, error) {
+	return nil, nil
+}
+
+func (rhp *RequestHandlerProxy) Health() (*voltha.HealthStatus, error) {
+	return nil, nil
+}
+
+func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
+
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the adopt device on the adapter
+	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the reconcile device API on the adapter
+	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Abandon_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Disable_device API on the adapter
+	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reenable_device API on the adapter
+	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reboot_device API on the adapter
+	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+
+}
+
+func (rhp *RequestHandlerProxy) Self_test_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warnw(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the delete_device API on the adapter
+	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_device_details(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_bulk")
+	if len(args) < 5 {
+		logger.Warnw(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &voltha.Flows{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &voltha.FlowGroups{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flows":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "groups":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the bulk flow update API of the adapter
+	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_incrementally")
+	if len(args) < 5 {
+		logger.Warnw(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &openflow_13.FlowChanges{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &openflow_13.FlowGroupChanges{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "group_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the incremental flow update API of the adapter
+	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_pm_config")
+	if len(args) < 2 {
+		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	pmConfigs := &voltha.PmConfigs{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "pm_configs":
+			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pmConfigs": pmConfigs})
+	//Invoke the pm config update API of the adapter
+	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
+	if len(args) < 3 {
+		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &ic.StrType{}
+	egressPort := &ic.IntType{}
+	packet := &openflow_13.OfpPacketOut{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "outPort":
+			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"device-id": deviceId.Val, "outPort": egressPort, "packet": packet})
+	//Invoke the packet-out handler on the adapter
+	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Suppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Unsuppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"device-id": device.Id})
+
+	var cap *ic.SwitchCapability
+	var err error
+	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
+	return cap, nil
+}
+
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaMsg := &ic.InterAdapterMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+
+	//Invoke the inter adapter API on the handler
+	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Process_tech_profile_instance_request(ctx context.Context, args []*ic.Argument) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaTpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaTpReqMsg); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": iaTpReqMsg.TpInstancePath})
+
+	//Invoke the tech profile instance request
+	tpInst := rhp.adapter.Process_tech_profile_instance_request(ctx, iaTpReqMsg)
+
+	return tpInst, nil
+}
+
+func (rhp *RequestHandlerProxy) Download_image(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDowload(args, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Download_image(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_image_download_status(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDowload(args, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Get_image_download_status(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Cancel_image_download(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDowload(args, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Cancel_image_download(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Activate_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+
+	device, image, err := unMarshalImageDowload(args, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Activate_image_update(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func (rhp *RequestHandlerProxy) Revert_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
+	device, image, err := unMarshalImageDowload(args, ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownload, err := rhp.adapter.Revert_image_update(ctx, device, image)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownload, nil
+}
+
+func unMarshalImageDowload(args []*ic.Argument, ctx context.Context) (*voltha.Device, *voltha.ImageDownload, error) {
+	if len(args) < 4 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, nil, err
+	}
+	device := &voltha.Device{}
+	image := &voltha.ImageDownload{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case "request":
+			if err := ptypes.UnmarshalAny(arg.Value, image); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-image", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, nil, err
+			}
+		}
+	}
+	return device, image, nil
+}
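+
+// A minimal sketch (assumed usage, shown for illustration only) of how a
+// caller would package arguments for the image-download handlers above; the
+// keys must match the cases in unMarshalImageDowload, and at least four
+// arguments are expected. The device id, image name and topic are hypothetical:
+//
+//	devAny, _ := ptypes.MarshalAny(&voltha.Device{Id: "olt-1"})
+//	imgAny, _ := ptypes.MarshalAny(&voltha.ImageDownload{Name: "sw-image-v2"})
+//	txnAny, _ := ptypes.MarshalAny(&ic.StrType{Val: "txn-1234"})
+//	topicAny, _ := ptypes.MarshalAny(&ic.StrType{Val: "rwcore"})
+//	args := []*ic.Argument{
+//		{Key: "device", Value: devAny},
+//		{Key: "request", Value: imgAny},
+//		{Key: kafka.TransactionKey, Value: txnAny},
+//		{Key: kafka.FromTopic, Value: topicAny},
+//	}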
+
+func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+	if err != nil {
+		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
+		return err
+	}
+	return rhp.adapter.Enable_port(ctx, deviceId, port)
+}
+
+func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+	if err != nil {
+		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
+		return err
+	}
+	return rhp.adapter.Disable_port(ctx, deviceId, port)
+}
+
+func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
+	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return "", nil, errors.New("invalid-number-of-args")
+	}
+	deviceId := &ic.StrType{}
+	port := &voltha.Port{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return "", nil, err
+			}
+		case "port":
+			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
+				return "", nil, err
+			}
+		}
+	}
+	return deviceId.Val, port, nil
+}
+
+func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return errors.New("invalid-number-of-args")
+	}
+	childDevice := &voltha.Device{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "childDevice":
+			if err := ptypes.UnmarshalAny(arg.Value, childDevice); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-child-device", log.Fields{"error": err})
+				return err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	//Update the core reference for the parent device
+	rhp.coreProxy.UpdateCoreReference(childDevice.ParentId, fromTopic.Val)
+	//Invoke the Child_device_lost API on the adapter
+	if err := rhp.adapter.Child_device_lost(ctx, childDevice); err != nil {
+		return status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return nil
+}
+
+func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	// TODO: See related comment in voltha-go:adapter_proxy_go:startOmciTest()
+	//   Second argument should perhaps be uuid instead of omcitestrequest
+
+	device := &voltha.Device{}
+	request := &voltha.OmciTestRequest{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "omcitestrequest":
+			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return result, nil
+}
+func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
+	if len(args) < 3 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	pDeviceId := &ic.StrType{}
+	device := &voltha.Device{}
+	valuetype := &ic.IntType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "pDeviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-parent-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "valuetype":
+			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
+				return nil, err
+			}
+		default:
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
+		}
+	}
+
+	//Invoke the Get_ext_value API on the adapter
+	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
+}
+
+func (rhp *RequestHandlerProxy) Single_get_value_request(ctx context.Context, args []*ic.Argument) (*extension.SingleGetValueResponse, error) {
+	logger.Debugw(ctx, "req handler Single_get_value_request", log.Fields{"no of args": len(args), "args": args})
+
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+	singleGetvalueReq := extension.SingleGetValueRequest{}
+	errResp := func(status extension.GetValueResponse_Status, reason extension.GetValueResponse_ErrorReason) *extension.SingleGetValueResponse {
+		return &extension.SingleGetValueResponse{
+			Response: &extension.GetValueResponse{
+				Status:    status,
+				ErrReason: reason,
+			},
+		}
+	}
+	for _, arg := range args {
+		switch arg.Key {
+		case "request":
+			if err := ptypes.UnmarshalAny(arg.Value, &singleGetvalueReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-singleGetvalueReq", log.Fields{"error": err})
+				return errResp(extension.GetValueResponse_ERROR, extension.GetValueResponse_REASON_UNDEFINED), nil
+			}
+		default:
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
+		}
+	}
+	logger.Debugw(ctx, "invoke rhp.adapter.Single_get_value_request ", log.Fields{"singleGetvalueReq": singleGetvalueReq})
+	return rhp.adapter.Single_get_value_request(ctx, singleGetvalueReq)
+}
+
+func (rhp *RequestHandlerProxy) getDeviceDownloadImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageDownloadRequest, error) {
+	logger.Debugw(ctx, "getDeviceDownloadImageRequest", log.Fields{"args": args})
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	deviceDownloadImageReq := ic.DeviceImageDownloadRequest{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceImageDownloadReq":
+			if err := ptypes.UnmarshalAny(arg.Value, &deviceDownloadImageReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	return &deviceDownloadImageReq, nil
+}
+
+func (rhp *RequestHandlerProxy) getDeviceImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageRequest, error) {
+	logger.Debugw(ctx, "getDeviceImageRequest", log.Fields{"args": args})
+	if len(args) < 1 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		return nil, errors.New("invalid-number-of-args")
+	}
+
+	deviceImageReq := ic.DeviceImageRequest{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceImageReq":
+			if err := ptypes.UnmarshalAny(arg.Value, &deviceImageReq); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	return &deviceImageReq, nil
+}
+
+func (rhp *RequestHandlerProxy) getDeviceID(ctx context.Context, args []*ic.Argument) (string, error) {
+	logger.Debugw(ctx, "getDeviceID", log.Fields{"args": args})
+
+	deviceId := &ic.StrType{}
+
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return "", err
+			}
+		}
+	}
+
+	if deviceId.Val == "" {
+		return "", errors.New("invalid argument")
+	}
+
+	return deviceId.Val, nil
+}
+
+func (rhp *RequestHandlerProxy) Download_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageDownloadReq, err := rhp.getDeviceDownloadImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageDownloadRsp, err := rhp.adapter.Download_onu_image(ctx, imageDownloadReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageDownloadRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_onu_image_status(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageStatusReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageStatus, err := rhp.adapter.Get_onu_image_status(ctx, imageStatusReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageStatus, nil
+}
+
+func (rhp *RequestHandlerProxy) Abort_onu_image_upgrade(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageAbortReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageAbortRsp, err := rhp.adapter.Abort_onu_image_upgrade(ctx, imageAbortReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageAbortRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Activate_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageActivateReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageActivateRsp, err := rhp.adapter.Activate_onu_image(ctx, imageActivateReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return imageActivateRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Commit_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
+	imageCommitReq, err := rhp.getDeviceImageRequest(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	imageCommitRsp, err := rhp.adapter.Commit_onu_image(ctx, imageCommitReq)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return imageCommitRsp, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_onu_images(ctx context.Context, args []*ic.Argument) (*voltha.OnuImages, error) {
+	deviceID, err := rhp.getDeviceID(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	onuImages, err := rhp.adapter.Get_onu_images(ctx, deviceID)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return onuImages, nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
new file mode 100644
index 0000000..35f227e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"google.golang.org/grpc/codes"
+	"math/rand"
+	"time"
+)
+
+//GetRandomSerialNumber returns a pseudo-random serial number formatted as "IP:PORT"
+func GetRandomSerialNumber() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%d.%d.%d.%d:%d",
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(9000)+1000,
+	)
+}
+
+//GetRandomMacAddress returns a random mac address
+func GetRandomMacAddress() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+	)
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+var src = rand.NewSource(time.Now().UnixNano())
+
+func GetRandomString(n int) string {
+	b := make([]byte, n)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return string(b)
+}
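+
+// Illustrative note: GetRandomString draws 6 bits per letter from a single
+// Int63() sample (letterIdxMax = 10 letters per sample) and discards indices
+// >= 52 so every letter in letterBytes is equally likely. A hypothetical use:
+//
+//	suffix := GetRandomString(8) // e.g. "qZkXbTwA"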
+
+func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
+	switch icErr {
+	case ic.ErrorCode_INVALID_PARAMETERS:
+		return codes.InvalidArgument
+	case ic.ErrorCode_UNSUPPORTED_REQUEST:
+		return codes.Unavailable
+	case ic.ErrorCode_DEADLINE_EXCEEDED:
+		return codes.DeadlineExceeded
+	default:
+		logger.Warnw(ctx, "cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		return codes.Internal
+	}
+}
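+
+// Illustrative usage (the surrounding handler code is hypothetical): a request
+// handler could translate an inter-container error code into a gRPC status,
+// using status from google.golang.org/grpc/status, before returning it:
+//
+//	code := ICProxyErrorCodeToGrpcErrorCode(ctx, ic.ErrorCode_DEADLINE_EXCEEDED)
+//	return status.Error(code, "request-timed-out") // yields codes.DeadlineExceeded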
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
new file mode 100644
index 0000000..aca4271
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package adapters
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-protos/v4/go/extension"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+//IAdapter represents the set of APIs a voltha adapter has to support.
+type IAdapter interface {
+	Adapter_descriptor(ctx context.Context) error
+	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
+	Health(ctx context.Context) (*voltha.HealthStatus, error)
+	Adopt_device(ctx context.Context, device *voltha.Device) error
+	Reconcile_device(ctx context.Context, device *voltha.Device) error
+	Abandon_device(ctx context.Context, device *voltha.Device) error
+	Disable_device(ctx context.Context, device *voltha.Device) error
+	Reenable_device(ctx context.Context, device *voltha.Device) error
+	Reboot_device(ctx context.Context, device *voltha.Device) error
+	Self_test_device(ctx context.Context, device *voltha.Device) error
+	Delete_device(ctx context.Context, device *voltha.Device) error
+	Get_device_details(ctx context.Context, device *voltha.Device) error
+	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
+	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+	Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage
+	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Child_device_lost(ctx context.Context, childDevice *voltha.Device) error
+	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
+	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
+	Single_get_value_request(ctx context.Context, request extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error)
+	Download_onu_image(ctx context.Context, request *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error)
+	Get_onu_image_status(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Abort_onu_image_upgrade(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Get_onu_images(ctx context.Context, deviceID string) (*voltha.OnuImages, error)
+	Activate_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	Commit_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
new file mode 100644
index 0000000..606d18c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
new file mode 100644
index 0000000..f5efa36
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+const (
+	defaultkvStoreConfigPath     = "config"
+	defaultkvStoreDataPathPrefix = "service/voltha_voltha"
+	kvStorePathSeparator         = "/"
+)
+
+// ConfigType represents the type for which config is created inside the kvstore
+// For example, loglevel
+type ConfigType int
+
+const (
+	ConfigTypeLogLevel ConfigType = iota
+	ConfigTypeMetadata
+	ConfigTypeKafka
+	ConfigTypeLogFeatures
+)
+
+func (c ConfigType) String() string {
+	return [...]string{"loglevel", "metadata", "kafka", "logfeatures"}[c]
+}
+
+// ChangeEvent represents the event received from the watch
+// For example, Put Event
+type ChangeEvent int
+
+const (
+	Put ChangeEvent = iota
+	Delete
+)
+
+func (ce ChangeEvent) String() string {
+	return [...]string{"Put", "Delete"}[ce]
+}
+
+// ConfigChangeEvent represents the config for an event received from the watch
+// For example, ChangeType is Put and ConfigAttribute is default
+type ConfigChangeEvent struct {
+	ChangeType      ChangeEvent
+	ConfigAttribute string
+}
+
+// ConfigManager is a wrapper over Backend to maintain Configuration of voltha components
+// in kvstore based persistent storage
+type ConfigManager struct {
+	Backend               *db.Backend
+	KVStoreConfigPrefix   string
+	KVStoreDataPathPrefix string
+}
+
+// ComponentConfig represents a category of configuration for a specific VOLTHA component type
+// stored in a persistent storage pointed to by Config Manager
+// For example, one ComponentConfig instance will be created for loglevel config type for rw-core
+// component while another ComponentConfig instance will refer to connection config type for same
+// rw-core component. So, there can be multiple ComponentConfig instances created per component,
+// each pointing to a different category of configuration.
+//
+// Configuration pointed to by a ComponentConfig is stored in the kvstore as a list of key/value pairs
+// under a hierarchical tree with the following base path
+// <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/
+//
+// For example, rw-core ComponentConfig for loglevel config entries will be stored under following path
+// /voltha/service/config/rw-core/loglevel/
+type ComponentConfig struct {
+	cManager         *ConfigManager
+	componentLabel   string
+	configType       ConfigType
+	changeEventChan  chan *ConfigChangeEvent
+	kvStoreEventChan chan *kvstore.Event
+}
+
+func NewConfigManager(ctx context.Context, kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
+	var kvStorePrefix string
+	if prefix, present := os.LookupEnv("KV_STORE_DATAPATH_PREFIX"); present {
+		kvStorePrefix = prefix
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
+	} else {
+		kvStorePrefix = defaultkvStoreDataPathPrefix
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
+	}
+	return &ConfigManager{
+		KVStoreConfigPrefix:   defaultkvStoreConfigPath,
+		KVStoreDataPathPrefix: kvStorePrefix,
+		Backend: &db.Backend{
+			Client:     kvClient,
+			StoreType:  kvStoreType,
+			Address:    kvStoreAddress,
+			Timeout:    kvStoreTimeout,
+			PathPrefix: kvStorePrefix,
+		},
+	}
+}
+
+// RetrieveComponentList lists the component names for which a loglevel is stored in the kvstore
+func (c *ConfigManager) RetrieveComponentList(ctx context.Context, configType ConfigType) ([]string, error) {
+	data, err := c.Backend.List(ctx, c.KVStoreConfigPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	// Loop through the data received from the Backend for config,
+	// trimming and splitting each key to extract the component name.
+	// For example, a received key of <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default with value "DEBUG"
+	// contributes <Component Name> to the returned list, de-duplicated via the keys map.
+	ccPathPrefix := kvStorePathSeparator + configType.String() + kvStorePathSeparator
+	pathPrefix := c.KVStoreDataPathPrefix + kvStorePathSeparator + c.KVStoreConfigPrefix + kvStorePathSeparator
+	var list []string
+	keys := make(map[string]interface{})
+	for attr := range data {
+		cname := strings.TrimPrefix(attr, pathPrefix)
+		cName := strings.SplitN(cname, ccPathPrefix, 2)
+		if len(cName) != 2 {
+			continue
+		}
+		if _, exist := keys[cName[0]]; !exist {
+			keys[cName[0]] = nil
+			list = append(list, cName[0])
+		}
+	}
+	return list, nil
+}
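+
+// For example (illustrative values), with the default prefixes a key such as
+//
+//	service/voltha_voltha/config/rw-core/loglevel/default
+//
+// contributes "rw-core" to the returned component list.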
+
+// InitComponentConfig initializes the component config
+func (cm *ConfigManager) InitComponentConfig(componentLabel string, configType ConfigType) *ComponentConfig {
+
+	return &ComponentConfig{
+		componentLabel:   componentLabel,
+		configType:       configType,
+		cManager:         cm,
+		changeEventChan:  nil,
+		kvStoreEventChan: nil,
+	}
+
+}
+
+func (c *ComponentConfig) makeConfigPath() string {
+
+	cType := c.configType.String()
+	return c.cManager.KVStoreConfigPrefix + kvStorePathSeparator +
+		c.componentLabel + kvStorePathSeparator + cType
+}
+
+// MonitorForConfigChange watches the subkeys of the config path for this component
+// Changes to any subkey are received on an internal kvstore event channel, translated into
+// ConfigChangeEvent values, and delivered on the returned channel
+// For example, rw-core watches <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/
+// and receives an event for each PUT or DELETE under that path
+func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
+	key := c.makeConfigPath()
+
+	logger.Debugw(ctx, "monitoring-for-config-change", log.Fields{"key": key})
+
+	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
+
+	c.kvStoreEventChan = c.cManager.Backend.CreateWatch(ctx, key, true)
+
+	go c.processKVStoreWatchEvents(ctx)
+
+	return c.changeEventChan
+}
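+
+// A minimal consumer sketch (assumed usage, not part of this file):
+//
+//	events := componentConfig.MonitorForConfigChange(ctx)
+//	go func() {
+//		for ev := range events {
+//			logger.Infow(ctx, "config-changed",
+//				log.Fields{"type": ev.ChangeType, "attribute": ev.ConfigAttribute})
+//		}
+//	}()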
+
+// processKVStoreWatchEvents processes the event channel received from the Backend for any ChangeType
+// It validates the EventType and, for each valid EventType, creates a ConfigChangeEvent and sends it on the channel
+func (c *ComponentConfig) processKVStoreWatchEvents(ctx context.Context) {
+
+	ccKeyPrefix := c.makeConfigPath()
+
+	logger.Debugw(ctx, "processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+
+	ccPathPrefix := c.cManager.Backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
+
+	for watchResp := range c.kvStoreEventChan {
+
+		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
+			logger.Warnw(ctx, "received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			continue
+		}
+
+		// populate the configAttribute from the received key
+		// For example, a received key of <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default
+		// stores "default" in the configAttribute field
+		ky := fmt.Sprintf("%s", watchResp.Key)
+
+		c.changeEventChan <- &ConfigChangeEvent{
+			ChangeType:      ChangeEvent(watchResp.EventType),
+			ConfigAttribute: strings.TrimPrefix(ky, ccPathPrefix),
+		}
+	}
+}
+
+// Retrieve returns the value of a specific config key in string format
+func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "retrieving-config", log.Fields{"key": key})
+
+	if kvpair, err := c.cManager.Backend.Get(ctx, key); err != nil {
+		return "", err
+	} else {
+		if kvpair == nil {
+			return "", fmt.Errorf("config-key-does-not-exist : %s", key)
+		}
+
+		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
+		logger.Debugw(ctx, "retrieved-config", log.Fields{"key": key, "value": value})
+		return value, nil
+	}
+}
+
+func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
+	key := c.makeConfigPath()
+
+	logger.Debugw(ctx, "retreiving-list", log.Fields{"key": key})
+
+	data, err := c.cManager.Backend.List(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Loop through the data received from the Backend for the given key,
+	// trimming each key and storing the result as a key/value pair.
+	// For example, a received key of <Backend Prefix Path>/<Config Prefix>/<Component Name>/<Config Type>/default with value "DEBUG"
+	// is stored with key "default" and value "DEBUG" in the returned map[string]string
+	res := make(map[string]string)
+	ccPathPrefix := c.cManager.Backend.PathPrefix + kvStorePathSeparator + key + kvStorePathSeparator
+	for attr, val := range data {
+		res[strings.TrimPrefix(attr, ccPathPrefix)] = strings.Trim(fmt.Sprintf("%s", val.Value), "\"")
+	}
+
+	return res, nil
+}
+
+func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "saving-config", log.Fields{"key": key, "value": configValue})
+
+	//save the data for update config
+	if err := c.cManager.Backend.Put(ctx, key, configValue); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *ComponentConfig) Delete(ctx context.Context, configKey string) error {
+	//construct key using makeConfigPath
+	key := c.makeConfigPath() + "/" + configKey
+
+	logger.Debugw(ctx, "deleting-config", log.Fields{"key": key})
+	//delete the config
+	if err := c.cManager.Backend.Delete(ctx, key); err != nil {
+		return err
+	}
+	return nil
+}
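+
+// End-to-end sketch of the ComponentConfig API above (the component name,
+// store address and values are hypothetical, shown for illustration only):
+//
+//	cm := NewConfigManager(ctx, kvClient, "etcd", "etcd:2379", 5*time.Second)
+//	cc := cm.InitComponentConfig("rw-core", ConfigTypeLogLevel)
+//	_ = cc.Save(ctx, "default", "DEBUG")    // writes .../config/rw-core/loglevel/default
+//	level, _ := cc.Retrieve(ctx, "default") // "DEBUG"
+//	all, _ := cc.RetrieveAll(ctx)           // map["default"] = "DEBUG", ...
+//	_ = cc.Delete(ctx, "default")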
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
new file mode 100644
index 0000000..68bfb32
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package config provides dynamic logging configuration for a specific Voltha component, with loglevel lookup
+// from the etcd kvstore implemented using Backend.
+// Any Voltha component can start using dynamic logging by running StartLogLevelConfigProcessing in a goroutine after
+// starting the kvClient for the component.
+
+package config
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"os"
+	"sort"
+	"strings"
+)
+
+const (
+	defaultLogLevelKey                = "default"          // kvstore key containing default loglevel
+	globalConfigRootNode              = "global"           // Root Node in kvstore containing global config
+	initialGlobalDefaultLogLevelValue = "WARN"             // Hard-coded Global Default loglevel pushed at PoD startup
+	logPackagesListKey                = "log_package_list" // kvstore key containing list of allowed log packages
+)
+
+// ComponentLogController represents the logging configuration of a specific Voltha component type
+// It stores the ComponentConfig and GlobalConfig of the loglevel config for that component type
+// For example, a ComponentLogController instance will be created for the rw-core component
+type ComponentLogController struct {
+	ComponentName       string
+	componentNameConfig *ComponentConfig
+	GlobalConfig        *ComponentConfig
+	configManager       *ConfigManager
+	logHash             [16]byte
+	initialLogLevel     string // Initial default log level set by helm chart
+}
+
+func NewComponentLogController(ctx context.Context, cm *ConfigManager) (*ComponentLogController, error) {
+	logger.Debug(ctx, "creating-new-component-log-controller")
+	componentName := os.Getenv("COMPONENT_NAME")
+	if componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	var defaultLogLevel string
+	var err error
+	// Retrieve and save default log level; used for fallback if all loglevel config is cleared in etcd
+	if defaultLogLevel, err = log.LogLevelToString(log.GetDefaultLogLevel()); err != nil {
+		defaultLogLevel = "DEBUG"
+	}
+
+	return &ComponentLogController{
+		ComponentName:       componentName,
+		componentNameConfig: nil,
+		GlobalConfig:        nil,
+		configManager:       cm,
+		initialLogLevel:     defaultLogLevel,
+	}, nil
+
+}
+
+// StartLogLevelConfigProcessing initializes the component config and global config
+// Then, it persists the initial default loglevels into the config store before
+// starting the loading and processing of all log configuration
+func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
+	cc, err := NewComponentLogController(ctx, cm)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		return
+	}
+
+	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
+	logger.Debugw(ctx, "global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+
+	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
+	logger.Debugw(ctx, "component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+	cc.persistInitialDefaultLogConfigs(ctx)
+
+	cc.persistRegisteredLogPackageList(ctx)
+
+	cc.processLogConfig(ctx)
+}
+
+// persistInitialDefaultLogConfigs persists the global default loglevel into etcd, if not set yet
+// It also checks and sets the component default loglevel in etcd to the initial loglevel set from the command line
+func (c *ComponentLogController) persistInitialDefaultLogConfigs(ctx context.Context) {
+
+	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		logger.Debugw(ctx, "failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+
+		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+		}
+	}
+
+	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		logger.Debugw(ctx, "failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+
+		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+		}
+	}
+}
+
+// persistRegisteredLogPackageList saves the list of all registered packages for the component into the config kvstore.
+// The package names are sorted and persisted as a single JSON-encoded string
+func (c *ComponentLogController) persistRegisteredLogPackageList(ctx context.Context) {
+
+	componentMetadataConfig := c.configManager.InitComponentConfig(c.ComponentName, ConfigTypeMetadata)
+	logger.Debugw(ctx, "component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
+
+	packageList := log.GetPackageNames()
+	packageList = append(packageList, defaultLogLevelKey)
+	sort.Strings(packageList)
+
+	packageNames, err := json.Marshal(packageList)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
+		return
+	}
+
+	if err := componentMetadataConfig.Save(ctx, logPackagesListKey, string(packageNames)); err != nil {
+		logger.Errorw(ctx, "failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
+	}
+}
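+
+// For example (illustrative), the persisted value is a JSON array such as
+//
+//	["default","github.com/opencord/voltha-lib-go/v5/pkg/db"]
+//
+// stored under <Config Prefix>/<Component Name>/metadata/log_package_list.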
+
+// processLogConfig first loads and applies the log config and then waits on the component config and global config
+// channels for any changes. An event channel is received from the Backend for each valid change type
+// Then the component log config and global log config are retrieved from the Backend and merged into updatedLogConfig in precedence order
+// Any changes in updatedLogConfig are then applied to the component
+func (c *ComponentLogController) processLogConfig(ctx context.Context) {
+
+	// Load and apply Log Config for first time
+	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
+	if err != nil {
+		logger.Warnw(ctx, "unable-to-load-log-config-at-startup", log.Fields{"error": err})
+	} else {
+		if err := c.loadAndApplyLogConfig(ctx, initialLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+		}
+	}
+
+	componentConfigEventChan := c.componentNameConfig.MonitorForConfigChange(ctx)
+
+	globalConfigEventChan := c.GlobalConfig.MonitorForConfigChange(ctx)
+
+	// process the events for componentName and  global config
+	var configEvent *ConfigChangeEvent
+	for {
+		select {
+		case configEvent = <-globalConfigEventChan:
+		case configEvent = <-componentConfigEventChan:
+
+		}
+		logger.Debugw(ctx, "processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
+		if err != nil {
+			logger.Warnw(ctx, "unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			continue
+		}
+
+		logger.Debugw(ctx, "applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+
+		if err := c.loadAndApplyLogConfig(ctx, updatedLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-load-and-apply-log-config", log.Fields{"error": err})
+		}
+	}
+
+}
+
+// get active loglevel from the zap logger
+func getActiveLogLevels(ctx context.Context) map[string]string {
+	loglevels := make(map[string]string)
+
+	// now do the default log level
+	if level, err := log.LogLevelToString(log.GetDefaultLogLevel()); err == nil {
+		loglevels[defaultLogLevelKey] = level
+	}
+
+	// do the per-package log levels
+	for _, packageName := range log.GetPackageNames() {
+		level, err := log.GetPackageLogLevel(packageName)
+		if err != nil {
+			logger.Warnw(ctx, "unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			continue
+		}
+
+		if l, err := log.LogLevelToString(level); err == nil {
+			loglevels[packageName] = l
+		}
+	}
+
+	logger.Debugw(ctx, "retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+
+	return loglevels
+}
+
+func (c *ComponentLogController) getGlobalLogConfig(ctx context.Context) (string, error) {
+
+	globalDefaultLogLevel, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
+	if err != nil {
+		return "", err
+	}
+
+	// Handle edge cases when the global default loglevel is deleted directly from etcd or set to an invalid value
+	// We should use hard-coded initial default value in such cases
+	if globalDefaultLogLevel == "" {
+		logger.Warn(ctx, "global-default-loglevel-not-found-in-config-store")
+		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
+	}
+
+	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
+		logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
+	}
+
+	logger.Debugw(ctx, "retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+
+	return globalDefaultLogLevel, nil
+}
+
+func (c *ComponentLogController) getComponentLogConfig(ctx context.Context, globalDefaultLogLevel string) (map[string]string, error) {
+	componentLogConfig, err := c.componentNameConfig.RetrieveAll(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	effectiveDefaultLogLevel := ""
+	for logConfigKey, logConfigValue := range componentLogConfig {
+		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
+			logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			delete(componentLogConfig, logConfigKey)
+		} else {
+			if logConfigKey == defaultLogLevelKey {
+				effectiveDefaultLogLevel = componentLogConfig[defaultLogLevelKey]
+			}
+		}
+	}
+
+	// if default loglevel is not configured for the component, component should use
+	// default loglevel configured at global level
+	if effectiveDefaultLogLevel == "" {
+		effectiveDefaultLogLevel = globalDefaultLogLevel
+	}
+
+	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
+
+	logger.Debugw(ctx, "retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+
+	return componentLogConfig, nil
+}
+
+// buildUpdatedLogConfig retrieves the global logConfig and component logConfig from the Backend
+// and merges them, with the component logConfig taking precedence
+// For example, if the global logConfig is set and the component logConfig is set only for a specific package, the
+// result contains the global default loglevel together with the component's per-package config
+// If the component logConfig is set for a specific package as well as for default, the
+// result contains the component logConfig data only
+func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
+	globalLogLevel, err := c.getGlobalLogConfig(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-retrieve-global-log-config", log.Fields{"err": err})
+	}
+
+	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
+	if err != nil {
+		return nil, err
+	}
+
+	finalLogConfig := make(map[string]string)
+	for packageName, logLevel := range componentLogConfig {
+		finalLogConfig[strings.ReplaceAll(packageName, "#", "/")] = logLevel
+	}
+
+	return finalLogConfig, nil
+}
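+
+// Worked example (illustrative values): with a global default of WARN, a
+// component default of DEBUG and one per-package override, the merged map is
+//
+//	{"default": "DEBUG", "github.com/opencord/voltha-lib-go/v5/pkg/db": "INFO"}
+//
+// where any "#" separators stored in etcd keys have been mapped back to "/".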
+
+// loadAndApplyLogConfig loads and applies the current configuration for the component
+// It creates a hash of the loaded configuration using GenerateLogConfigHash,
+// compares it to the previously stored hash, and
+// calls updateLogLevels if there is any change
+func (c *ComponentLogController) loadAndApplyLogConfig(ctx context.Context, logConfig map[string]string) error {
+	currentLogHash, err := GenerateLogConfigHash(logConfig)
+	if err != nil {
+		return err
+	}
+
+	if c.logHash != currentLogHash {
+		updateLogLevels(ctx, logConfig)
+		c.logHash = currentLogHash
+	} else {
+		logger.Debug(ctx, "effective-loglevel-config-same-as-currently-active")
+	}
+
+	return nil
+}
+
+// createModifiedLogLevels loops through the activeLogLevels received from the zap logger and the updatedLogLevels received from buildUpdatedLogConfig
+// to identify and create map of modified Log Levels of 2 types:
+// - Packages for which log level has been changed
+// - Packages for which log level config has been cleared - set to default log level
+func createModifiedLogLevels(ctx context.Context, activeLogLevels, updatedLogLevels map[string]string) map[string]string {
+	defaultLevel := updatedLogLevels[defaultLogLevelKey]
+
+	modifiedLogLevels := make(map[string]string)
+	for activeKey, activeLevel := range activeLogLevels {
+		if _, exist := updatedLogLevels[activeKey]; !exist {
+			if activeLevel != defaultLevel {
+				modifiedLogLevels[activeKey] = defaultLevel
+			}
+		} else if activeLevel != updatedLogLevels[activeKey] {
+			modifiedLogLevels[activeKey] = updatedLogLevels[activeKey]
+		}
+	}
+
+	// Log warnings for all invalid packages for which log config has been set
+	for key, value := range updatedLogLevels {
+		if _, exist := activeLogLevels[key]; !exist {
+			logger.Warnw(ctx, "ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+		}
+	}
+
+	return modifiedLogLevels
+}
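The two reconciliation rules above are easiest to see on concrete data. Below is a minimal, self-contained sketch that applies the same rules outside the library; the package names and levels are purely illustrative:

package main

import "fmt"

func main() {
	// Hypothetical inputs mirroring createModifiedLogLevels: active levels from the
	// zap logger, updated levels from buildUpdatedLogConfig (with a "default" entry).
	active := map[string]string{
		"github.com/opencord/voltha-lib-go/v5/pkg/db": "DEBUG",
		"main": "WARN",
	}
	updated := map[string]string{"default": "ERROR", "main": "INFO"}

	defaultLevel := updated["default"]
	modified := make(map[string]string)
	for pkg, lvl := range active {
		if want, ok := updated[pkg]; !ok {
			// cleared config: revert to the default level
			if lvl != defaultLevel {
				modified[pkg] = defaultLevel
			}
		} else if lvl != want {
			// explicitly changed level
			modified[pkg] = want
		}
	}
	fmt.Println(modified) // map[github.com/opencord/voltha-lib-go/v5/pkg/db:ERROR main:INFO]
}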
+
+// updateLogLevels updates the loglevels for the component:
+// it retrieves the active configuration from the logger,
+// compares the entries one by one, and applies the changes
+func updateLogLevels(ctx context.Context, updatedLogConfig map[string]string) {
+
+	activeLogLevels := getActiveLogLevels(ctx)
+	changedLogLevels := createModifiedLogLevels(ctx, activeLogLevels, updatedLogConfig)
+
+	// If no changed log levels are found, just return. This may happen when an invalid package is configured
+	if len(changedLogLevels) == 0 {
+		logger.Debug(ctx, "no-change-in-effective-loglevel-config")
+		return
+	}
+
+	logger.Debugw(ctx, "applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	for key, level := range changedLogLevels {
+		if key == defaultLogLevelKey {
+			if l, err := log.StringToLogLevel(level); err == nil {
+				log.SetDefaultLogLevel(l)
+			}
+		} else {
+			if l, err := log.StringToLogLevel(level); err == nil {
+				log.SetPackageLogLevel(key, l)
+			}
+		}
+	}
+}
+
+// GenerateLogConfigHash generates the md5 hash of the JSON-marshalled key-value pairs;
+// json.Marshal sorts map keys, so the hash is deterministic for a given configuration
+func GenerateLogConfigHash(createHashLog map[string]string) ([16]byte, error) {
+	levelData, err := json.Marshal(createHashLog)
+	if err != nil {
+		return [16]byte{}, err
+	}
+	return md5.Sum(levelData), nil
+}
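A short sketch of how this hash enables change detection, mirroring the comparison done in loadAndApplyLogConfig; the config maps are illustrative:

package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v5/pkg/config"
)

func main() {
	oldCfg := map[string]string{"default": "ERROR"}
	newCfg := map[string]string{"default": "ERROR", "main": "DEBUG"}

	oldHash, _ := config.GenerateLogConfigHash(oldCfg)
	newHash, _ := config.GenerateLogConfigHash(newCfg)

	// Differing hashes signal that the new config must be (re)applied.
	fmt.Println("changed:", oldHash != newHash)
}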
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
new file mode 100644
index 0000000..95c5bde
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"context"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"os"
+	"strings"
+)
+
+const (
+	defaultTracingStatusKey        = "trace_publish"   // kvstore key containing tracing configuration status
+	defaultLogCorrelationStatusKey = "log_correlation" // kvstore key containing log correlation configuration status
+)
+
+// ComponentLogFeaturesController represents the configuration of the logging-related features (Tracing and Log
+// Correlation) of a specific Voltha component.
+type ComponentLogFeaturesController struct {
+	ComponentName               string
+	componentNameConfig         *ComponentConfig
+	configManager               *ConfigManager
+	initialTracingStatus        bool // Initial default tracing status set by helm chart
+	initialLogCorrelationStatus bool // Initial default log correlation status set by helm chart
+}
+
+func NewComponentLogFeaturesController(ctx context.Context, cm *ConfigManager) (*ComponentLogFeaturesController, error) {
+	logger.Debug(ctx, "creating-new-component-log-features-controller")
+	componentName := os.Getenv("COMPONENT_NAME")
+	if componentName == "" {
+		return nil, errors.New("unable to retrieve pod component name from runtime env")
+	}
+
+	tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
+	logCorrelationStatus := log.GetGlobalLFM().GetLogCorrelationStatus()
+
+	return &ComponentLogFeaturesController{
+		ComponentName:               componentName,
+		componentNameConfig:         nil,
+		configManager:               cm,
+		initialTracingStatus:        tracingStatus,
+		initialLogCorrelationStatus: logCorrelationStatus,
+	}, nil
+
+}
+
+// StartLogFeaturesConfigProcessing persists initial config of Log Features into Config Store before
+// starting the loading and processing of Configuration updates
+func StartLogFeaturesConfigProcessing(cm *ConfigManager, ctx context.Context) {
+	cc, err := NewComponentLogFeaturesController(ctx, cm)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-construct-component-log-features-controller-instance-for-monitoring", log.Fields{"error": err})
+		return
+	}
+
+	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogFeatures)
+	logger.Debugw(ctx, "component-log-features-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+	cc.persistInitialLogFeaturesConfigs(ctx)
+
+	cc.processLogFeaturesConfig(ctx)
+}
+
+// persistInitialLogFeaturesConfigs persists the initial status of the Log Correlation and Tracing features
+// (as set from the command line) into the config store (etcd kvstore), if not already set
+func (cc *ComponentLogFeaturesController) persistInitialLogFeaturesConfigs(ctx context.Context) {
+
+	_, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialTracingStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultTracingStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-tracing-status-at-startup", log.Fields{"error": err, "tracingstatus": statusString})
+		}
+	}
+
+	_, err = cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialLogCorrelationStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultLogCorrelationStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-log-correlation-status-at-startup", log.Fields{"error": err, "logcorrelationstatus": statusString})
+		}
+	}
+}
+
+// processLogFeaturesConfig first loads and applies the configuration of log features. It then waits for any changes
+// made to the configuration in the config store (etcd) and applies them as well
+func (cc *ComponentLogFeaturesController) processLogFeaturesConfig(ctx context.Context) {
+
+	// Load and apply Tracing Status and log correlation status for first time
+	cc.loadAndApplyTracingStatusUpdate(ctx)
+	cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+
+	componentConfigEventChan := cc.componentNameConfig.MonitorForConfigChange(ctx)
+
+	// process the change events received on the channel
+	var configEvent *ConfigChangeEvent
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		case configEvent = <-componentConfigEventChan:
+			logger.Debugw(ctx, "processing-log-features-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+			if strings.HasSuffix(configEvent.ConfigAttribute, defaultTracingStatusKey) {
+				cc.loadAndApplyTracingStatusUpdate(ctx)
+			} else if strings.HasSuffix(configEvent.ConfigAttribute, defaultLogCorrelationStatusKey) {
+				cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+			}
+		}
+	}
+
+}
+
+func (cc *ComponentLogFeaturesController) loadAndApplyTracingStatusUpdate(ctx context.Context) {
+
+	desiredTracingStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil || desiredTracingStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-tracing-status-from-config-store")
+		return
+	}
+
+	if desiredTracingStatus != "ENABLED" && desiredTracingStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-tracing-status-configured-in-config-store", log.Fields{"failed-tracing-status": desiredTracingStatus, "tracing-status": log.GetGlobalLFM().GetTracePublishingStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-tracing-status", log.Fields{"tracing-status": desiredTracingStatus})
+
+	log.GetGlobalLFM().SetTracePublishingStatus(desiredTracingStatus == "ENABLED")
+}
+
+func (cc *ComponentLogFeaturesController) loadAndApplyLogCorrelationStatusUpdate(ctx context.Context) {
+
+	desiredLogCorrelationStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil || desiredLogCorrelationStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-log-correlation-status-from-config-store")
+		return
+	}
+
+	if desiredLogCorrelationStatus != "ENABLED" && desiredLogCorrelationStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-log-correlation-status-configured-in-config-store", log.Fields{"failed-log-correlation-status": desiredLogCorrelationStatus, "log-correlation-status": log.GetGlobalLFM().GetLogCorrelationStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-log-correlation-status", log.Fields{"log-correlation-status": desiredLogCorrelationStatus})
+
+	log.GetGlobalLFM().SetLogCorrelationStatus(desiredLogCorrelationStatus == "ENABLED")
+}
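The ENABLED/DISABLED handling above reduces to a boolean toggle on the global LogFeaturesManager. A minimal sketch, assuming the desired string has already been read from the config store under the trace_publish key:

package main

import "github.com/opencord/voltha-lib-go/v5/pkg/log"

func main() {
	// Hypothetical value as it would be retrieved from the config store.
	desired := "ENABLED"
	if desired == "ENABLED" || desired == "DISABLED" {
		log.GetGlobalLFM().SetTracePublishingStatus(desired == "ENABLED")
	}
}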
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
new file mode 100644
index 0000000..ff0b5b7
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Default Minimal Interval for posting alive state of backend kvstore on Liveness Channel
+	DefaultLivenessChannelInterval = time.Second * 30
+)
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	Client                  kvstore.Client
+	StoreType               string
+	Timeout                 time.Duration
+	Address                 string
+	PathPrefix              string
+	alive                   bool // Is this backend connection alive?
+	livenessMutex           sync.Mutex
+	liveness                chan bool     // channel to post alive state
+	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+	lastLivenessTime        time.Time     // Instant of last alive state push
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(ctx context.Context, storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:               storeType,
+		Address:                 address,
+		Timeout:                 timeout,
+		LivenessChannelInterval: DefaultLivenessChannelInterval,
+		PathPrefix:              pathPrefix,
+		alive:                   false, // connection considered down at start
+	}
+
+	if b.Client, err = b.newClient(ctx, address, timeout); err != nil {
+		logger.Errorw(ctx, "failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "address": address,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
+
+func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "etcd":
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(ctx context.Context, key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+func (b *Backend) updateLiveness(ctx context.Context, alive bool) {
+	// Periodically push stream of liveness data to the channel,
+	// so that in a live state, the core does not timeout and
+	// send a forced liveness message. Push alive state if the
+	// last push to channel was beyond livenessChannelInterval
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
+	if b.liveness != nil {
+		if b.alive != alive {
+			logger.Debug(ctx, "update-liveness-channel-reason-change")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
+			logger.Debug(ctx, "update-liveness-channel-reason-interval")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Emit log message only for alive state change
+	if b.alive != alive {
+		logger.Debugw(ctx, "change-kvstore-alive-status", log.Fields{"alive": alive})
+		b.alive = alive
+	}
+}
+
+// PerformLivenessCheck performs a dummy key lookup on the kvstore to test connection liveness and
+// posts the result on the liveness channel
+func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
+	alive := b.Client.IsConnectionUp(ctx)
+	logger.Debugw(ctx, "kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+	b.updateLiveness(ctx, alive)
+	return alive
+}
+
+// EnableLivenessChannel enables the liveness monitor channel. This channel will report
+// a "true" or "false" on every kvstore operation which indicates whether
+// or not the connection is still Live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
+	logger.Debug(ctx, "enable-kvstore-liveness-channel")
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
+	if b.liveness == nil {
+		b.liveness = make(chan bool, 10)
+		b.liveness <- b.alive
+		b.lastLivenessTime = time.Now()
+	}
+
+	return b.liveness
+}
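A consumer of the liveness channel might look like the following sketch; the etcd address and path prefix are placeholders, and in a real service the received boolean would feed a readiness probe. Something must keep issuing kvstore operations (here, periodic PerformLivenessCheck calls) so the channel keeps receiving state:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db"
)

func main() {
	ctx := context.Background()
	// "etcd:2379" and "service/voltha" are placeholder values.
	backend := db.NewBackend(ctx, "etcd", "etcd:2379", 5*time.Second, "service/voltha")

	// Drive periodic checks so the channel keeps receiving state updates.
	go func() {
		for {
			backend.PerformLivenessCheck(ctx)
			time.Sleep(db.DefaultLivenessChannelInterval)
		}
	}()

	for alive := range backend.EnableLivenessChannel(ctx) {
		// In a real service this boolean would update the readiness probe state.
		fmt.Println("kvstore alive:", alive)
	}
}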
+
+// isErrorIndicatingAliveKvstore extracts the alive status of the kvstore based on the type of error
+func (b *Backend) isErrorIndicatingAliveKvstore(ctx context.Context, err error) bool {
+	// Alive unless observed an error indicating so
+	alive := true
+
+	if err != nil {
+
+		// timeout indicates kvstore not reachable/alive
+		if err == context.DeadlineExceeded {
+			alive = false
+		}
+
+		// Need to analyze client-specific errors based on backend type
+		if b.StoreType == "etcd" {
+
+			// For the etcd backend, consider not-alive only for errors indicating a
+			// timed-out request or an unavailable/corrupted cluster. For all remaining
+			// error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+			// we do not infer a not-alive backend because such an error may also
+			// occur due to bad client requests or sequences of operations
+			switch status.Code(err) {
+			case codes.DeadlineExceeded:
+				fallthrough
+			case codes.Unavailable:
+				fallthrough
+			case codes.DataLoss:
+				alive = false
+			}
+		}
+	}
+
+	return alive
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-list")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.List(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return pair, err
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-get")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.Get(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return pair, err
+}
+
+// Put stores an item value under the specified key
+func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-put")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Put(ctx, formattedPath, value)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(ctx context.Context, key string) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Delete(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
+// DeleteWithPrefix removes items having prefix key
+func (b *Backend) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-with-prefix")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, prefixKey)
+	logger.Debugw(ctx, "deleting-prefix-key", log.Fields{"key": prefixKey, "path": formattedPath})
+
+	err := b.Client.DeleteWithPrefix(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(ctx, formattedPath, withPrefix)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-watch")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(ctx, formattedPath, ch)
+}
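Putting the Backend wrappers together, here is a hedged usage sketch; note how every key is transparently prefixed with PathPrefix by makePath. The address, prefix, and key below are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db"
)

func main() {
	ctx := context.Background()
	// Every key below is stored under "<PathPrefix>/<key>".
	backend := db.NewBackend(ctx, "etcd", "etcd:2379", 5*time.Second, "service/voltha")

	if err := backend.Put(ctx, "devices/device-1", []byte(`{"state":"up"}`)); err != nil {
		fmt.Println("put failed:", err)
		return
	}
	// Resolves to "service/voltha/devices/device-1" on the wire.
	pair, err := backend.Get(ctx, "devices/device-1")
	if err == nil && pair != nil {
		fmt.Println("value:", string(pair.Value.([]byte)))
	}
}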
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
new file mode 100644
index 0000000..4bc92b1
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package db
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that it's log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
new file mode 100644
index 0000000..e4b1fff
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"time"
+)
+
+const (
+	// Maximum channel buffer between publisher/subscriber goroutines
+	maxClientChannelBufferSize = 10
+)
+
+// These constants represent the event types returned by the KV client
+const (
+	PUT = iota
+	DELETE
+	CONNECTIONDOWN
+	UNKNOWN
+)
+
+// KVPair is a common wrapper for key-value pairs returned from the KV store
+type KVPair struct {
+	Key     string
+	Value   interface{}
+	Version int64
+	Session string
+	Lease   int64
+}
+
+// NewKVPair creates a new KVPair object
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
+	kv := new(KVPair)
+	kv.Key = key
+	kv.Value = value
+	kv.Session = session
+	kv.Lease = lease
+	kv.Version = version
+	return kv
+}
+
+// Event is generated by the KV client when a key change is detected
+type Event struct {
+	EventType int
+	Key       interface{}
+	Value     interface{}
+	Version   int64
+}
+
+// NewEvent creates a new Event object
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
+	evnt := new(Event)
+	evnt.EventType = eventType
+	evnt.Key = key
+	evnt.Value = value
+	evnt.Version = version
+
+	return evnt
+}
+
+// Client represents the set of APIs a KV Client must implement
+type Client interface {
+	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	Get(ctx context.Context, key string) (*KVPair, error)
+	Put(ctx context.Context, key string, value interface{}) error
+	Delete(ctx context.Context, key string) error
+	DeleteWithPrefix(ctx context.Context, prefixKey string) error
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+	IsConnectionUp(ctx context.Context) bool // timeout is governed by the passed context
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
+
+	// These APIs are not used.  They will be cleaned up in release Voltha 2.9.
+	// They are not removed now, to limit the changes across all components
+	Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
+	ReleaseReservation(ctx context.Context, key string) error
+	ReleaseAllReservations(ctx context.Context) error
+	RenewReservation(ctx context.Context, key string) error
+	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
+	ReleaseLock(lockName string) error
+}
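A sketch of driving the Client interface end to end with the etcd implementation below; the address and watched key are placeholders, and PUT/DELETE are the event-type constants defined above:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v5/pkg/log"
)

func main() {
	ctx := context.Background()
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		return
	}
	defer client.Close(ctx)

	events := client.Watch(ctx, "service/voltha/devices", true) // withPrefix=true
	go func() {
		for ev := range events {
			switch ev.EventType {
			case kvstore.PUT:
				fmt.Println("updated:", ev.Key)
			case kvstore.DELETE:
				fmt.Println("deleted:", ev.Key)
			}
		}
	}()

	time.Sleep(10 * time.Second) // placeholder for real work
	client.CloseWatch(ctx, "service/voltha/devices", events)
}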
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..b8509db
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that it's log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..96ffc2f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,473 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+)
+
+const (
+	poolCapacityEnvName = "VOLTHA_ETCD_CLIENT_POOL_CAPACITY"
+	maxUsageEnvName     = "VOLTHA_ETCD_CLIENT_MAX_USAGE"
+)
+
+const (
+	defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
+	defaultMaxPoolUsage    = 100  // Maximum concurrent request an Etcd Client is allowed to process
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	pool               EtcdClientAllocator
+	watchedChannels    sync.Map
+	watchedClients     map[string]*v3Client.Client
+	watchedClientsLock sync.RWMutex
+}
+
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	// Get the capacity and max usage from the environment
+	capacity := defaultMaxPoolCapacity
+	maxUsage := defaultMaxPoolUsage
+	if capacityStr, present := os.LookupEnv(poolCapacityEnvName); present {
+		if val, err := strconv.Atoi(capacityStr); err == nil {
+			capacity = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"pool-capacity": capacity})
+		} else {
+			logger.Warnw(ctx, "invalid-capacity-value", log.Fields{"error": err, "capacity": capacityStr})
+		}
+	}
+	if maxUsageStr, present := os.LookupEnv(maxUsageEnvName); present {
+		if val, err := strconv.Atoi(maxUsageStr); err == nil {
+			maxUsage = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"max-usage": maxUsage})
+		} else {
+			logger.Warnw(ctx, "invalid-max-usage-value", log.Fields{"error": err, "max-usage": maxUsageStr})
+		}
+	}
+
+	var err error
+
+	pool, err := NewRoundRobinEtcdClientAllocator([]string{addr}, timeout, capacity, maxUsage, level)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-create-rr-client", log.Fields{
+			"error": err,
+		})
+	}
+
+	logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
+
+	return &EtcdClient{pool: pool,
+		watchedClients: make(map[string]*v3Client.Client),
+	}, nil
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	return NewEtcdCustomClient(ctx, addr, timeout, level)
+}
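Pool sizing is controlled purely through the two environment variables above. A small sketch with illustrative values; unset variables fall back to the 1000/100 defaults:

package main

import (
	"context"
	"os"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v5/pkg/log"
)

func main() {
	// Illustrative sizing: at most 200 pooled clients, 20 concurrent requests each.
	_ = os.Setenv("VOLTHA_ETCD_CLIENT_POOL_CAPACITY", "200")
	_ = os.Setenv("VOLTHA_ETCD_CLIENT_MAX_USAGE", "20")

	client, err := kvstore.NewEtcdClient(context.Background(), "etcd:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		return
	}
	defer client.Close(context.Background())
}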
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+	// Let's try to get a non-existent key.  If the connection is up then no error will be returned.
+	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+		return false
+	}
+	return true
+}
+
+// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+	resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+
+startLoop:
+	for {
+		resp, err := client.Get(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return nil, err
+				}
+				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return nil, err
+		}
+
+		for _, ev := range resp.Kvs {
+			// Only one value is returned
+			return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+		}
+		return nil, nil
+	}
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var err error
+	if val, err = ToString(value); err != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Put(ctx, key, val)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		return nil
+	}
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Delete(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+		return nil
+	}
+}
+
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	// delete the prefix
+	if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel onto which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+	var err error
+	// Reuse the Etcd client when multiple callers are watching the same key.
+	c.watchedClientsLock.Lock()
+	client, exist := c.watchedClients[key]
+	if !exist {
+		client, err = c.pool.Get(ctx)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-get-an-etcd-client", log.Fields{"key": key, "error": err})
+			c.watchedClientsLock.Unlock()
+			return nil
+		}
+		c.watchedClients[key] = client
+	}
+	c.watchedClientsLock.Unlock()
+
+	w := v3Client.NewWatcher(client)
+	ctx, cancel := context.WithCancel(ctx)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
+	// json format.
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
+
+	return ch
+
+}
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug(ctx, "channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+
+	// If we don't have any keys being watched then return the Etcd client to the pool
+	if len(channelMaps) == 0 {
+		c.watchedClientsLock.Lock()
+		// Sanity
+		if client, ok := c.watchedClients[key]; ok {
+			c.pool.Put(client)
+			delete(c.watchedClients, key)
+		}
+		c.watchedClientsLock.Unlock()
+	}
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes all the connection in the pool store client
+func (c *EtcdClient) Close(ctx context.Context) {
+	logger.Debug(ctx, "closing-etcd-pool")
+	c.pool.Close(ctx)
+}
+
+// The APIs below are not used
+var errUnimplemented = errors.New("deprecated")
+
+// Reserve is deprecated
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+	return nil, errUnimplemented
+}
+
+// ReleaseAllReservations is deprecated
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+	return errUnimplemented
+}
+
+// ReleaseReservation is deprecated
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// RenewReservation is deprecated
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// AcquireLock is deprecated
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+	return errUnimplemented
+}
+
+// ReleaseLock is deprecated
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	return errUnimplemented
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
new file mode 100644
index 0000000..6af7d3d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"container/list"
+	"context"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"go.etcd.io/etcd/clientv3"
+	"sync"
+	"time"
+)
+
+// EtcdClientAllocator represents a generic interface to allocate an Etcd Client
+type EtcdClientAllocator interface {
+	Get(context.Context) (*clientv3.Client, error)
+	Put(*clientv3.Client)
+	Close(ctx context.Context)
+}
+
+// NewRoundRobinEtcdClientAllocator creates a new ETCD Client Allocator using a Round Robin scheme
+func NewRoundRobinEtcdClientAllocator(endpoints []string, timeout time.Duration, capacity, maxUsage int, level log.LogLevel) (EtcdClientAllocator, error) {
+	return &roundRobin{
+		all:       make(map[*clientv3.Client]*rrEntry),
+		full:      make(map[*clientv3.Client]*rrEntry),
+		waitList:  list.New(),
+		max:       maxUsage,
+		capacity:  capacity,
+		timeout:   timeout,
+		endpoints: endpoints,
+		logLevel:  level,
+		closingCh: make(chan struct{}, capacity*maxUsage),
+		stopCh:    make(chan struct{}),
+	}, nil
+}
+
+type rrEntry struct {
+	client *clientv3.Client
+	count  int
+	age    time.Time
+}
+
+type roundRobin struct {
+	//block chan struct{}
+	sync.Mutex
+	available []*rrEntry
+	all       map[*clientv3.Client]*rrEntry
+	full      map[*clientv3.Client]*rrEntry
+	waitList  *list.List
+	max       int
+	capacity  int
+	timeout   time.Duration
+	//ageOut    time.Duration
+	endpoints []string
+	size      int
+	logLevel  log.LogLevel
+	closing   bool
+	closingCh chan struct{}
+	stopCh    chan struct{}
+}
+
+// Get returns an Etcd client. If none is available, it will create one
+// up to the maximum allowed capacity.  If the maximum capacity has been
+// reached then it will wait until a used one is freed.
+func (r *roundRobin) Get(ctx context.Context) (*clientv3.Client, error) {
+	r.Lock()
+
+	if r.closing {
+		r.Unlock()
+		return nil, errors.New("pool-is-closing")
+	}
+
+	// first determine if we need to block, which would mean the
+	// available queue is empty and we are at capacity
+	if len(r.available) == 0 && r.size >= r.capacity {
+
+		// create a channel on which to wait and
+		// add it to the list
+		ch := make(chan struct{})
+		element := r.waitList.PushBack(ch)
+		r.Unlock()
+
+		// block until it is our turn or context
+		// expires or is canceled
+		select {
+		case <-r.stopCh:
+			logger.Info(ctx, "stop-waiting-pool-is-closing")
+			r.waitList.Remove(element)
+			return nil, errors.New("stop-waiting-pool-is-closing")
+		case <-ch:
+			r.waitList.Remove(element)
+		case <-ctx.Done():
+			r.waitList.Remove(element)
+			return nil, ctx.Err()
+		}
+		r.Lock()
+	}
+
+	defer r.Unlock()
+	if len(r.available) > 0 {
+		// pull off back end as it is operationally quicker
+		last := len(r.available) - 1
+		entry := r.available[last]
+		entry.count++
+		if entry.count >= r.max {
+			r.available = r.available[:last]
+			r.full[entry.client] = entry
+		}
+		entry.age = time.Now()
+		return entry.client, nil
+	}
+
+	logConfig := log.ConstructZapConfig(log.JSON, r.logLevel, log.Fields{})
+	// increase capacity
+	client, err := clientv3.New(clientv3.Config{
+		Endpoints:   r.endpoints,
+		DialTimeout: r.timeout,
+		LogConfig:   &logConfig,
+	})
+	if err != nil {
+		return nil, err
+	}
+	entry := &rrEntry{
+		client: client,
+		count:  1,
+	}
+	r.all[entry.client] = entry
+
+	if r.max > 1 {
+		r.available = append(r.available, entry)
+	} else {
+		r.full[entry.client] = entry
+	}
+	r.size++
+	return client, nil
+}
+
+// Put returns the Etcd Client back to the pool
+func (r *roundRobin) Put(client *clientv3.Client) {
+	r.Lock()
+
+	entry := r.all[client]
+	entry.count--
+
+	if r.closing {
+		// Close client if count is 0
+		if entry.count == 0 {
+			if err := entry.client.Close(); err != nil {
+				logger.Warnw(context.Background(), "error-closing-client", log.Fields{"error": err})
+			}
+			delete(r.all, entry.client)
+		}
+		// Notify Close function that a client was returned to the pool
+		r.closingCh <- struct{}{}
+		r.Unlock()
+		return
+	}
+
+	// This entry is now available for use, so
+	// if in full map add it to available and
+	// remove from full
+	if _, ok := r.full[client]; ok {
+		r.available = append(r.available, entry)
+		delete(r.full, client)
+	}
+
+	front := r.waitList.Front()
+	if front != nil {
+		ch := r.waitList.Remove(front)
+		r.Unlock()
+		// need to unblock if someone is waiting
+		ch.(chan struct{}) <- struct{}{}
+		return
+	}
+	r.Unlock()
+}
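The Get/Put pairing above is the whole contract of the allocator: every Get must be matched by a Put so waiters unblock and Close can drain. A hedged sketch with a placeholder endpoint and sizes:

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v5/pkg/log"
)

func main() {
	ctx := context.Background()
	// capacity=100 pooled clients, each serving at most 10 concurrent requests.
	pool, err := kvstore.NewRoundRobinEtcdClientAllocator([]string{"etcd:2379"}, 5*time.Second, 100, 10, log.WarnLevel)
	if err != nil {
		return
	}
	defer pool.Close(ctx)

	cli, err := pool.Get(ctx) // may block when all clients are at max usage
	if err != nil {
		return
	}
	defer pool.Put(cli) // always return the client so waiters are unblocked

	_, _ = cli.Get(ctx, "service/voltha/some-key") // cli is a plain *clientv3.Client
}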
+
+func (r *roundRobin) Close(ctx context.Context) {
+	r.Lock()
+	r.closing = true
+
+	// Notify anyone waiting for a client to stop waiting
+	close(r.stopCh)
+
+	// Clean-up unused clients
+	for i := 0; i < len(r.available); i++ {
+		// Count 0 means no one is using that client
+		if r.available[i].count == 0 {
+			if err := r.available[i].client.Close(); err != nil {
+				logger.Warnw(ctx, "failure-closing-client", log.Fields{"client": r.available[i].client, "error": err})
+			}
+			// Remove the client from the 'all' map
+			delete(r.all, r.available[i].client)
+		}
+	}
+
+	// Figure out how many clients are in use
+	numberInUse := 0
+	for _, rrEntry := range r.all {
+		numberInUse += rrEntry.count
+	}
+	r.Unlock()
+
+	if numberInUse == 0 {
+		logger.Info(ctx, "no-connection-in-use")
+		return
+	}
+
+	logger.Infow(ctx, "waiting-for-clients-return", log.Fields{"count": numberInUse})
+
+	// Wait for notifications when a client is returned to the pool
+	for {
+		select {
+		case <-r.closingCh:
+			numberInUse--
+			if numberInUse == 0 {
+				logger.Info(ctx, "all-connections-closed")
+				return
+			}
+		case <-ctx.Done():
+			logger.Warnw(ctx, "context-done", log.Fields{"error": ctx.Err()})
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
new file mode 100644
index 0000000..ca57542
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+const (
+	minRetryInterval  = 100
+	maxRetryInterval  = 5000
+	incrementalFactor = 1.2
+	jitter            = 0.2
+)
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(t), nil
+	case string:
+		return t, nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return t, nil
+	case string:
+		return []byte(t), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// backoff waits an amount of time that is proportional to the attempt value.  The wait time falls
+// within the range of minRetryInterval to maxRetryInterval.
+func backoff(ctx context.Context, attempt int) error {
+	if attempt == 0 {
+		return nil
+	}
+	backoff := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
+	backoff *= 1 + int(jitter*(rand.Float64()*2-1))
+	if backoff > maxRetryInterval {
+		backoff = maxRetryInterval
+	}
+	ticker := time.NewTicker(time.Duration(backoff) * time.Millisecond)
+	defer ticker.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-ticker.C:
+	}
+	return nil
+}
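For a feel of the schedule this formula produces, the following standalone sketch mirrors the constants above and prints the pre-jitter wait per attempt (the ±20% jitter and context handling are omitted):

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		minRetryInterval  = 100
		maxRetryInterval  = 5000
		incrementalFactor = 1.2
	)
	for attempt := 1; attempt <= 9; attempt++ {
		d := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
		if d > maxRetryInterval {
			d = maxRetryInterval // attempts beyond ~8 are capped at 5s
		}
		fmt.Printf("attempt %d: ~%d ms (before jitter)\n", attempt, d)
	}
}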
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
new file mode 100644
index 0000000..df3e839
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package events
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that it's log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
new file mode 100644
index 0000000..35f821f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package eventif
+
+import (
+	"context"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// EventProxy interface for eventproxy
+type EventProxy interface {
+	SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+	SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+	SendRPCEvent(ctx context.Context, id string, deviceEvent *voltha.RPCEvent, category EventCategory,
+		subCategory *EventSubCategory, raisedTs int64) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
+}
+
+const (
+	EventTypeVersion = "0.1"
+)
+
+type (
+	EventType        = voltha.EventType_Types
+	EventCategory    = voltha.EventCategory_Types
+	EventSubCategory = voltha.EventSubCategory_Types
+)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
new file mode 100644
index 0000000..19a4f26
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package events
+
+import (
+	"container/ring"
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// TODO: Make configurable through helm chart
+const EVENT_THRESHOLD = 1000
+
+type lastEvent struct{}
+
+type EventProxy struct {
+	kafkaClient    kafka.Client
+	eventTopic     kafka.Topic
+	eventQueue     *EventQueue
+	queueCtx       context.Context
+	queueCancelCtx context.CancelFunc
+}
+
+func NewEventProxy(opts ...EventProxyOption) *EventProxy {
+	var proxy EventProxy
+	for _, option := range opts {
+		option(&proxy)
+	}
+	proxy.eventQueue = newEventQueue()
+	proxy.queueCtx, proxy.queueCancelCtx = context.WithCancel(context.Background())
+	return &proxy
+}
+
+type EventProxyOption func(*EventProxy)
+
+func MsgClient(client kafka.Client) EventProxyOption {
+	return func(args *EventProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func MsgTopic(topic kafka.Topic) EventProxyOption {
+	return func(args *EventProxy) {
+		args.eventTopic = topic
+	}
+}
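Wiring the proxy up uses the functional options above. A compilable sketch; the nil kafka.Client value and the Topic literal with a Name field are assumptions about the library's kafka package, not shown in this diff:

package main

import (
	"github.com/opencord/voltha-lib-go/v5/pkg/events"
	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
)

func main() {
	// Assume an initialized kafka.Client in real use; nil keeps the sketch compilable.
	var kafkaClient kafka.Client

	proxy := events.NewEventProxy(
		events.MsgClient(kafkaClient),
		events.MsgTopic(kafka.Topic{Name: "voltha.events"}), // Name field is assumed
	)
	go proxy.Start() // drains the internal queue and publishes to Kafka
}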
+
+func (ep *EventProxy) formatId(eventName string) string {
+	return fmt.Sprintf("Voltha.openolt.%s.%s", eventName, strconv.FormatInt(time.Now().UnixNano(), 10))
+}
+
+func (ep *EventProxy) getEventHeader(eventName string,
+	category eventif.EventCategory,
+	subCategory *eventif.EventSubCategory,
+	eventType eventif.EventType,
+	raisedTs int64) (*voltha.EventHeader, error) {
+	var header voltha.EventHeader
+	if strings.Contains(eventName, "_") {
+		eventName = strings.Join(strings.Split(eventName, "_")[:len(strings.Split(eventName, "_"))-2], "_")
+	} else {
+		eventName = "UNKNOWN_EVENT"
+	}
+	/* Populating event header */
+	header.Id = ep.formatId(eventName)
+	header.Category = category
+	if subCategory != nil {
+		header.SubCategory = *subCategory
+	} else {
+		header.SubCategory = voltha.EventSubCategory_NONE
+	}
+	header.Type = eventType
+	header.TypeVersion = eventif.EventTypeVersion
+
+	// raisedTs is in seconds
+	timestamp, err := ptypes.TimestampProto(time.Unix(raisedTs, 0))
+	if err != nil {
+		return nil, err
+	}
+	header.RaisedTs = timestamp
+
+	timestamp, err = ptypes.TimestampProto(time.Now())
+	if err != nil {
+		return nil, err
+	}
+	header.ReportedTs = timestamp
+
+	return &header, nil
+}
+
+/* Send out rpc events*/
+func (ep *EventProxy) SendRPCEvent(ctx context.Context, id string, rpcEvent *voltha.RPCEvent, category eventif.EventCategory, subCategory *eventif.EventSubCategory, raisedTs int64) error {
+	if rpcEvent == nil {
+		logger.Error(ctx, "Received empty rpc event")
+		return errors.New("rpc event nil")
+	}
+	var event voltha.Event
+	var err error
+	if event.Header, err = ep.getEventHeader(id, category, subCategory, voltha.EventType_RPC_EVENT, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &voltha.Event_RpcEvent{RpcEvent: rpcEvent}
+	ep.eventQueue.push(&event)
+	return nil
+
+}
+
+/* Send out device events*/
+func (ep *EventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64) error {
+	if deviceEvent == nil {
+		logger.Error(ctx, "Received empty device event")
+		return errors.New("device event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_DeviceEvent
+	var err error
+	de.DeviceEvent = deviceEvent
+	if event.Header, err = ep.getEventHeader(deviceEvent.DeviceEventName, category, &subCategory, voltha.EventType_DEVICE_EVENT, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &de
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+		return err
+	}
+	logger.Infow(ctx, "Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
+		"DeviceEventName": deviceEvent.DeviceEventName})
+
+	return nil
+
+}
+
+// SendKpiEvent is to send kpi events to voltha.event topic
+func (ep *EventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64) error {
+	if kpiEvent == nil {
+		logger.Error(ctx, "Received empty kpi event")
+		return errors.New("kpi event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_KpiEvent2
+	var err error
+	de.KpiEvent2 = kpiEvent
+	if event.Header, err = ep.getEventHeader(id, category, &subCategory, voltha.EventType_KPI_EVENT2, raisedTs); err != nil {
+		return err
+	}
+	event.EventType = &de
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+		return err
+	}
+	logger.Infow(ctx, "Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
+
+	return nil
+
+}
+
+func (ep *EventProxy) sendEvent(ctx context.Context, event *voltha.Event) error {
+	logger.Debugw(ctx, "Send event to kafka", log.Fields{"event": event})
+	if err := ep.kafkaClient.Send(ctx, event, &ep.eventTopic); err != nil {
+		return err
+	}
+	logger.Debugw(ctx, "Sent event to kafka", log.Fields{"event": event})
+
+	return nil
+}
+
+func (ep *EventProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return ep.kafkaClient.EnableLivenessChannel(ctx, enable)
+}
+
+func (ep *EventProxy) SendLiveness(ctx context.Context) error {
+	return ep.kafkaClient.SendLiveness(ctx)
+}
+
+// Start the event proxy
+func (ep *EventProxy) Start() {
+	eq := ep.eventQueue
+	go eq.start(ep.queueCtx)
+	logger.Debugw(context.Background(), "event-proxy-starting...", log.Fields{"events-threshold": EVENT_THRESHOLD})
+	for {
+		// Notify the queue I am ready
+		eq.readyToSendToKafkaCh <- struct{}{}
+		// Wait for an event
+		elem, ok := <-eq.eventChannel
+		if !ok {
+			logger.Debug(context.Background(), "event-channel-closed-exiting")
+			break
+		}
+		// Check for last event
+		if _, ok := elem.(*lastEvent); ok {
+			// close the queuing loop
+			logger.Info(context.Background(), "received-last-event")
+			ep.queueCancelCtx()
+			break
+		}
+		ctx := context.Background()
+		event, ok := elem.(*voltha.Event)
+		if !ok {
+			logger.Warnw(ctx, "invalid-event", log.Fields{"element": elem})
+			continue
+		}
+		if err := ep.sendEvent(ctx, event); err != nil {
+			logger.Errorw(ctx, "failed-to-send-event-to-kafka-bus", log.Fields{"event": event})
+		} else {
+			logger.Debugw(ctx, "successfully-sent-rpc-event-to-kafka-bus", log.Fields{"id": event.Header.Id, "category": event.Header.Category,
+				"sub-category": event.Header.SubCategory, "type": event.Header.Type, "type-version": event.Header.TypeVersion,
+				"reported-ts": event.Header.ReportedTs, "event-type": event.EventType})
+		}
+	}
+}
+
+func (ep *EventProxy) Stop() {
+	ep.eventQueue.stop()
+}
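+
+// Illustrative wiring (a sketch, not part of the library API): construct the proxy
+// with the functional options above, run Start in its own goroutine, and call Stop
+// to flush. kafkaClient, ctx, rpcEvent and the topic name are assumed placeholders.
+//
+//	ep := NewEventProxy(MsgClient(kafkaClient), MsgTopic(kafka.Topic{Name: "voltha.events"}))
+//	go ep.Start()  // drains the internal queue and publishes to kafka
+//	_ = ep.SendRPCEvent(ctx, "rpc-event-id", rpcEvent, voltha.EventCategory_COMMUNICATION, nil, time.Now().Unix())
+//	ep.Stop()      // pushes a terminal marker and waits for the queue to drain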
+
+type EventQueue struct {
+	mutex                sync.RWMutex
+	eventChannel         chan interface{}
+	insertPosition       *ring.Ring
+	popPosition          *ring.Ring
+	dataToSendAvailable  chan struct{}
+	readyToSendToKafkaCh chan struct{}
+	eventQueueStopped    chan struct{}
+}
+
+func newEventQueue() *EventQueue {
+	ev := &EventQueue{
+		eventChannel:         make(chan interface{}),
+		insertPosition:       ring.New(EVENT_THRESHOLD),
+		dataToSendAvailable:  make(chan struct{}),
+		readyToSendToKafkaCh: make(chan struct{}),
+		eventQueueStopped:    make(chan struct{}),
+	}
+	ev.popPosition = ev.insertPosition
+	return ev
+}
+
+// push appends an event to the back of the queue
+func (eq *EventQueue) push(event interface{}) {
+	eq.mutex.Lock()
+
+	if eq.insertPosition != nil {
+		// Handle Queue is full.
+		// TODO: Current default is to overwrite old data if queue is full. Is there a need to
+		// block caller if max threshold is reached?
+		if eq.insertPosition.Value != nil && eq.insertPosition == eq.popPosition {
+			eq.popPosition = eq.popPosition.Next()
+		}
+
+		// Insert data and move pointer to next empty position
+		eq.insertPosition.Value = event
+		eq.insertPosition = eq.insertPosition.Next()
+
+		// Check for last event
+		if _, ok := event.(*lastEvent); ok {
+			eq.insertPosition = nil
+		}
+		eq.mutex.Unlock()
+		// Notify waiting thread of data availability
+		eq.dataToSendAvailable <- struct{}{}
+
+	} else {
+		logger.Debug(context.Background(), "event-queue-is-closed-as-insert-position-is-cleared")
+		eq.mutex.Unlock()
+	}
+}
+
+// start starts the routine that extracts an element from the event queue and
+// sends it to the kafka sending routine for processing.
+func (eq *EventQueue) start(ctx context.Context) {
+	logger.Info(ctx, "starting-event-queue")
+loop:
+	for {
+		select {
+		case <-eq.dataToSendAvailable:
+		// Do nothing - drained here so that a caller pushing data does not block
+		case <-eq.readyToSendToKafkaCh:
+			{
+				// Kafka sending routine is ready to process an event
+				eq.mutex.Lock()
+				element := eq.popPosition.Value
+				if element == nil {
+					// No events to send. Wait
+					eq.mutex.Unlock()
+					select {
+					case _, ok := <-eq.dataToSendAvailable:
+						if !ok {
+							// channel closed
+							eq.eventQueueStopped <- struct{}{}
+							return
+						}
+					case <-ctx.Done():
+						logger.Info(ctx, "event-queue-context-done")
+						eq.eventQueueStopped <- struct{}{}
+						return
+					}
+					eq.mutex.Lock()
+					element = eq.popPosition.Value
+				}
+				eq.popPosition.Value = nil
+				eq.popPosition = eq.popPosition.Next()
+				eq.mutex.Unlock()
+				eq.eventChannel <- element
+			}
+		case <-ctx.Done():
+			logger.Info(ctx, "event-queue-context-done")
+			eq.eventQueueStopped <- struct{}{}
+			break loop
+		}
+	}
+	logger.Info(ctx, "event-queue-stopped")
+
+}
+
+func (eq *EventQueue) stop() {
+	// Flush all
+	eq.push(&lastEvent{})
+	<-eq.eventQueueStopped
+	eq.mutex.Lock()
+	close(eq.readyToSendToKafkaCh)
+	close(eq.dataToSendAvailable)
+	close(eq.eventChannel)
+	eq.mutex.Unlock()
+
+}
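+
+// Behavioural note (an illustrative sketch, not part of the library): the queue is a
+// fixed-size ring of EVENT_THRESHOLD entries. When it is full, push advances
+// popPosition so the oldest unsent event is overwritten instead of blocking the caller.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	eq := newEventQueue()
+//	go eq.start(ctx)
+//	for i := 0; i < EVENT_THRESHOLD+1; i++ {
+//		eq.push(&voltha.Event{}) // the (EVENT_THRESHOLD+1)-th push overwrites the oldest entry
+//	}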
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
new file mode 100644
index 0000000..fe3a017
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package events
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+type ContextType string
+
+const (
+	// ContextAdminState is for the admin state of the Device in the context of the event
+	ContextAdminState ContextType = "admin-state"
+	// ContextConnectState is for the connect state of the Device in the context of the event
+	ContextConnectState ContextType = "connect-state"
+	// ContextOperState is for the operational state of the Device in the context of the event
+	ContextOperState ContextType = "oper-state"
+	// ContextPrevAdminState is for the previous admin state of the Device in the context of the event
+	ContextPrevAdminState ContextType = "prev-admin-state"
+	// ContextPrevConnectState is for the previous connect state of the Device in the context of the event
+	ContextPrevConnectState ContextType = "prev-connect-state"
+	// ContextPrevOperState is for the previous operational state of the Device in the context of the event
+	ContextPrevOperState ContextType = "prev-oper-state"
+	// ContextDeviceID is for the device ID in the context of the event
+	ContextDeviceID ContextType = "id"
+	// ContextParentID is for the parent id in the context of the event
+	ContextParentID ContextType = "parent-id"
+	// ContextSerialNumber is for the serial number of the Device in the context of the event
+	ContextSerialNumber ContextType = "serial-number"
+	// ContextIsRoot is for the root flag of Device in the context of the event
+	ContextIsRoot ContextType = "is-root"
+	// ContextParentPort is for the parent interface id of child in the context of the event
+	ContextParentPort ContextType = "parent-port"
+)
+
+type EventName string
+
+const (
+	DeviceStateChangeEvent EventName = "DEVICE_STATE_CHANGE"
+)
+
+type EventAction string
+
+const (
+	Raise EventAction = "RAISE_EVENT"
+	Clear EventAction = "CLEAR_EVENT"
+)
+
+//CreateDeviceStateChangeEvent forms and returns a new DeviceStateChange Event
+func CreateDeviceStateChangeEvent(serialNumber string, deviceID string, parentID string,
+	prevOperStatus voltha.OperStatus_Types, prevConnStatus voltha.ConnectStatus_Types, prevAdminStatus voltha.AdminState_Types,
+	operStatus voltha.OperStatus_Types, connStatus voltha.ConnectStatus_Types, adminStatus voltha.AdminState_Types,
+	parentPort uint32, isRoot bool) *voltha.DeviceEvent {
+
+	context := make(map[string]string)
+	/* Populating event context */
+	context[string(ContextSerialNumber)] = serialNumber
+	context[string(ContextDeviceID)] = deviceID
+	context[string(ContextParentID)] = parentID
+	context[string(ContextPrevOperState)] = prevOperStatus.String()
+	context[string(ContextPrevConnectState)] = prevConnStatus.String()
+	context[string(ContextPrevAdminState)] = prevAdminStatus.String()
+	context[string(ContextOperState)] = operStatus.String()
+	context[string(ContextConnectState)] = connStatus.String()
+	context[string(ContextAdminState)] = adminStatus.String()
+	context[string(ContextIsRoot)] = strconv.FormatBool(isRoot)
+	context[string(ContextParentPort)] = strconv.FormatUint(uint64(parentPort), 10)
+
+	return &voltha.DeviceEvent{
+		Context:         context,
+		ResourceId:      deviceID,
+		DeviceEventName: fmt.Sprintf("%s_%s", string(DeviceStateChangeEvent), string(Raise)),
+	}
+}
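+
+// Illustrative usage (a sketch; the ids and status values are arbitrary examples):
+//
+//	event := CreateDeviceStateChangeEvent("serial-1", "device-1", "parent-1",
+//		voltha.OperStatus_ACTIVE, voltha.ConnectStatus_REACHABLE, voltha.AdminState_ENABLED,
+//		voltha.OperStatus_FAILED, voltha.ConnectStatus_UNREACHABLE, voltha.AdminState_ENABLED,
+//		1, false)
+//	// event.DeviceEventName == "DEVICE_STATE_CHANGE_RAISE_EVENT"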
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
new file mode 100644
index 0000000..beb0574
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
new file mode 100644
index 0000000..ff6aaf0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
@@ -0,0 +1,1608 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"sort"
+
+	"github.com/cevaris/ordered_map"
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
+)
+
+var (
+	// Instructions shortcut
+	APPLY_ACTIONS  = ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS
+	WRITE_METADATA = ofp.OfpInstructionType_OFPIT_WRITE_METADATA
+	METER_ACTION   = ofp.OfpInstructionType_OFPIT_METER
+
+	//OFPAT_* shortcuts
+	OUTPUT       = ofp.OfpActionType_OFPAT_OUTPUT
+	COPY_TTL_OUT = ofp.OfpActionType_OFPAT_COPY_TTL_OUT
+	COPY_TTL_IN  = ofp.OfpActionType_OFPAT_COPY_TTL_IN
+	SET_MPLS_TTL = ofp.OfpActionType_OFPAT_SET_MPLS_TTL
+	DEC_MPLS_TTL = ofp.OfpActionType_OFPAT_DEC_MPLS_TTL
+	PUSH_VLAN    = ofp.OfpActionType_OFPAT_PUSH_VLAN
+	POP_VLAN     = ofp.OfpActionType_OFPAT_POP_VLAN
+	PUSH_MPLS    = ofp.OfpActionType_OFPAT_PUSH_MPLS
+	POP_MPLS     = ofp.OfpActionType_OFPAT_POP_MPLS
+	SET_QUEUE    = ofp.OfpActionType_OFPAT_SET_QUEUE
+	GROUP        = ofp.OfpActionType_OFPAT_GROUP
+	SET_NW_TTL   = ofp.OfpActionType_OFPAT_SET_NW_TTL
+	NW_TTL       = ofp.OfpActionType_OFPAT_DEC_NW_TTL
+	SET_FIELD    = ofp.OfpActionType_OFPAT_SET_FIELD
+	PUSH_PBB     = ofp.OfpActionType_OFPAT_PUSH_PBB
+	POP_PBB      = ofp.OfpActionType_OFPAT_POP_PBB
+	EXPERIMENTER = ofp.OfpActionType_OFPAT_EXPERIMENTER
+
+	//OFPXMT_OFB_* shortcuts (incomplete)
+	IN_PORT         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+	IN_PHY_PORT     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT
+	METADATA        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_METADATA
+	ETH_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST
+	ETH_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC
+	ETH_TYPE        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE
+	VLAN_VID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID
+	VLAN_PCP        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP
+	IP_DSCP         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP
+	IP_ECN          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN
+	IP_PROTO        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO
+	IPV4_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC
+	IPV4_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST
+	TCP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC
+	TCP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST
+	UDP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC
+	UDP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST
+	SCTP_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC
+	SCTP_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST
+	ICMPV4_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE
+	ICMPV4_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE
+	ARP_OP          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP
+	ARP_SPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA
+	ARP_TPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA
+	ARP_SHA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA
+	ARP_THA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA
+	IPV6_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC
+	IPV6_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST
+	IPV6_FLABEL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL
+	ICMPV6_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE
+	ICMPV6_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE
+	IPV6_ND_TARGET  = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET
+	OFB_IPV6_ND_SLL = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL
+	IPV6_ND_TLL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL
+	MPLS_LABEL      = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL
+	MPLS_TC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC
+	MPLS_BOS        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS
+	PBB_ISID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID
+	TUNNEL_ID       = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID
+	IPV6_EXTHDR     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR
+)
+
+//ofp_action_* shortcuts
+
+func Output(port uint32, maxLen ...ofp.OfpControllerMaxLen) *ofp.OfpAction {
+	maxLength := ofp.OfpControllerMaxLen_OFPCML_MAX
+	if len(maxLen) > 0 {
+		maxLength = maxLen[0]
+	}
+	return &ofp.OfpAction{Type: OUTPUT, Action: &ofp.OfpAction_Output{Output: &ofp.OfpActionOutput{Port: port, MaxLen: uint32(maxLength)}}}
+}
+
+func MplsTtl(ttl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: SET_MPLS_TTL, Action: &ofp.OfpAction_MplsTtl{MplsTtl: &ofp.OfpActionMplsTtl{MplsTtl: ttl}}}
+}
+
+func PushVlan(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: PUSH_VLAN, Action: &ofp.OfpAction_Push{Push: &ofp.OfpActionPush{Ethertype: ethType}}}
+}
+
+func PopVlan() *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_VLAN}
+}
+
+func PopMpls(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_MPLS, Action: &ofp.OfpAction_PopMpls{PopMpls: &ofp.OfpActionPopMpls{Ethertype: ethType}}}
+}
+
+func Group(groupId uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: GROUP, Action: &ofp.OfpAction_Group{Group: &ofp.OfpActionGroup{GroupId: groupId}}}
+}
+
+func NwTtl(nwTtl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: NW_TTL, Action: &ofp.OfpAction_NwTtl{NwTtl: &ofp.OfpActionNwTtl{NwTtl: nwTtl}}}
+}
+
+func SetField(field *ofp.OfpOxmOfbField) *ofp.OfpAction {
+	actionSetField := &ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: field}}
+	return &ofp.OfpAction{Type: SET_FIELD, Action: &ofp.OfpAction_SetField{SetField: &ofp.OfpActionSetField{Field: actionSetField}}}
+}
+
+func Experimenter(experimenter uint32, data []byte) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: EXPERIMENTER, Action: &ofp.OfpAction_Experimenter{Experimenter: &ofp.OfpActionExperimenter{Experimenter: experimenter, Data: data}}}
+}
+
+//ofb_field generators (incomplete set)
+
+func InPort(inPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPort}}
+}
+
+func InPhyPort(inPhyPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PHY_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPhyPort}}
+}
+
+func Metadata_ofp(tableMetadata uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: METADATA, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: tableMetadata}}
+}
+
+// TODO: should Metadata_ofp be used here instead?
+func EthDst(ethDst uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_DST, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethDst}}
+}
+
+// TODO: should Metadata_ofp be used here instead?
+func EthSrc(ethSrc uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_SRC, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethSrc}}
+}
+
+func EthType(ethType uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_TYPE, Value: &ofp.OfpOxmOfbField_EthType{EthType: ethType}}
+}
+
+func VlanVid(vlanVid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_VID, Value: &ofp.OfpOxmOfbField_VlanVid{VlanVid: vlanVid}}
+}
+
+func VlanPcp(vlanPcp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_PCP, Value: &ofp.OfpOxmOfbField_VlanPcp{VlanPcp: vlanPcp}}
+}
+
+func IpDscp(ipDscp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_DSCP, Value: &ofp.OfpOxmOfbField_IpDscp{IpDscp: ipDscp}}
+}
+
+func IpEcn(ipEcn uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_ECN, Value: &ofp.OfpOxmOfbField_IpEcn{IpEcn: ipEcn}}
+}
+
+func IpProto(ipProto uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_PROTO, Value: &ofp.OfpOxmOfbField_IpProto{IpProto: ipProto}}
+}
+
+func Ipv4Src(ipv4Src uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_SRC, Value: &ofp.OfpOxmOfbField_Ipv4Src{Ipv4Src: ipv4Src}}
+}
+
+func Ipv4Dst(ipv4Dst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_DST, Value: &ofp.OfpOxmOfbField_Ipv4Dst{Ipv4Dst: ipv4Dst}}
+}
+
+func TcpSrc(tcpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_SRC, Value: &ofp.OfpOxmOfbField_TcpSrc{TcpSrc: tcpSrc}}
+}
+
+func TcpDst(tcpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_DST, Value: &ofp.OfpOxmOfbField_TcpDst{TcpDst: tcpDst}}
+}
+
+func UdpSrc(udpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_SRC, Value: &ofp.OfpOxmOfbField_UdpSrc{UdpSrc: udpSrc}}
+}
+
+func UdpDst(udpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_DST, Value: &ofp.OfpOxmOfbField_UdpDst{UdpDst: udpDst}}
+}
+
+func SctpSrc(sctpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_SRC, Value: &ofp.OfpOxmOfbField_SctpSrc{SctpSrc: sctpSrc}}
+}
+
+func SctpDst(sctpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_DST, Value: &ofp.OfpOxmOfbField_SctpDst{SctpDst: sctpDst}}
+}
+
+func Icmpv4Type(icmpv4Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv4Type{Icmpv4Type: icmpv4Type}}
+}
+
+func Icmpv4Code(icmpv4Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_CODE, Value: &ofp.OfpOxmOfbField_Icmpv4Code{Icmpv4Code: icmpv4Code}}
+}
+
+func ArpOp(arpOp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_OP, Value: &ofp.OfpOxmOfbField_ArpOp{ArpOp: arpOp}}
+}
+
+func ArpSpa(arpSpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SPA, Value: &ofp.OfpOxmOfbField_ArpSpa{ArpSpa: arpSpa}}
+}
+
+func ArpTpa(arpTpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_TPA, Value: &ofp.OfpOxmOfbField_ArpTpa{ArpTpa: arpTpa}}
+}
+
+func ArpSha(arpSha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SHA, Value: &ofp.OfpOxmOfbField_ArpSha{ArpSha: arpSha}}
+}
+
+func ArpTha(arpTha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_THA, Value: &ofp.OfpOxmOfbField_ArpTha{ArpTha: arpTha}}
+}
+
+func Ipv6Src(ipv6Src []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_SRC, Value: &ofp.OfpOxmOfbField_Ipv6Src{Ipv6Src: ipv6Src}}
+}
+
+func Ipv6Dst(ipv6Dst []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_DST, Value: &ofp.OfpOxmOfbField_Ipv6Dst{Ipv6Dst: ipv6Dst}}
+}
+
+func Ipv6Flabel(ipv6Flabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_FLABEL, Value: &ofp.OfpOxmOfbField_Ipv6Flabel{Ipv6Flabel: ipv6Flabel}}
+}
+
+func Icmpv6Type(icmpv6Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv6Type{Icmpv6Type: icmpv6Type}}
+}
+
+func Icmpv6Code(icmpv6Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_CODE, Value: &ofp.OfpOxmOfbField_Icmpv6Code{Icmpv6Code: icmpv6Code}}
+}
+
+func Ipv6NdTarget(ipv6NdTarget []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TARGET, Value: &ofp.OfpOxmOfbField_Ipv6NdTarget{Ipv6NdTarget: ipv6NdTarget}}
+}
+
+func OfbIpv6NdSll(ofbIpv6NdSll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: OFB_IPV6_ND_SLL, Value: &ofp.OfpOxmOfbField_Ipv6NdSsl{Ipv6NdSsl: ofbIpv6NdSll}}
+}
+
+func Ipv6NdTll(ipv6NdTll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TLL, Value: &ofp.OfpOxmOfbField_Ipv6NdTll{Ipv6NdTll: ipv6NdTll}}
+}
+
+func MplsLabel(mplsLabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_LABEL, Value: &ofp.OfpOxmOfbField_MplsLabel{MplsLabel: mplsLabel}}
+}
+
+func MplsTc(mplsTc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_TC, Value: &ofp.OfpOxmOfbField_MplsTc{MplsTc: mplsTc}}
+}
+
+func MplsBos(mplsBos uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_BOS, Value: &ofp.OfpOxmOfbField_MplsBos{MplsBos: mplsBos}}
+}
+
+func PbbIsid(pbbIsid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: PBB_ISID, Value: &ofp.OfpOxmOfbField_PbbIsid{PbbIsid: pbbIsid}}
+}
+
+func TunnelId(tunnelId uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TUNNEL_ID, Value: &ofp.OfpOxmOfbField_TunnelId{TunnelId: tunnelId}}
+}
+
+func Ipv6Exthdr(ipv6Exthdr uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_EXTHDR, Value: &ofp.OfpOxmOfbField_Ipv6Exthdr{Ipv6Exthdr: ipv6Exthdr}}
+}
+
+//frequently used extractors
+
+func excludeAction(action *ofp.OfpAction, exclude ...ofp.OfpActionType) bool {
+	for _, actionToExclude := range exclude {
+		if action.Type == actionToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetActions(flow *ofp.OfpFlowStats, exclude ...ofp.OfpActionType) []*ofp.OfpAction {
+	if flow == nil {
+		return nil
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			if len(exclude) == 0 {
+				return instActions.Actions
+			} else {
+				filteredAction := make([]*ofp.OfpAction, 0)
+				for _, action := range instActions.Actions {
+					if !excludeAction(action, exclude...) {
+						filteredAction = append(filteredAction, action)
+					}
+				}
+				return filteredAction
+			}
+		}
+	}
+	return nil
+}
+
+func UpdateOutputPortByActionType(flow *ofp.OfpFlowStats, actionType uint32, toPort uint32) *ofp.OfpFlowStats {
+	if flow == nil {
+		return nil
+	}
+	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	nFlow.Instructions = nil
+	nInsts := make([]*ofp.OfpInstruction, 0)
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == actionType {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			nActions := make([]*ofp.OfpAction, 0)
+			for _, action := range instActions.Actions {
+				if action.GetOutput() != nil {
+					nActions = append(nActions, Output(toPort))
+				} else {
+					nActions = append(nActions, action)
+				}
+			}
+			instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: nActions}}
+			nInsts = append(nInsts, &ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction})
+		} else {
+			nInsts = append(nInsts, instruction)
+		}
+	}
+	nFlow.Instructions = nInsts
+	return nFlow
+}
+
+func excludeOxmOfbField(field *ofp.OfpOxmOfbField, exclude ...ofp.OxmOfbFieldTypes) bool {
+	for _, fieldToExclude := range exclude {
+		if field.Type == fieldToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetOfbFields(flow *ofp.OfpFlowStats, exclude ...ofp.OxmOfbFieldTypes) []*ofp.OfpOxmOfbField {
+	if flow == nil || flow.Match == nil || flow.Match.Type != ofp.OfpMatchType_OFPMT_OXM {
+		return nil
+	}
+	ofbFields := make([]*ofp.OfpOxmOfbField, 0)
+	for _, field := range flow.Match.OxmFields {
+		if field.OxmClass == ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+			ofbFields = append(ofbFields, field.GetOfbField())
+		}
+	}
+	if len(exclude) == 0 {
+		return ofbFields
+	} else {
+		filteredFields := make([]*ofp.OfpOxmOfbField, 0)
+		for _, ofbField := range ofbFields {
+			if !excludeOxmOfbField(ofbField, exclude...) {
+				filteredFields = append(filteredFields, ofbField)
+			}
+		}
+		return filteredFields
+	}
+}
+
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
+func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == OUTPUT {
+			out := action.GetOutput()
+			if out == nil {
+				return 0
+			}
+			return out.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetInPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == IN_PORT {
+			return field.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetGotoTableId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE) {
+			gotoTable := instruction.GetGotoTable()
+			if gotoTable == nil {
+				return 0
+			}
+			return gotoTable.GetTableId()
+		}
+	}
+	return 0
+}
+
+func GetMeterId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_METER) {
+			MeterInstruction := instruction.GetMeter()
+			if MeterInstruction == nil {
+				return 0
+			}
+			return MeterInstruction.GetMeterId()
+		}
+	}
+	return 0
+}
+
+func GetVlanVid(flow *ofp.OfpFlowStats) *uint32 {
+	if flow == nil {
+		return nil
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == VLAN_VID {
+			ret := field.GetVlanVid()
+			return &ret
+		}
+	}
+	// Don't return 0 if the field is missing, as vlan id 0 is meaningful and cannot be overloaded to mean "not found"
+	return nil
+}
+
+func GetSetActionField(ctx context.Context, flow *ofp.OfpFlowStats, ofbType ofp.OxmOfbFieldTypes) (uint32, bool) {
+	if flow == nil {
+		return 0, false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(APPLY_ACTIONS) {
+			actions := instruction.GetActions()
+			for _, action := range actions.GetActions() {
+				if action.Type == SET_FIELD {
+					setField := action.GetSetField()
+					if setField.Field.GetOfbField().Type == ofbType {
+						switch ofbType {
+						case VLAN_PCP:
+							return setField.Field.GetOfbField().GetVlanPcp(), true
+						case VLAN_VID:
+							return setField.Field.GetOfbField().GetVlanVid(), true
+						default:
+							logger.Errorw(ctx, "unsupported-ofb-field-type", log.Fields{"ofbType": ofbType})
+							return 0, false
+						}
+					}
+				}
+			}
+			return 0, false
+		}
+	}
+	return 0, false
+}
+
+func GetTunnelId(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == TUNNEL_ID {
+			return field.GetTunnelId()
+		}
+	}
+	return 0
+}
+
+// GetMetaData is a legacy getter that returns only the lower 32 bits of the metadata
+func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return field.GetTableMetadata()
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+// GetMetadataFromWriteMetadataAction returns the metadata value from the write_metadata instruction, if present
+func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(WRITE_METADATA) {
+				if writeMetadata := instruction.GetWriteMetadata(); writeMetadata != nil {
+					return writeMetadata.GetMetadata()
+				}
+			}
+		}
+	}
+	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
+	return 0
+}
+
+func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var tpId uint16 = 0
+	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	if metadata != 0 {
+		tpId = uint16((metadata >> 32) & 0xFFFF)
+		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+	}
+	return tpId
+}
+
+func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var uniPort uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	if md != 0 {
+		uniPort = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+	}
+	return uniPort
+
+}
+
+func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var innerTag uint16 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	if md != 0 {
+		innerTag = uint16((md >> 48) & 0xFFFF)
+		logger.Debugw(ctx, "Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+	}
+	return innerTag
+}
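+
+// Worked example (illustrative value): for metadata md = 0x0064004100000100,
+//	inner tag (C Tag)     = (md >> 48) & 0xFFFF  = 0x0064 = 100
+//	technology profile id = (md >> 32) & 0xFFFF  = 0x0041 = 65
+//	egress/uni port       = md & 0xFFFFFFFF      = 0x0100 = 256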
+
+// Legacy variant (kept for reference): GetInnerTagFromMetaData retrieved the inner tag from Metadata_ofp. The port
+// number (UNI on ONU) is in the lower 32 bits of Metadata_ofp and the inner_tag is in the upper 32 bits. This was
+// set in the ONOS OltPipeline as a Metadata_ofp field.
+/*func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		logger.Debugw(ctx, "onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return (md >> 32) & 0xffffffff
+}*/
+
+// GetChildPortFromTunnelId extracts the child device port from a flow that contains the parent device peer port,
+// typically the UNI port of an ONU child device. Per TST agreement this is the lower 32 bits of the tunnel id,
+// reserving the upper 32 bits for later use.
+func GetChildPortFromTunnelId(flow *ofp.OfpFlowStats) uint32 {
+	tid := GetTunnelId(flow)
+	if tid == 0 {
+		return 0
+	}
+	// Per TST agreement we are keeping any child port id (uni port id) in the lower 32 bits
+	return uint32(tid & 0xffffffff)
+}
+
+func HasNextTable(flow *ofp.OfpFlowStats) bool {
+	if flow == nil {
+		return false
+	}
+	return GetGotoTableId(flow) != 0
+}
+
+func GetGroup(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == GROUP {
+			grp := action.GetGroup()
+			if grp == nil {
+				return 0
+			}
+			return grp.GetGroupId()
+		}
+	}
+	return 0
+}
+
+func HasGroup(flow *ofp.OfpFlowStats) bool {
+	return GetGroup(flow) != 0
+}
+
+// GetNextTableId returns the next table ID if "table_id" is present in the map, otherwise returns nil
+func GetNextTableId(kw OfpFlowModArgs) *uint32 {
+	if val, exist := kw["table_id"]; exist {
+		ret := uint32(val)
+		return &ret
+	}
+	return nil
+}
+
+// GetMeterIdFlowModArgs returns the meterId if "meter_id" is present in the map, otherwise returns 0
+func GetMeterIdFlowModArgs(kw OfpFlowModArgs) uint32 {
+	if val, exist := kw["meter_id"]; exist {
+		return uint32(val)
+	}
+	return 0
+}
+
+// Function returns the metadata if the "write_metadata" is present in the map, otherwise return nil
+func GetMetadataFlowModArgs(kw OfpFlowModArgs) uint64 {
+	if val, exist := kw["write_metadata"]; exist {
+		ret := uint64(val)
+		return ret
+	}
+	return 0
+}
+
+// HashFlowStats returns a unique 64-bit integer hash of 'table_id', 'priority', and 'match'
+// The OF spec states that:
+// A flow table entry is identified by its match fields and priority: the match fields
+// and priority taken together identify a unique flow entry in the flow table.
+func HashFlowStats(flow *ofp.OfpFlowStats) (uint64, error) {
+	// first we need to make sure the oxm fields are in a predictable order (the specific order doesn't matter)
+	sort.Slice(flow.Match.OxmFields, func(a, b int) bool {
+		fieldsA, fieldsB := flow.Match.OxmFields[a], flow.Match.OxmFields[b]
+		// compare primarily by OXM class; handle both orderings so the comparator stays consistent
+		if fieldsA.OxmClass != fieldsB.OxmClass {
+			return fieldsA.OxmClass < fieldsB.OxmClass
+		}
+		switch fieldA := fieldsA.Field.(type) {
+		case *ofp.OfpOxmField_OfbField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_ExperimenterField:
+				return true // ofp < experimenter
+			case *ofp.OfpOxmField_OfbField:
+				return fieldA.OfbField.Type < fieldB.OfbField.Type
+			}
+		case *ofp.OfpOxmField_ExperimenterField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_OfbField:
+				return false // ofp < experimenter
+			case *ofp.OfpOxmField_ExperimenterField:
+				eFieldA, eFieldB := fieldA.ExperimenterField, fieldB.ExperimenterField
+				if eFieldA.Experimenter != eFieldB.Experimenter {
+					return eFieldA.Experimenter < eFieldB.Experimenter
+				}
+				return eFieldA.OxmHeader < eFieldB.OxmHeader
+			}
+		}
+		return false
+	})
+
+	md5Hash := md5.New() // note that write errors will never occur with md5 hashing
+	var tmp [12]byte
+
+	binary.BigEndian.PutUint32(tmp[0:4], flow.TableId)             // tableId
+	binary.BigEndian.PutUint32(tmp[4:8], flow.Priority)            // priority
+	binary.BigEndian.PutUint32(tmp[8:12], uint32(flow.Match.Type)) // match type
+	_, _ = md5Hash.Write(tmp[:12])
+
+	for _, field := range flow.Match.OxmFields { // for all match fields
+		binary.BigEndian.PutUint32(tmp[:4], uint32(field.OxmClass)) // match class
+		_, _ = md5Hash.Write(tmp[:4])
+
+		switch oxmField := field.Field.(type) {
+		case *ofp.OfpOxmField_ExperimenterField:
+			binary.BigEndian.PutUint32(tmp[0:4], oxmField.ExperimenterField.Experimenter)
+			binary.BigEndian.PutUint32(tmp[4:8], oxmField.ExperimenterField.OxmHeader)
+			_, _ = md5Hash.Write(tmp[:8])
+
+		case *ofp.OfpOxmField_OfbField:
+			if err := hashWriteOfbField(md5Hash, oxmField.OfbField); err != nil {
+				return 0, err
+			}
+
+		default:
+			return 0, fmt.Errorf("unknown OfpOxmField type: %T", field.Field)
+		}
+	}
+
+	ret := md5Hash.Sum(nil)
+	return binary.BigEndian.Uint64(ret[0:8]), nil
+}
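+
+// Note: only table_id, priority and the (sorted) match fields feed the hash, so two
+// flows that differ only in their instructions or actions receive the same Id.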
+
+func hashWriteOfbField(md5Hash hash.Hash, field *ofp.OfpOxmOfbField) error {
+	var tmp [8]byte
+	binary.BigEndian.PutUint32(tmp[:4], uint32(field.Type)) // type
+	_, _ = md5Hash.Write(tmp[:4])
+
+	// value
+	valType, val32, val64, valSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+	switch val := field.Value.(type) {
+	case *ofp.OfpOxmOfbField_Port:
+		valType, val32 = 4, val.Port
+	case *ofp.OfpOxmOfbField_PhysicalPort:
+		valType, val32 = 4, val.PhysicalPort
+	case *ofp.OfpOxmOfbField_TableMetadata:
+		valType, val64 = 8, val.TableMetadata
+	case *ofp.OfpOxmOfbField_EthDst:
+		valType, valSlice = 1, val.EthDst
+	case *ofp.OfpOxmOfbField_EthSrc:
+		valType, valSlice = 1, val.EthSrc
+	case *ofp.OfpOxmOfbField_EthType:
+		valType, val32 = 4, val.EthType
+	case *ofp.OfpOxmOfbField_VlanVid:
+		valType, val32 = 4, val.VlanVid
+	case *ofp.OfpOxmOfbField_VlanPcp:
+		valType, val32 = 4, val.VlanPcp
+	case *ofp.OfpOxmOfbField_IpDscp:
+		valType, val32 = 4, val.IpDscp
+	case *ofp.OfpOxmOfbField_IpEcn:
+		valType, val32 = 4, val.IpEcn
+	case *ofp.OfpOxmOfbField_IpProto:
+		valType, val32 = 4, val.IpProto
+	case *ofp.OfpOxmOfbField_Ipv4Src:
+		valType, val32 = 4, val.Ipv4Src
+	case *ofp.OfpOxmOfbField_Ipv4Dst:
+		valType, val32 = 4, val.Ipv4Dst
+	case *ofp.OfpOxmOfbField_TcpSrc:
+		valType, val32 = 4, val.TcpSrc
+	case *ofp.OfpOxmOfbField_TcpDst:
+		valType, val32 = 4, val.TcpDst
+	case *ofp.OfpOxmOfbField_UdpSrc:
+		valType, val32 = 4, val.UdpSrc
+	case *ofp.OfpOxmOfbField_UdpDst:
+		valType, val32 = 4, val.UdpDst
+	case *ofp.OfpOxmOfbField_SctpSrc:
+		valType, val32 = 4, val.SctpSrc
+	case *ofp.OfpOxmOfbField_SctpDst:
+		valType, val32 = 4, val.SctpDst
+	case *ofp.OfpOxmOfbField_Icmpv4Type:
+		valType, val32 = 4, val.Icmpv4Type
+	case *ofp.OfpOxmOfbField_Icmpv4Code:
+		valType, val32 = 4, val.Icmpv4Code
+	case *ofp.OfpOxmOfbField_ArpOp:
+		valType, val32 = 4, val.ArpOp
+	case *ofp.OfpOxmOfbField_ArpSpa:
+		valType, val32 = 4, val.ArpSpa
+	case *ofp.OfpOxmOfbField_ArpTpa:
+		valType, val32 = 4, val.ArpTpa
+	case *ofp.OfpOxmOfbField_ArpSha:
+		valType, valSlice = 1, val.ArpSha
+	case *ofp.OfpOxmOfbField_ArpTha:
+		valType, valSlice = 1, val.ArpTha
+	case *ofp.OfpOxmOfbField_Ipv6Src:
+		valType, valSlice = 1, val.Ipv6Src
+	case *ofp.OfpOxmOfbField_Ipv6Dst:
+		valType, valSlice = 1, val.Ipv6Dst
+	case *ofp.OfpOxmOfbField_Ipv6Flabel:
+		valType, val32 = 4, val.Ipv6Flabel
+	case *ofp.OfpOxmOfbField_Icmpv6Type:
+		valType, val32 = 4, val.Icmpv6Type
+	case *ofp.OfpOxmOfbField_Icmpv6Code:
+		valType, val32 = 4, val.Icmpv6Code
+	case *ofp.OfpOxmOfbField_Ipv6NdTarget:
+		valType, valSlice = 1, val.Ipv6NdTarget
+	case *ofp.OfpOxmOfbField_Ipv6NdSsl:
+		valType, valSlice = 1, val.Ipv6NdSsl
+	case *ofp.OfpOxmOfbField_Ipv6NdTll:
+		valType, valSlice = 1, val.Ipv6NdTll
+	case *ofp.OfpOxmOfbField_MplsLabel:
+		valType, val32 = 4, val.MplsLabel
+	case *ofp.OfpOxmOfbField_MplsTc:
+		valType, val32 = 4, val.MplsTc
+	case *ofp.OfpOxmOfbField_MplsBos:
+		valType, val32 = 4, val.MplsBos
+	case *ofp.OfpOxmOfbField_PbbIsid:
+		valType, val32 = 4, val.PbbIsid
+	case *ofp.OfpOxmOfbField_TunnelId:
+		valType, val64 = 8, val.TunnelId
+	case *ofp.OfpOxmOfbField_Ipv6Exthdr:
+		valType, val32 = 4, val.Ipv6Exthdr
+	default:
+		return fmt.Errorf("unknown OfpOxmField value type: %T", val)
+	}
+	switch valType {
+	case 1: // slice
+		_, _ = md5Hash.Write(valSlice)
+	case 4: // uint32
+		binary.BigEndian.PutUint32(tmp[:4], val32)
+		_, _ = md5Hash.Write(tmp[:4])
+	case 8: // uint64
+		binary.BigEndian.PutUint64(tmp[:8], val64)
+		_, _ = md5Hash.Write(tmp[:8])
+	}
+
+	// mask
+	if !field.HasMask {
+		tmp[0] = 0x00
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = false
+	} else {
+		tmp[0] = 0x01
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = true
+
+		maskType, mask32, mask64, maskSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+		switch mask := field.Mask.(type) {
+		case *ofp.OfpOxmOfbField_TableMetadataMask:
+			maskType, mask64 = 8, mask.TableMetadataMask
+		case *ofp.OfpOxmOfbField_EthDstMask:
+			maskType, maskSlice = 1, mask.EthDstMask
+		case *ofp.OfpOxmOfbField_EthSrcMask:
+			maskType, maskSlice = 1, mask.EthSrcMask
+		case *ofp.OfpOxmOfbField_VlanVidMask:
+			maskType, mask32 = 4, mask.VlanVidMask
+		case *ofp.OfpOxmOfbField_Ipv4SrcMask:
+			maskType, mask32 = 4, mask.Ipv4SrcMask
+		case *ofp.OfpOxmOfbField_Ipv4DstMask:
+			maskType, mask32 = 4, mask.Ipv4DstMask
+		case *ofp.OfpOxmOfbField_ArpSpaMask:
+			maskType, mask32 = 4, mask.ArpSpaMask
+		case *ofp.OfpOxmOfbField_ArpTpaMask:
+			maskType, mask32 = 4, mask.ArpTpaMask
+		case *ofp.OfpOxmOfbField_Ipv6SrcMask:
+			maskType, maskSlice = 1, mask.Ipv6SrcMask
+		case *ofp.OfpOxmOfbField_Ipv6DstMask:
+			maskType, maskSlice = 1, mask.Ipv6DstMask
+		case *ofp.OfpOxmOfbField_Ipv6FlabelMask:
+			maskType, mask32 = 4, mask.Ipv6FlabelMask
+		case *ofp.OfpOxmOfbField_PbbIsidMask:
+			maskType, mask32 = 4, mask.PbbIsidMask
+		case *ofp.OfpOxmOfbField_TunnelIdMask:
+			maskType, mask64 = 8, mask.TunnelIdMask
+		case *ofp.OfpOxmOfbField_Ipv6ExthdrMask:
+			maskType, mask32 = 4, mask.Ipv6ExthdrMask
+		case nil:
+			return fmt.Errorf("hasMask set to true, but no mask present")
+		default:
+			return fmt.Errorf("unknown OfpOxmField mask type: %T", mask)
+		}
+		switch maskType {
+		case 1: // slice
+			_, _ = md5Hash.Write(maskSlice)
+		case 4: // uint32
+			binary.BigEndian.PutUint32(tmp[:4], mask32)
+			_, _ = md5Hash.Write(tmp[:4])
+		case 8: // uint64
+			binary.BigEndian.PutUint64(tmp[:8], mask64)
+			_, _ = md5Hash.Write(tmp[:8])
+		}
+	}
+	return nil
+}
+
+// FlowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
+func FlowStatsEntryFromFlowModMessage(mod *ofp.OfpFlowMod) (*ofp.OfpFlowStats, error) {
+	flow := &ofp.OfpFlowStats{}
+	if mod == nil {
+		return flow, nil
+	}
+	flow.TableId = mod.TableId
+	flow.Priority = mod.Priority
+	flow.IdleTimeout = mod.IdleTimeout
+	flow.HardTimeout = mod.HardTimeout
+	flow.Flags = mod.Flags
+	flow.Cookie = mod.Cookie
+	flow.Match = mod.Match
+	flow.Instructions = mod.Instructions
+	var err error
+	if flow.Id, err = HashFlowStats(flow); err != nil {
+		return nil, err
+	}
+
+	return flow, nil
+}
+
+func GroupEntryFromGroupMod(mod *ofp.OfpGroupMod) *ofp.OfpGroupEntry {
+	group := &ofp.OfpGroupEntry{}
+	if mod == nil {
+		return group
+	}
+	group.Desc = &ofp.OfpGroupDesc{Type: mod.Type, GroupId: mod.GroupId, Buckets: mod.Buckets}
+	group.Stats = &ofp.OfpGroupStats{GroupId: mod.GroupId}
+	//TODO do we need to instantiate bucket bins?
+	return group
+}
+
+// MeterEntryFromMeterMod maps an ofp_meter_mod message to an ofp_meter_entry message
+func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+	bandStats := make([]*ofp.OfpMeterBandStats, 0)
+	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
+		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
+	if meterMod == nil {
+		logger.Error(ctx, "Invalid meter mod command")
+		return meter
+	}
+	// config init
+	meter.Config.MeterId = meterMod.MeterId
+	meter.Config.Flags = meterMod.Flags
+	meter.Config.Bands = meterMod.Bands
+	// meter stats init
+	meter.Stats.MeterId = meterMod.MeterId
+	meter.Stats.FlowCount = 0
+	meter.Stats.PacketInCount = 0
+	meter.Stats.ByteInCount = 0
+	meter.Stats.DurationSec = 0
+	meter.Stats.DurationNsec = 0
+	// band stats init
+	for range meterMod.Bands {
+		band := &ofp.OfpMeterBandStats{}
+		band.PacketBandCount = 0
+		band.ByteBandCount = 0
+		bandStats = append(bandStats, band)
+	}
+	meter.Stats.BandStats = bandStats
+	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
+	return meter
+
+}
+
+func GetMeterIdFromFlow(flow *ofp.OfpFlowStats) uint32 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					return meterInst.GetMeterId()
+				}
+			}
+		}
+	}
+
+	return uint32(0)
+}
+
+func MkOxmFields(matchFields []ofp.OfpOxmField) []*ofp.OfpOxmField {
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	return oxmFields
+}
+
+func MkInstructionsFromActions(actions []*ofp.OfpAction) []*ofp.OfpInstruction {
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+	return instructions
+}
+
+// MkSimpleFlowMod is a convenience function to generate an ofp_flow_mod message with an OXM BASIC match composed
+// from the match fields, and a single APPLY_ACTIONS instruction with a list of ofp_action objects.
+func MkSimpleFlowMod(matchFields []*ofp.OfpOxmField, actions []*ofp.OfpAction, command *ofp.OfpFlowModCommand, kw OfpFlowModArgs) *ofp.OfpFlowMod {
+
+	// Process actions instructions
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+
+	// Process next table
+	if tableId := GetNextTableId(kw); tableId != nil {
+		var instGotoTable ofp.OfpInstruction_GotoTable
+		instGotoTable.GotoTable = &ofp.OfpInstructionGotoTable{TableId: *tableId}
+		inst := ofp.OfpInstruction{Type: uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE), Data: &instGotoTable}
+		instructions = append(instructions, &inst)
+	}
+	// Process meter action
+	if meterId := GetMeterIdFlowModArgs(kw); meterId != 0 {
+		var instMeter ofp.OfpInstruction_Meter
+		instMeter.Meter = &ofp.OfpInstructionMeter{MeterId: meterId}
+		inst := ofp.OfpInstruction{Type: uint32(METER_ACTION), Data: &instMeter}
+		instructions = append(instructions, &inst)
+	}
+	//process write_metadata action
+	if metadata := GetMetadataFlowModArgs(kw); metadata != 0 {
+		var instWriteMetadata ofp.OfpInstruction_WriteMetadata
+		instWriteMetadata.WriteMetadata = &ofp.OfpInstructionWriteMetadata{Metadata: metadata}
+		inst := ofp.OfpInstruction{Type: uint32(WRITE_METADATA), Data: &instWriteMetadata}
+		instructions = append(instructions, &inst)
+	}
+
+	// Process match fields
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	var match ofp.OfpMatch
+	match.Type = ofp.OfpMatchType_OFPMT_OXM
+	match.OxmFields = oxmFields
+
+	// Create ofp_flow_message
+	msg := &ofp.OfpFlowMod{}
+	if command == nil {
+		msg.Command = ofp.OfpFlowModCommand_OFPFC_ADD
+	} else {
+		msg.Command = *command
+	}
+	msg.Instructions = instructions
+	msg.Match = &match
+
+	// Set the variadic argument values
+	msg = setVariadicModAttributes(msg, kw)
+
+	return msg
+}
+
+func MkMulticastGroupMod(groupId uint32, buckets []*ofp.OfpBucket, command *ofp.OfpGroupModCommand) *ofp.OfpGroupMod {
+	group := &ofp.OfpGroupMod{}
+	if command == nil {
+		group.Command = ofp.OfpGroupModCommand_OFPGC_ADD
+	} else {
+		group.Command = *command
+	}
+	group.Type = ofp.OfpGroupType_OFPGT_ALL
+	group.GroupId = groupId
+	group.Buckets = buckets
+	return group
+}
+
+// setVariadicModAttributes sets only the uint64 or uint32 fields of the ofp_flow_mod message
+func setVariadicModAttributes(mod *ofp.OfpFlowMod, args OfpFlowModArgs) *ofp.OfpFlowMod {
+	if args == nil {
+		return mod
+	}
+	for key, val := range args {
+		switch key {
+		case "cookie":
+			mod.Cookie = val
+		case "cookie_mask":
+			mod.CookieMask = val
+		case "table_id":
+			mod.TableId = uint32(val)
+		case "idle_timeout":
+			mod.IdleTimeout = uint32(val)
+		case "hard_timeout":
+			mod.HardTimeout = uint32(val)
+		case "priority":
+			mod.Priority = uint32(val)
+		case "buffer_id":
+			mod.BufferId = uint32(val)
+		case "out_port":
+			mod.OutPort = uint32(val)
+		case "out_group":
+			mod.OutGroup = uint32(val)
+		case "flags":
+			mod.Flags = uint32(val)
+		}
+	}
+	return mod
+}
+
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
+// MkFlowStat is a helper method to build flows
+func MkFlowStat(fa *FlowArgs) (*ofp.OfpFlowStats, error) {
+	//Build the match-fields
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range fa.MatchFields {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return FlowStatsEntryFromFlowModMessage(MkSimpleFlowMod(matchFields, fa.Actions, fa.Command, fa.KV))
+}
+
+func MkGroupStat(ga *GroupArgs) *ofp.OfpGroupEntry {
+	return GroupEntryFromGroupMod(MkMulticastGroupMod(ga.GroupId, ga.Buckets, ga.Command))
+}
+
+type OfpFlowModArgs map[string]uint64
+
+type FlowArgs struct {
+	MatchFields []*ofp.OfpOxmOfbField
+	Actions     []*ofp.OfpAction
+	Command     *ofp.OfpFlowModCommand
+	Priority    uint32
+	KV          OfpFlowModArgs
+}
+
+type GroupArgs struct {
+	GroupId uint32
+	Buckets []*ofp.OfpBucket
+	Command *ofp.OfpGroupModCommand
+}
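+
+// Illustrative sketch: building a flow with the helpers above. The port, vlan and
+// priority values are arbitrary examples, not defaults.
+//
+//	fa := &FlowArgs{
+//		KV: OfpFlowModArgs{"priority": 1000, "table_id": 0},
+//		MatchFields: []*ofp.OfpOxmOfbField{
+//			InPort(1),
+//			VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 100),
+//			EthType(0x888e),
+//		},
+//		Actions: []*ofp.OfpAction{
+//			Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+//		},
+//	}
+//	flow, err := MkFlowStat(fa) // flow.Id is computed by HashFlowStats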
+
+type FlowsAndGroups struct {
+	Flows  *ordered_map.OrderedMap
+	Groups *ordered_map.OrderedMap
+}
+
+func NewFlowsAndGroups() *FlowsAndGroups {
+	var fg FlowsAndGroups
+	fg.Flows = ordered_map.NewOrderedMap()
+	fg.Groups = ordered_map.NewOrderedMap()
+	return &fg
+}
+
+func (fg *FlowsAndGroups) Copy() *FlowsAndGroups {
+	copyFG := NewFlowsAndGroups()
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			copyFG.Flows.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			copyFG.Groups.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	return copyFG
+}
+
+func (fg *FlowsAndGroups) GetFlow(index int) *ofp.OfpFlowStats {
+	iter := fg.Flows.IterFunc()
+	pos := 0
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if pos == index {
+			if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+				return protoMsg
+			}
+			return nil
+		}
+		pos += 1
+	}
+	return nil
+}
+
+func (fg *FlowsAndGroups) ListFlows() []*ofp.OfpFlowStats {
+	flows := make([]*ofp.OfpFlowStats, 0)
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			flows = append(flows, protoMsg)
+		}
+	}
+	return flows
+}
+
+func (fg *FlowsAndGroups) ListGroups() []*ofp.OfpGroupEntry {
+	groups := make([]*ofp.OfpGroupEntry, 0)
+	iter := fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			groups = append(groups, protoMsg)
+		}
+	}
+	return groups
+}
+
+func (fg *FlowsAndGroups) String() string {
+	var buffer bytes.Buffer
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			buffer.WriteString("\nFlow:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			buffer.WriteString("\nGroup:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	return buffer.String()
+}
+
+func (fg *FlowsAndGroups) AddFlow(flow *ofp.OfpFlowStats) {
+	if flow == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add flow only if absent
+	if _, exist := fg.Flows.Get(flow.Id); !exist {
+		fg.Flows.Set(flow.Id, flow)
+	}
+}
+
+func (fg *FlowsAndGroups) AddGroup(group *ofp.OfpGroupEntry) {
+	if group == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add group only if absent
+	if _, exist := fg.Groups.Get(group.Desc.GroupId); !exist {
+		fg.Groups.Set(group.Desc.GroupId, group)
+	}
+}
+
+//AddFrom adds flows and groups from the argument into this structure, but only if they do not already exist
+func (fg *FlowsAndGroups) AddFrom(from *FlowsAndGroups) {
+	iter := from.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			if _, exist := fg.Flows.Get(protoMsg.Id); !exist {
+				fg.Flows.Set(protoMsg.Id, protoMsg)
+			}
+		}
+	}
+	iter = from.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			if _, exist := fg.Groups.Get(protoMsg.Stats.GroupId); !exist {
+				fg.Groups.Set(protoMsg.Stats.GroupId, protoMsg)
+			}
+		}
+	}
+}
+
+type DeviceRules struct {
+	Rules map[string]*FlowsAndGroups
+}
+
+func NewDeviceRules() *DeviceRules {
+	var dr DeviceRules
+	dr.Rules = make(map[string]*FlowsAndGroups)
+	return &dr
+}
+
+func (dr *DeviceRules) Copy() *DeviceRules {
+	copyDR := NewDeviceRules()
+	if dr != nil {
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
+		}
+	}
+	return copyDR
+}
+
+func (dr *DeviceRules) ClearFlows(deviceId string) {
+	if _, exist := dr.Rules[deviceId]; exist {
+		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
+	}
+}
+
+func (dr *DeviceRules) FilterRules(deviceIds map[string]string) *DeviceRules {
+	filteredDR := NewDeviceRules()
+	for key, val := range dr.Rules {
+		if _, exist := deviceIds[key]; exist {
+			filteredDR.Rules[key] = val.Copy()
+		}
+	}
+	return filteredDR
+}
+
+func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	if _, exist := dr.Rules[deviceId]; !exist {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId].AddFlow(flow)
+}
+
+func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	return dr.Rules
+}
+
+func (dr *DeviceRules) String() string {
+	var buffer bytes.Buffer
+	for key, value := range dr.Rules {
+		buffer.WriteString("DeviceId:")
+		buffer.WriteString(key)
+		buffer.WriteString(value.String())
+		buffer.WriteString("\n\n")
+	}
+	return buffer.String()
+}
+
+func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	dr.Rules[deviceId] = fg
+}
+
+// CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
+// empty FlowsAndGroups to it.  Otherwise, it does nothing.
+func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+}
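+
+// A minimal usage sketch (illustrative only, not part of the library) showing how
+// DeviceRules and FlowsAndGroups are typically combined; the device id and flow
+// values below are hypothetical.
+//
+//	fg := NewFlowsAndGroups()
+//	fg.AddFlow(&ofp.OfpFlowStats{Id: 0x1001, TableId: 0, Priority: 1000})
+//	dr := NewDeviceRules()
+//	dr.AddFlowsAndGroup("olt-1", fg)
+//	fmt.Println(dr.String())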
+
+/*
+ *  Common flow routines
+ */
+
+//FindOverlappingFlows returns a list of overlapping flow(s), where mod is the flow request
+func FindOverlappingFlows(flows []*ofp.OfpFlowStats, mod *ofp.OfpFlowMod) []*ofp.OfpFlowStats {
+	return nil //TODO - complete implementation
+}
+
+// FindFlowById returns the index of the flow in the flows array if present. Otherwise, it returns -1
+func FindFlowById(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if flow.Id == f.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+// FindFlows returns the index of flow in the flows array if present.  Otherwise, it returns -1
+func FindFlows(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if f.Id == flow.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+//FlowMatch returns true if the two flows match.  The flow Id is a hash of the
+//flow attributes (TableId, Priority, Flags, Cookie and Match), so comparing
+//Ids covers those attributes
+func FlowMatch(f1 *ofp.OfpFlowStats, f2 *ofp.OfpFlowStats) bool {
+	return f1 != nil && f2 != nil && f1.Id == f2.Id
+}
+
+//FlowMatchesMod returns true if the given flow is "covered" by the wildcard flow_mod, taking into account
+//both exact matches as well as mask-based match fields, if any. Otherwise it returns false
+func FlowMatchesMod(flow *ofp.OfpFlowStats, mod *ofp.OfpFlowMod) bool {
+	if flow == nil || mod == nil {
+		return false
+	}
+	//Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
+	if (flow.Cookie & mod.CookieMask) != (mod.Cookie & mod.CookieMask) {
+		return false
+	}
+
+	//Check if flow.table_id is covered by flow_mod.table_id
+	if mod.TableId != uint32(ofp.OfpTable_OFPTT_ALL) && flow.TableId != mod.TableId {
+		return false
+	}
+
+	//Check out_port
+	if (mod.OutPort&0x7fffffff) != uint32(ofp.OfpPortNo_OFPP_ANY) && !FlowHasOutPort(flow, mod.OutPort) {
+		return false
+	}
+
+	//	Check out_group
+	if (mod.OutGroup&0x7fffffff) != uint32(ofp.OfpGroup_OFPG_ANY) && !FlowHasOutGroup(flow, mod.OutGroup) {
+		return false
+	}
+
+	//Priority is ignored
+
+	//Check match condition
+	//If the flow_mod match field is empty, that is a special case and indicates the flow entry matches
+	if (mod.Match == nil) || (mod.Match.OxmFields == nil) || (len(mod.Match.OxmFields) == 0) {
+		//If we got this far and the match is empty in the flow spec, then the flow matches
+		return true
+	} // TODO : implement the flow match analysis
+	return false
+
+}
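+
+// A worked sketch of the cookie coverage check above (values are hypothetical):
+// with mod.Cookie = 0x1200 and mod.CookieMask = 0xFF00, a flow whose Cookie is
+// 0x12AB is covered (0x12AB & 0xFF00 == 0x1200 == 0x1200 & 0xFF00), while a
+// flow whose Cookie is 0x13AB is not (0x1300 != 0x1200).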
+
+//FlowHasOutPort returns true if the flow has an output action with the given out_port
+func FlowHasOutPort(flow *ofp.OfpFlowStats, outPort uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+					if (action.GetOutput() != nil) && (action.GetOutput().Port == outPort) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FlowHasOutGroup returns true if the flow has a group action with the given out_group
+func FlowHasOutGroup(flow *ofp.OfpFlowStats, groupID uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_GROUP {
+					if (action.GetGroup() != nil) && (action.GetGroup().GroupId == groupID) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FindGroup returns index of group if found, else returns -1
+func FindGroup(groups []*ofp.OfpGroupEntry, groupId uint32) int {
+	for idx, group := range groups {
+		if group.Desc.GroupId == groupId {
+			return idx
+		}
+	}
+	return -1
+}
+
+func FlowsDeleteByGroupId(flows []*ofp.OfpFlowStats, groupId uint32) (bool, []*ofp.OfpFlowStats) {
+	toKeep := make([]*ofp.OfpFlowStats, 0)
+
+	for _, f := range flows {
+		if !FlowHasOutGroup(f, groupId) {
+			toKeep = append(toKeep, f)
+		}
+	}
+	return len(toKeep) < len(flows), toKeep
+}
+
+func ToOfpOxmField(from []*ofp.OfpOxmOfbField) []*ofp.OfpOxmField {
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range from {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return matchFields
+}
+
+//IsMulticastIp returns true if the ip starts with the bit sequence 1110;
+//false otherwise.
+func IsMulticastIp(ip uint32) bool {
+	return ip>>28 == 14
+}
+
+//ConvertToMulticastMacInt returns equivalent mac address of the given multicast ip address
+func ConvertToMulticastMacInt(ip uint32) uint64 {
+	//get the last 23 bits of the ip address via ip & 0x7FFFFF (binary 00000000011111111111111111111111)
+	theLast23BitsOfIp := ip & 8388607
+	// perform OR with 0x01005E000000 (decimal 1101088686080) to build the mcast mac address
+	return 1101088686080 | uint64(theLast23BitsOfIp)
+}
+
+//ConvertToMulticastMacBytes returns equivalent mac address of the given multicast ip address
+func ConvertToMulticastMacBytes(ip uint32) []byte {
+	mac := ConvertToMulticastMacInt(ip)
+	var b bytes.Buffer
+	// catalyze is a 48-bit mask selecting the most significant octet: 0xFF0000000000 (binary 11111111 followed by 40 zeroes)
+	catalyze := uint64(280375465082880)
+	//extract each octet, most significant first
+	for i := 0; i < 6; i++ {
+		if i != 0 {
+			catalyze = catalyze >> 8
+		}
+		octet := mac & catalyze
+		octetDecimal := octet >> uint8(40-i*8)
+		b.WriteByte(byte(octetDecimal))
+	}
+	return b.Bytes()
+}
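+
+// A worked sketch (illustrative only): for the multicast ip 224.0.0.22
+// (0xE0000016), the last 23 bits are 0x000016, so ORing with 0x01005E000000
+// yields 0x01005E000016, i.e. the mac address 01:00:5e:00:00:16.
+//
+//	ip := uint32(0xE0000016)                           // 224.0.0.22
+//	fmt.Println(IsMulticastIp(ip))                     // true
+//	fmt.Printf("%x\n", ConvertToMulticastMacBytes(ip)) // 01005e000016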
+
+func GetMeterIdFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni) or MeterId
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var meterID uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "found-metadata-for-egress/uni-port", log.Fields{"metadata": md})
+	if md != 0 {
+		meterID = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "found-meterID-in-write-metadata-action", log.Fields{"meterID": meterID})
+	}
+	return meterID
+}
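+
+// A worked sketch of the layout above (the metadata value is hypothetical):
+// for a write-metadata value of 0x0001004000000400, the C Tag is 0x0001, the
+// Technology Profile Id is 0x0040 (64), and the low 4 bytes 0x00000400 (1024)
+// are the meter id that GetMeterIdFromWriteMetadata returns.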
+
+func SetMeterIdToFlow(flow *ofp.OfpFlowStats, meterId uint32) {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					meterInst.MeterId = meterId
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
new file mode 100644
index 0000000..eee9631
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"time"
+
+	ca "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+const (
+	PartitionConsumer = iota
+	GroupCustomer     = iota
+)
+
+const (
+	OffsetNewest = -1
+	OffsetOldest = -2
+)
+
+const (
+	GroupIdKey = "groupId"
+	Offset     = "offset"
+)
+
+const (
+	DefaultKafkaAddress             = "127.0.0.1:9092"
+	DefaultGroupName                = "voltha"
+	DefaultSleepOnError             = 1
+	DefaultProducerFlushFrequency   = 10
+	DefaultProducerFlushMessages    = 10
+	DefaultProducerFlushMaxmessages = 100
+	DefaultProducerReturnSuccess    = true
+	DefaultProducerReturnErrors     = true
+	DefaultProducerRetryMax         = 3
+	DefaultProducerRetryBackoff     = time.Millisecond * 100
+	DefaultConsumerMaxwait          = 100
+	DefaultMaxProcessingTime        = 100
+	DefaultConsumerType             = PartitionConsumer
+	DefaultNumberPartitions         = 3
+	DefaultNumberReplicas           = 1
+	DefaultAutoCreateTopic          = false
+	DefaultMetadataMaxRetry         = 3
+	DefaultLivenessChannelInterval  = time.Second * 30
+)
+
+// Client represents the set of APIs a Kafka client must implement
+type Client interface {
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(ctx context.Context, topic *Topic) error
+	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
+	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness(ctx context.Context) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	EnableHealthinessChannel(ctx context.Context, enable bool) chan bool
+}
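+
+// A minimal sketch (illustrative only) of driving the Client interface; the
+// concrete client value and topic name are assumptions, not part of this file.
+//
+//	var client Client = newSomeKafkaClient() // e.g. a sarama-based implementation
+//	if err := client.Start(ctx); err != nil {
+//		return err
+//	}
+//	defer client.Stop(ctx)
+//	topic := &Topic{Name: "voltha"}
+//	ch, err := client.Subscribe(ctx, topic)
+//	// consume *ca.InterContainerMessage values from ch ...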
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
new file mode 100644
index 0000000..f4d7661
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that it's log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
new file mode 100644
index 0000000..962b932
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"github.com/buraksezer/consistent"
+	"github.com/cespare/xxhash"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"sync"
+)
+
+const (
+	// All the values below can be tuned to get optimal data distribution.  The numbers below seem to work well when
+	// supporting 1000-10000 devices and 1-20 replicas of a service
+
+	// Keys are distributed among partitions. Prime numbers are good to distribute keys uniformly.
+	DefaultPartitionCount = 1117
+
+	// Represents how many times a node is replicated on the consistent ring.
+	DefaultReplicationFactor = 117
+
+	// Load is used to calculate average load.
+	DefaultLoad = 1.1
+)
+
+type Endpoint string // Endpoint of a service instance.  When using kafka, this is the topic name of a service
+type ReplicaID int32 // The replication ID of a service instance
+
+type EndpointManager interface {
+
+	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
+	// now this will return the topic name
+	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
+
+	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
+	// devices owned by that service need to be reconciled
+	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
+
+	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used
+	// only by tests
+	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
+}
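+
+// A minimal usage sketch (illustrative only; the backend value and the ids are
+// assumptions): resolving which adapter replica owns a device.
+//
+//	em := NewEndpointManager(kvBackend) // kvBackend is an assumed *db.Backend
+//	endpoint, err := em.GetEndpoint(ctx, "device-1234", "openolt")
+//	// endpoint is the kafka topic of the owning replica, e.g. "openolt_1"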
+
+type service struct {
+	id             string // Id of the service.  The same id is used for all replicas
+	totalReplicas  int32
+	replicas       map[ReplicaID]Endpoint
+	consistentRing *consistent.Consistent
+}
+
+type endpointManager struct {
+	partitionCount           int
+	replicationFactor        int
+	load                     float64
+	backend                  *db.Backend
+	services                 map[string]*service
+	servicesLock             sync.RWMutex
+	deviceTypeServiceMap     map[string]string
+	deviceTypeServiceMapLock sync.RWMutex
+}
+
+type EndpointManagerOption func(*endpointManager)
+
+func PartitionCount(count int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.partitionCount = count
+	}
+}
+
+func ReplicationFactor(replicas int) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.replicationFactor = replicas
+	}
+}
+
+func Load(load float64) EndpointManagerOption {
+	return func(args *endpointManager) {
+		args.load = load
+	}
+}
+
+func newEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	tm := &endpointManager{
+		partitionCount:       DefaultPartitionCount,
+		replicationFactor:    DefaultReplicationFactor,
+		load:                 DefaultLoad,
+		backend:              backend,
+		services:             make(map[string]*service),
+		deviceTypeServiceMap: make(map[string]string),
+	}
+
+	for _, option := range opts {
+		option(tm)
+	}
+	return tm
+}
+
+func NewEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
+	return newEndpointManager(backend, opts...)
+}
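+
+// The constructor follows the functional-options pattern, so the defaults above
+// can be overridden selectively (a sketch; the values are illustrative only):
+//
+//	em := NewEndpointManager(kvBackend,
+//		PartitionCount(2003), // a prime, as recommended for DefaultPartitionCount
+//		ReplicationFactor(211),
+//		Load(1.25))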
+
+func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return "", err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return "", status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	endpoint := m.getEndPoint()
+	if endpoint == "" {
+		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
+	}
+	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	return endpoint, nil
+}
+
+func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return false, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return false, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica() == ReplicaID(replicaNumber), nil
+}
+
+func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+	if err != nil {
+		return 0, err
+	}
+	m, ok := owner.(Member)
+	if !ok {
+		return 0, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
+	}
+	return m.getReplica(), nil
+}
+
+func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
+	if err != nil {
+		return nil, err
+	}
+	key := ep.makeKey(deviceID, dType, serviceType)
+	return serv.consistentRing.LocateKey(key), nil
+}
+
+func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
+	// Check whether service exist
+	ep.servicesLock.RLock()
+	serv, serviceExist := ep.services[serviceType]
+	ep.servicesLock.RUnlock()
+
+	// Load the service and device types if needed
+	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+		if err := ep.loadServices(ctx); err != nil {
+			return nil, "", err
+		}
+
+		// Check whether the service exists now
+		ep.servicesLock.RLock()
+		serv, serviceExist = ep.services[serviceType]
+		ep.servicesLock.RUnlock()
+		if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
+			return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+		}
+	}
+
+	ep.deviceTypeServiceMapLock.RLock()
+	defer ep.deviceTypeServiceMapLock.RUnlock()
+	for dType, sType := range ep.deviceTypeServiceMap {
+		if sType == serviceType {
+			return serv, dType, nil
+		}
+	}
+	return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
+}
+
+func (ep *endpointManager) getConsistentConfig() consistent.Config {
+	return consistent.Config{
+		PartitionCount:    ep.partitionCount,
+		ReplicationFactor: ep.replicationFactor,
+		Load:              ep.load,
+		Hasher:            hasher{},
+	}
+}
+
+// loadServices loads the services (adapters) and device types into memory. Because the data set is small and is
+// stored as binary protobuf in the dB, it is simpler to reload all the data when an inconsistency is detected
+// than to watch for updates in the dB and act on them.
+func (ep *endpointManager) loadServices(ctx context.Context) error {
+	ep.servicesLock.Lock()
+	defer ep.servicesLock.Unlock()
+	ep.deviceTypeServiceMapLock.Lock()
+	defer ep.deviceTypeServiceMapLock.Unlock()
+
+	if ep.backend == nil {
+		return status.Error(codes.Aborted, "backend-not-set")
+	}
+	ep.services = make(map[string]*service)
+	ep.deviceTypeServiceMap = make(map[string]string)
+
+	// Load the adapters
+	blobs, err := ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "adapters")
+	if err != nil {
+		return err
+	}
+
+	// Data is marshalled as proto bytes in the data store
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		adapter := &voltha.Adapter{}
+		if err := proto.Unmarshal(data, adapter); err != nil {
+			return err
+		}
+		// A valid adapter should have the vendor field set
+		if adapter.Vendor != "" {
+			if _, ok := ep.services[adapter.Type]; !ok {
+				ep.services[adapter.Type] = &service{
+					id:             adapter.Type,
+					totalReplicas:  adapter.TotalReplicas,
+					replicas:       make(map[ReplicaID]Endpoint),
+					consistentRing: consistent.New(nil, ep.getConsistentConfig()),
+				}
+
+			}
+			currentReplica := ReplicaID(adapter.CurrentReplica)
+			endpoint := Endpoint(adapter.Endpoint)
+			ep.services[adapter.Type].replicas[currentReplica] = endpoint
+			ep.services[adapter.Type].consistentRing.Add(newMember(adapter.Id, adapter.Type, adapter.Vendor, endpoint, adapter.Version, currentReplica))
+		}
+	}
+	// Load the device types
+	blobs, err = ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "device_types")
+	if err != nil {
+		return err
+	}
+	for _, blob := range blobs {
+		data := blob.Value.([]byte)
+		deviceType := &voltha.DeviceType{}
+		if err := proto.Unmarshal(data, deviceType); err != nil {
+			return err
+		}
+		if _, ok := ep.deviceTypeServiceMap[deviceType.Id]; !ok {
+			ep.deviceTypeServiceMap[deviceType.Id] = deviceType.Adapter
+		}
+	}
+
+	// Log the loaded data in debug mode to facilitate troubleshooting
+	if logger.V(log.DebugLevel) {
+		for key, val := range ep.services {
+			members := val.consistentRing.GetMembers()
+			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			for _, m := range members {
+				n := m.(Member)
+				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+			}
+		}
+		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+	}
+	return nil
+}
+
+// makeKey builds the key that the hash function uses to compute the hash
+func (ep *endpointManager) makeKey(deviceID string, deviceType string, serviceType string) []byte {
+	return []byte(fmt.Sprintf("%s_%s_%s", serviceType, deviceType, deviceID))
+}
+
+// The consistent package requires a hasher function
+type hasher struct{}
+
+// Sum64 provides the hasher function.  Based upon numerous testing scenarios, the xxhash package seems to provide the
+// best distribution compared to other hash packages
+func (h hasher) Sum64(data []byte) uint64 {
+	return xxhash.Sum64(data)
+}
+
+// Member represents a member on the consistent ring
+type Member interface {
+	String() string
+	getReplica() ReplicaID
+	getEndPoint() Endpoint
+	getID() string
+	getServiceType() string
+}
+
+// member implements the Member interface
+type member struct {
+	id          string
+	serviceType string
+	vendor      string
+	version     string
+	replica     ReplicaID
+	endpoint    Endpoint
+}
+
+func newMember(ID string, serviceType string, vendor string, endPoint Endpoint, version string, replica ReplicaID) Member {
+	return &member{
+		id:          ID,
+		serviceType: serviceType,
+		vendor:      vendor,
+		version:     version,
+		replica:     replica,
+		endpoint:    endPoint,
+	}
+}
+
+func (m *member) String() string {
+	return string(m.endpoint)
+}
+
+func (m *member) getReplica() ReplicaID {
+	return m.replica
+}
+
+func (m *member) getEndPoint() Endpoint {
+	return m.endpoint
+}
+
+func (m *member) getID() string {
+	return m.id
+}
+
+func (m *member) getServiceType() string {
+	return m.serviceType
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
new file mode 100644
index 0000000..b149e7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
@@ -0,0 +1,1085 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opentracing/opentracing-go"
+)
+
+const (
+	DefaultMaxRetries     = 3
+	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
+)
+
+const (
+	TransactionKey = "transactionID"
+	FromTopic      = "fromTopic"
+)
+
+var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
+var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
+
+// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
+// obtained from that channel, this interface is invoked.  This is used to handle
+// async requests into the Core via the kafka messaging bus
+type requestHandlerChannel struct {
+	requesthandlerInterface interface{}
+	ch                      <-chan *ic.InterContainerMessage
+}
+
+// transactionChannel represents a combination of a topic and a channel onto which a response received
+// on the kafka bus will be sent
+type transactionChannel struct {
+	topic *Topic
+	ch    chan *ic.InterContainerMessage
+}
+
+type InterContainerProxy interface {
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	GetDefaultTopic() *Topic
+	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
+	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
+	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
+	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
+	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
+	DeleteTopic(ctx context.Context, topic Topic) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
+}
+
+// interContainerProxy represents the messaging proxy
+type interContainerProxy struct {
+	kafkaAddress                   string
+	defaultTopic                   *Topic
+	defaultRequestHandlerInterface interface{}
+	kafkaClient                    Client
+	doneCh                         chan struct{}
+	doneOnce                       sync.Once
+
+	// This map is used to map a topic to an interface and channel.   When a request is received
+	// on that channel (registered to the topic) then that interface is invoked.
+	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
+	lockTopicRequestHandlerChannelMap sync.RWMutex
+
+	// This map is used to map a channel to a response topic.   This channel handles all responses on that
+	// channel for that topic and forward them to the appropriate consumers channel, using the
+	// transactionIdToChannelMap.
+	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
+	lockTopicResponseChannelMap sync.RWMutex
+
+	// This map is used to map a transaction to a consumers channel.  This is used whenever a request has been
+	// sent out and we are waiting for a response.
+	transactionIdToChannelMap     map[string]*transactionChannel
+	lockTransactionIdToChannelMap sync.RWMutex
+}
+
+type InterContainerProxyOption func(*interContainerProxy)
+
+func InterContainerAddress(address string) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaAddress = address
+	}
+}
+
+func DefaultTopic(topic *Topic) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultTopic = topic
+	}
+}
+
+func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultRequestHandlerInterface = handler
+	}
+}
+
+func MsgClient(client Client) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func newInterContainerProxy(opts ...InterContainerProxyOption) *interContainerProxy {
+	proxy := &interContainerProxy{
+		kafkaAddress: DefaultKafkaAddress,
+		doneCh:       make(chan struct{}),
+	}
+
+	for _, option := range opts {
+		option(proxy)
+	}
+
+	return proxy
+}
+
+func NewInterContainerProxy(opts ...InterContainerProxyOption) InterContainerProxy {
+	return newInterContainerProxy(opts...)
+}
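+
+// The proxy is likewise assembled with functional options (a sketch; the
+// address, topic, client and handler values are illustrative only):
+//
+//	proxy := NewInterContainerProxy(
+//		InterContainerAddress("kafka:9092"),
+//		DefaultTopic(&Topic{Name: "openolt"}),
+//		MsgClient(kafkaClient),
+//		RequestHandlerInterface(handler))
+//	if err := proxy.Start(ctx); err != nil {
+//		return err
+//	}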
+
+func (kp *interContainerProxy) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-Proxy")
+
+	// Kafka MsgClient should already have been created.  If not, output fatal error
+	if kp.kafkaClient == nil {
+		logger.Fatal(ctx, "kafka-client-not-set")
+	}
+
+	// Start the kafka client
+	if err := kp.kafkaClient.Start(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the topic to response channel map
+	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
+	// Create the transactionId to Channel Map
+	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
+
+	// Create the topic to request channel map
+	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
+
+	return nil
+}
+
+func (kp *interContainerProxy) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-intercontainer-proxy")
+	kp.doneOnce.Do(func() { close(kp.doneCh) })
+	// TODO : Perform cleanup
+	kp.kafkaClient.Stop(ctx)
+	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+	}
+	err = kp.deleteAllTopicResponseChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+	}
+	kp.deleteAllTransactionIdToChannelMap(ctx)
+}
+
+func (kp *interContainerProxy) GetDefaultTopic() *Topic {
+	return kp.defaultTopic
+}
+
+// InvokeAsyncRPC is used to make an RPC request asynchronously
+func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
+
+	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, !waitForResponse)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise we use the proxy's default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.GetDefaultTopic()
+	}
+
+	chnl := make(chan *RpcResponse)
+
+	go func() {
+
+		// once we're done,
+		// close the response channel
+		defer close(chnl)
+
+		var err error
+		var protoRequest *ic.InterContainerMessage
+
+		// Encode the request
+		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+		if err != nil {
+			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+			chnl <- NewResponse(RpcFormattingError, err, nil)
+			return
+		}
+
+		// Subscribe for response, if needed, before sending request
+		var ch <-chan *ic.InterContainerMessage
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+			log.MarkSpanError(ctx, errors.New("failed-to-subscribe-for-response"))
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// Send request - if the topic is formatted with a device Id then we will send the request using a
+		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+
+		// if the message cannot be sent on kafka, publish an error response and close the channel
+		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// if the client is not waiting for a response send the ack and close the channel
+		chnl <- NewResponse(RpcSent, nil, nil)
+		if !waitForResponse {
+			return
+		}
+
+		defer func() {
+			// Remove the subscription for a response on return
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			}
+		}()
+
+		// Wait for response as well as timeout or cancellation
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
+				// msg is nil when the channel is closed; return rather than fall through and dereference it
+				return
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(ctx, msg); err != nil {
+				chnl <- NewResponse(RpcReply, err, nil)
+			} else {
+				if responseBody.Success {
+					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
+				} else {
+					// response body contains an error
+					unpackErr := &ic.Error{}
+					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
+						chnl <- NewResponse(RpcReply, err, nil)
+					} else {
+						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
+					}
+				}
+			}
+		case <-ctx.Done():
+			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
+			chnl <- NewResponse(RpcTimeout, err, nil)
+		case <-kp.doneCh:
+			chnl <- NewResponse(RpcSystemClosing, nil, nil)
+			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+		}
+	}()
+	return chnl
+}
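+
+// A consumption sketch for the channel returned above (illustrative only; the
+// rpc name, topic and key are assumptions):
+//
+//	respChnl := proxy.InvokeAsyncRPC(ctx, "get_device", toTopic, nil, true, deviceID)
+//	for rpcResponse := range respChnl {
+//		// the first message is the RpcSent acknowledgement; when waitForResponse
+//		// is true, a later message carries the reply, a transport error or a timeout
+//		_ = rpcResponse
+//	}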
+
+// Method to extract the OpenTracing Span from the Context and serialize it for transport over Kafka, embedded as an additional argument.
+// The additional argument is injected using "span" as the key and the Span marshalled into a byte slice as the value
+//
+// The span name is automatically constructed using the RPC name with the following convention (<rpc-name> represents the name of the invoked method):
+// - RPC invoked in Sync manner (WaitForResponse=true) : kafka-rpc-<rpc-name>
+// - RPC invoked in Async manner (WaitForResponse=false) : kafka-async-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Sync manner (WaitForResponse=true) : kafka-inter-adapter-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Async manner (WaitForResponse=false) : kafka-inter-adapter-async-rpc-<rpc-name>
+func (kp *interContainerProxy) embedSpanAsArg(ctx context.Context, rpc string, isAsync bool) ([]KVArg, opentracing.Span, context.Context) {
+	var err error
+	var newCtx context.Context
+	var spanToInject opentracing.Span
+
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpc == "process_inter_adapter_message" {
+		if msgType, ok := ctx.Value("inter-adapter-msg-type").(ic.InterAdapterMessageType_Types); ok {
+			spanName.WriteString("inter-adapter-")
+			rpc = msgType.String()
+		}
+	}
+
+	if isAsync {
+		spanName.WriteString("async-rpc-")
+	} else {
+		spanName.WriteString("rpc-")
+	}
+	spanName.WriteString(rpc)
+
+	if isAsync {
+		spanToInject, newCtx = log.CreateAsyncSpan(ctx, spanName.String())
+	} else {
+		spanToInject, newCtx = log.CreateChildSpan(ctx, spanName.String())
+	}
+
+	spanToInject.SetBaggageItem("rpc-span-name", spanName.String())
+
+	textMapCarrier := opentracing.TextMapCarrier(make(map[string]string))
+	if err = opentracing.GlobalTracer().Inject(spanToInject.Context(), opentracing.TextMap, textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-serialize-span-to-textmap", log.Fields{"span": spanToInject, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	var textMapJson []byte
+	if textMapJson, err = json.Marshal(textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-marshal-textmap-to-json-string", log.Fields{"textMap": textMapCarrier, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	spanArg := make([]KVArg, 1)
+	spanArg[0] = KVArg{Key: "span", Value: &ic.StrType{Val: string(textMapJson)}}
+	return spanArg, spanToInject, newCtx
+}
+
+// InvokeRPC is used to send a request to a given topic
+func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, false)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise we use the proxy's default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.defaultTopic
+	}
+
+	// Encode the request
+	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+	if err != nil {
+		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+		return false, nil
+	}
+
+	// Subscribe for response, if needed, before sending request
+	var ch <-chan *ic.InterContainerMessage
+	if waitForResponse {
+		var err error
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		}
+	}
+
+	// Send request - if the topic is formatted with a device Id then we will send the request using a
+	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+	//key := GetDeviceIdFromTopic(*toTopic)
+	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	go func() {
+		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			log.MarkSpanError(ctx, errors.New("send-failed"))
+			logger.Errorw(ctx, "send-failed", log.Fields{
+				"topic": toTopic,
+				"key":   key,
+				"error": err})
+		}
+	}()
+
+	if waitForResponse {
+		// Create a child context based on the parent context, if any
+		var cancel context.CancelFunc
+		childCtx := context.Background()
+		if ctx == nil {
+			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
+		} else {
+			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
+		}
+		defer cancel()
+
+		// Wait for response as well as timeout or cancellation
+		// Remove the subscription for a response on return
+		defer func() {
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
+					"id":    protoRequest.Header.Id,
+					"error": err})
+			}
+		}()
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				protoError := &ic.Error{Reason: "channel-closed"}
+				var marshalledArg *any.Any
+				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+					return false, nil // Should never happen
+				}
+				return false, marshalledArg
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			var responseBody *ic.InterContainerResponseBody
+			var err error
+			if responseBody, err = decodeResponse(ctx, msg); err != nil {
+				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
+				// FIXME we should return something
+			}
+			return responseBody.Success, responseBody.Result
+		case <-ctx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-childCtx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-kp.doneCh:
+			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			return true, nil
+		}
+	}
+	return true, nil
+}
+
+// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
+// when a message is received on a given topic
+func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
+
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
+		//if ch, err = kp.Subscribe(topic); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+
+	kp.defaultRequestHandlerInterface = handler
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, handler)
+
+	return nil
+}
+
+// SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
+// when a message is received on a given topic.  So far there is only 1 target registered per microservice
+func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
+
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
+
+	return nil
+}
+
+func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
+}
+
+func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		var err error
+		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
+		}
+		delete(kp.topicToResponseChannelMap, topic)
+		return err
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-response-channel")
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToResponseChannelMap {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToResponseChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
+		kp.topicToRequestHandlerChannelMap[topic] = arg
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			return err
+		}
+		delete(kp.topicToRequestHandlerChannelMap, topic)
+		return nil
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-request-channel")
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToRequestHandlerChannelMap {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToRequestHandlerChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
+		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
+		// Close the channel first
+		close(transChannel.ch)
+		delete(kp.transactionIdToChannelMap, id)
+	}
+}
+
+func (kp *interContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		if value.topic.Name == id {
+			close(value.ch)
+			delete(kp.transactionIdToChannelMap, key)
+		}
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
+	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		close(value.ch)
+		delete(kp.transactionIdToChannelMap, key)
+	}
+}
+
+func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
+	// If we have any consumers on that topic we need to close them
+	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	}
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	}
+	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
+
+	return kp.kafkaClient.DeleteTopic(ctx, &topic)
+}
+
+func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
+	// Encode the response argument - needs to be a proto message
+	if returnedVal == nil {
+		return nil, nil
+	}
+	protoValue, ok := returnedVal.(proto.Message)
+	if !ok {
+		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		err := errors.New("response-value-not-proto-message")
+		return nil, err
+	}
+
+	// Marshal the returned value, if any
+	var marshalledReturnedVal *any.Any
+	var err error
+	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
+		return nil, err
+	}
+	return marshalledReturnedVal, nil
+}
+
+func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	responseBody := &ic.InterContainerResponseBody{
+		Success: false,
+		Result:  nil,
+	}
+	var marshalledResponseBody *any.Any
+	var err error
+	// Error should never happen here
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}
+
+}
+
+//encodeResponse encodes a response to send over kafka and returns an InterContainerMessage on success
+//or an error on failure
+func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		KeyTopic:  request.Header.KeyTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+
+	// Go over all returned values
+	var marshalledReturnedVal *any.Any
+	var err error
+
+	// for now we support only 1 returned value - (excluding the error)
+	if len(returnedValues) > 0 {
+		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		}
+	}
+
+	responseBody := &ic.InterContainerResponseBody{
+		Success: success,
+		Result:  marshalledReturnedVal,
+	}
+
+	// Marshal the response body
+	var marshalledResponseBody *any.Any
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		return nil, err
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}, nil
+}
+
+func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+	myClassValue := reflect.ValueOf(myClass)
+	// Capitalize the first letter of funcName to work around Go's requirement that a method be exported
+	// (start with a capital letter) to be invoked from a different package
+	funcName = strings.Title(funcName)
+	m := myClassValue.MethodByName(funcName)
+	if !m.IsValid() {
+		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
+	}
+	in := make([]reflect.Value, len(params)+1)
+	in[0] = reflect.ValueOf(ctx)
+	for i, param := range params {
+		in[i+1] = reflect.ValueOf(param)
+	}
+	out = m.Call(in)
+	return
+}
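+
+// A minimal dispatch sketch (illustrative only; the handler type, method and
+// argument types are assumptions): strings.Title maps "get_device" to
+// "Get_device" (underscore is not a word separator), so a handler exposing
+// that exported method is invoked with ctx prepended to the arguments.
+//
+//	type handler struct{}
+//
+//	func (h *handler) Get_device(ctx context.Context, id *ic.StrType) error { return nil }
+//
+//	out, err := CallFuncByName(ctx, &handler{}, "get_device", &ic.StrType{Val: "device-1234"})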
+
+func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+	arg := &KVArg{
+		Key:   TransactionKey,
+		Value: &ic.StrType{Val: transactionId},
+	}
+
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   arg.Key,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   FromTopic,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+// Method to extract the Span embedded in Kafka RPC request on the receiver side. If span is found embedded in the KV args (with key as "span"),
+// it is de-serialized and injected into the Context to be carried forward by the RPC request processor thread.
+// If no span is found embedded, even then a span is created with name as "kafka-rpc-<rpc-name>" to enrich the Context for RPC calls coming
+// from components currently not sending the span (e.g. openonu adapter)
+func (kp *interContainerProxy) enrichContextWithSpan(ctx context.Context, rpcName string, args []*ic.Argument) (opentracing.Span, context.Context) {
+
+	for _, arg := range args {
+		if arg.Key == "span" {
+			var err error
+			var textMapString ic.StrType
+			if err = ptypes.UnmarshalAny(arg.Value, &textMapString); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-kvarg-to-textmap-string", log.Fields{"value": arg.Value})
+				break
+			}
+
+			spanTextMap := make(map[string]string)
+			if err = json.Unmarshal([]byte(textMapString.Val), &spanTextMap); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-textmap-from-json-string", log.Fields{"textMapString": textMapString, "error": err})
+				break
+			}
+
+			var spanContext opentracing.SpanContext
+			if spanContext, err = opentracing.GlobalTracer().Extract(opentracing.TextMap, opentracing.TextMapCarrier(spanTextMap)); err != nil {
+				logger.Warnw(ctx, "unable-to-deserialize-textmap-to-span", log.Fields{"textMap": spanTextMap, "error": err})
+				break
+			}
+
+			var receivedRpcName string
+			extractBaggage := func(k, v string) bool {
+				if k == "rpc-span-name" {
+					receivedRpcName = v
+					return false
+				}
+				return true
+			}
+
+			spanContext.ForeachBaggageItem(extractBaggage)
+
+			return opentracing.StartSpanFromContext(ctx, receivedRpcName, opentracing.FollowsFrom(spanContext))
+		}
+	}
+
+	// Create new Child Span with rpc as name if no span details were received in kafka arguments
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpcName == "process_inter_adapter_message" {
+		for _, arg := range args {
+			if arg.Key == "msg" {
+				iamsg := ic.InterAdapterMessage{}
+				if err := ptypes.UnmarshalAny(arg.Value, &iamsg); err == nil {
+					spanName.WriteString("inter-adapter-")
+					rpcName = iamsg.Header.Type.String()
+				}
+			}
+		}
+	}
+
+	spanName.WriteString("rpc-")
+	spanName.WriteString(rpcName)
+
+	return opentracing.StartSpanFromContext(ctx, spanName.String())
+}
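+
+// Editor's note, not part of this change: a sketch of the sender-side pairing
+// for the extraction above.  The span context is injected into a TextMap,
+// JSON-encoded, and attached as a KV arg under the key "span"; variable names
+// are made up.
+//
+//	span, ctx := opentracing.StartSpanFromContext(ctx, "my-rpc")
+//	defer span.Finish()
+//	carrier := opentracing.TextMapCarrier(map[string]string{})
+//	if err := opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, carrier); err == nil {
+//		if encoded, err := json.Marshal(carrier); err == nil {
+//			if v, err := ptypes.MarshalAny(&ic.StrType{Val: string(encoded)}); err == nil {
+//				args = append(args, &ic.Argument{Key: "span", Value: v})
+//			}
+//		}
+//	}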
+
+func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
+
+	// First extract the header to know whether this is a request - responses are handled by a different handler
+	if msg.Header.Type == ic.MessageType_REQUEST {
+		var out []reflect.Value
+		var err error
+
+		// Get the request body
+		requestBody := &ic.InterContainerRequestBody{}
+		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
+		} else {
+			span, ctx := kp.enrichContextWithSpan(ctx, requestBody.Rpc, requestBody.Args)
+			defer span.Finish()
+
+			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			// let the callee unpack the arguments as it's the only one that knows the real proto type
+			// Augment the requestBody with the message Id as it will be used in scenarios where cores
+			// are set in pairs and competing
+			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
+
+			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
+			// needs to send an unsolicited message to the currently requested container
+			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
+
+			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
+			if err != nil {
+				logger.Warn(ctx, err)
+			}
+		}
+		// Response required?
+		if requestBody.ResponseRequired {
+			// If we already have an error before then just return that
+			var returnError *ic.Error
+			var returnedValues []interface{}
+			var success bool
+			if err != nil {
+				returnError = &ic.Error{Reason: err.Error()}
+				returnedValues = make([]interface{}, 1)
+				returnedValues[0] = returnError
+			} else {
+				returnedValues = make([]interface{}, 0)
+				// Check for errors first
+				lastIndex := len(out) - 1
+				if out[lastIndex].Interface() != nil { // Error
+					if retError, ok := out[lastIndex].Interface().(error); ok {
+						if retError.Error() == ErrorTransactionNotAcquired.Error() {
+							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							return // Ignore - process is in competing mode and ignored transaction
+						}
+						returnError = &ic.Error{Reason: retError.Error()}
+						returnedValues = append(returnedValues, returnError)
+					} else { // Should never happen
+						returnError = &ic.Error{Reason: "incorrect-error-returns"}
+						returnedValues = append(returnedValues, returnError)
+					}
+				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
+					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					return // Ignore - should not happen
+				} else { // Non-error case
+					success = true
+					for idx, val := range out {
+						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						if idx != lastIndex {
+							returnedValues = append(returnedValues, val.Interface())
+						}
+					}
+				}
+			}
+
+			var icm *ic.InterContainerMessage
+			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
+				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(ctx, msg)
+			}
+			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
+			// by providing a message key.   The key is encoded in the topic name.  If the deviceId is not
+			// present then the key will be empty, hence all messages for a given topic will be sent to all
+			// partitions.
+			replyTopic := &Topic{Name: msg.Header.FromTopic}
+			key := msg.Header.KeyTopic
+			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			// TODO: handle error response.
+			go func() {
+				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
+					logger.Errorw(ctx, "send-reply-failed", log.Fields{
+						"topic": replyTopic,
+						"key":   key,
+						"error": err})
+				}
+			}()
+		}
+	} else if msg.Header.Type == ic.MessageType_RESPONSE {
+		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(ctx, msg)
+	} else {
+		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
+	}
+}
+
+func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+	//	Wait for messages
+	for msg := range ch {
+		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(context.Background(), msg, targetInterface)
+	}
+}
+
+func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.RLock()
+	defer kp.lockTransactionIdToChannelMap.RUnlock()
+	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
+		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		return
+	}
+	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
+}
+
+// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
+// This method is built to prevent all subscribers to receive all messages as is the case of the Subscribe
+// API. There is one response channel waiting for kafka messages before dispatching the message to the
+// corresponding waiting channel
+func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+
+	// Create a channel specific to this consumer.  We cannot use the channel from the kafka client as it
+	// would broadcast any message for this topic to all channels waiting on it.
+	// Set channel size to 1 to prevent deadlock, see VOL-2708
+	ch := make(chan *ic.InterContainerMessage, 1)
+	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
+
+	return ch, nil
+}
+
+func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
+	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+	kp.deleteFromTransactionIdToChannelMap(trnsId)
+	return nil
+}
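+
+// Editor's note, not part of this change: a sketch of the request/response
+// flow built on the two helpers above, roughly as an InvokeRPC-style caller
+// would use them; replyTopic and requestHeader are made up.
+//
+//	ch, _ := kp.subscribeForResponse(ctx, *replyTopic, requestHeader.Id)
+//	defer func() { _ = kp.unSubscribeForResponse(ctx, requestHeader.Id) }()
+//	// ... send the request on kafka ...
+//	select {
+//	case resp := <-ch:
+//		body, err := decodeResponse(ctx, resp)
+//		_, _ = body, err
+//	case <-ctx.Done():
+//		// timeout or cancellation
+//	}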
+
+func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
+	return kp.kafkaClient.SendLiveness(ctx)
+}
+
+//encodeRequest formats a request to send over kafka; it returns an InterContainerMessage on success
+//or an error on failure
+func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+	requestHeader := &ic.Header{
+		Id:        uuid.New().String(),
+		Type:      ic.MessageType_REQUEST,
+		FromTopic: replyTopic.Name,
+		ToTopic:   toTopic.Name,
+		KeyTopic:  key,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	requestBody := &ic.InterContainerRequestBody{
+		Rpc:              rpc,
+		ResponseRequired: true,
+		ReplyToTopic:     replyTopic.Name,
+	}
+
+	for _, arg := range kvArgs {
+		if arg == nil {
+			// In case the caller sends an array with empty args
+			continue
+		}
+		var marshalledArg *any.Any
+		var err error
+		// ascertain the value interface type is a proto.Message
+		protoValue, ok := arg.Value.(proto.Message)
+		if !ok {
+			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			err := errors.New("argument-value-not-proto-message")
+			return nil, err
+		}
+		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+			return nil, err
+		}
+		protoArg := &ic.Argument{
+			Key:   arg.Key,
+			Value: marshalledArg,
+		}
+		requestBody.Args = append(requestBody.Args, protoArg)
+	}
+
+	var marshalledData *any.Any
+	var err error
+	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+		return nil, err
+	}
+	request := &ic.InterContainerMessage{
+		Header: requestHeader,
+		Body:   marshalledData,
+	}
+	return request, nil
+}
+
+func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+	//	Extract the message body
+	responseBody := ic.InterContainerResponseBody{}
+	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		return nil, err
+	}
+	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+
+	return &responseBody, nil
+
+}
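+
+// Editor's note, not part of this change: a sketch of encoding a request and
+// decoding its reply with the helpers above.  The rpc name, topic names and
+// argument are all made up.
+//
+//	req, err := encodeRequest(ctx, "get_device", &Topic{Name: "adapter"},
+//		&Topic{Name: "rwcore"}, "device-key",
+//		&KVArg{Key: "device_id", Value: &ic.StrType{Val: "abc"}})
+//	if err == nil {
+//		// send req over kafka, then on the reply message:
+//		// respBody, err := decodeResponse(ctx, reply)
+//	}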
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
new file mode 100644
index 0000000..3273470
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
@@ -0,0 +1,1157 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Shopify/sarama"
+	scc "github.com/bsm/sarama-cluster"
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+// consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
+// topic, the consumer(s) broadcast the message to all the listening channels.  The consumer can be a partition
+// consumer or a group consumer
+type consumerChannels struct {
+	consumers []interface{}
+	channels  []chan *ic.InterContainerMessage
+}
+
+// static check to ensure SaramaClient implements Client
+var _ Client = &SaramaClient{}
+
+// SaramaClient represents the messaging proxy
+type SaramaClient struct {
+	cAdmin                        sarama.ClusterAdmin
+	KafkaAddress                  string
+	producer                      sarama.AsyncProducer
+	consumer                      sarama.Consumer
+	groupConsumers                map[string]*scc.Consumer
+	lockOfGroupConsumers          sync.RWMutex
+	consumerGroupPrefix           string
+	consumerType                  int
+	consumerGroupName             string
+	producerFlushFrequency        int
+	producerFlushMessages         int
+	producerFlushMaxmessages      int
+	producerRetryMax              int
+	producerRetryBackOff          time.Duration
+	producerReturnSuccess         bool
+	producerReturnErrors          bool
+	consumerMaxwait               int
+	maxProcessingTime             int
+	numPartitions                 int
+	numReplicas                   int
+	autoCreateTopic               bool
+	doneCh                        chan int
+	metadataCallback              func(fromTopic string, timestamp time.Time)
+	topicToConsumerChannelMap     map[string]*consumerChannels
+	lockTopicToConsumerChannelMap sync.RWMutex
+	topicLockMap                  map[string]*sync.RWMutex
+	lockOfTopicLockMap            sync.RWMutex
+	metadataMaxRetry              int
+	alive                         bool
+	livenessMutex                 sync.Mutex
+	liveness                      chan bool
+	livenessChannelInterval       time.Duration
+	lastLivenessTime              time.Time
+	started                       bool
+	healthinessMutex              sync.Mutex
+	healthy                       bool
+	healthiness                   chan bool
+}
+
+type SaramaClientOption func(*SaramaClient)
+
+func Address(address string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.KafkaAddress = address
+	}
+}
+
+func ConsumerGroupPrefix(prefix string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupPrefix = prefix
+	}
+}
+
+func ConsumerGroupName(name string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupName = name
+	}
+}
+
+func ConsumerType(consumer int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerType = consumer
+	}
+}
+
+func ProducerFlushFrequency(frequency int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushFrequency = frequency
+	}
+}
+
+func ProducerFlushMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMessages = num
+	}
+}
+
+func ProducerFlushMaxMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMaxmessages = num
+	}
+}
+
+func ProducerMaxRetries(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryMax = num
+	}
+}
+
+func ProducerRetryBackoff(duration time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryBackOff = duration
+	}
+}
+
+func ProducerReturnOnErrors(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnErrors = opt
+	}
+}
+
+func ProducerReturnOnSuccess(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnSuccess = opt
+	}
+}
+
+func ConsumerMaxWait(wait int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerMaxwait = wait
+	}
+}
+
+func MaxProcessingTime(pTime int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.maxProcessingTime = pTime
+	}
+}
+
+func NumPartitions(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numPartitions = number
+	}
+}
+
+func NumReplicas(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numReplicas = number
+	}
+}
+
+func AutoCreateTopic(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.autoCreateTopic = opt
+	}
+}
+
+func MetadatMaxRetries(retry int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.metadataMaxRetry = retry
+	}
+}
+
+func LivenessChannelInterval(opt time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.livenessChannelInterval = opt
+	}
+}
+
+func NewSaramaClient(opts ...SaramaClientOption) *SaramaClient {
+	client := &SaramaClient{
+		KafkaAddress: DefaultKafkaAddress,
+	}
+	client.consumerType = DefaultConsumerType
+	client.producerFlushFrequency = DefaultProducerFlushFrequency
+	client.producerFlushMessages = DefaultProducerFlushMessages
+	client.producerFlushMaxmessages = DefaultProducerFlushMaxmessages
+	client.producerReturnErrors = DefaultProducerReturnErrors
+	client.producerReturnSuccess = DefaultProducerReturnSuccess
+	client.producerRetryMax = DefaultProducerRetryMax
+	client.producerRetryBackOff = DefaultProducerRetryBackoff
+	client.consumerMaxwait = DefaultConsumerMaxwait
+	client.maxProcessingTime = DefaultMaxProcessingTime
+	client.numPartitions = DefaultNumberPartitions
+	client.numReplicas = DefaultNumberReplicas
+	client.autoCreateTopic = DefaultAutoCreateTopic
+	client.metadataMaxRetry = DefaultMetadataMaxRetry
+	client.livenessChannelInterval = DefaultLivenessChannelInterval
+
+	for _, option := range opts {
+		option(client)
+	}
+
+	client.groupConsumers = make(map[string]*scc.Consumer)
+
+	client.lockTopicToConsumerChannelMap = sync.RWMutex{}
+	client.topicLockMap = make(map[string]*sync.RWMutex)
+	client.lockOfTopicLockMap = sync.RWMutex{}
+	client.lockOfGroupConsumers = sync.RWMutex{}
+
+	// healthy and alive until proven otherwise
+	client.alive = true
+	client.healthy = true
+
+	return client
+}
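+
+// Editor's note, not part of this change: a sketch of building a client with
+// the functional options above; the address and tuning values are made up.
+//
+//	client := NewSaramaClient(
+//		Address("kafka.voltha.svc:9092"),
+//		ConsumerType(GroupCustomer),
+//		ConsumerGroupPrefix("voltha-"),
+//		NumPartitions(3),
+//		NumReplicas(1),
+//		AutoCreateTopic(true))
+//	if err := client.Start(ctx); err != nil {
+//		// handle startup failure
+//	}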
+
+func (sc *SaramaClient) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-kafka-sarama-client")
+
+	// Create the Done channel
+	sc.doneCh = make(chan int, 1)
+
+	var err error
+
+	// Add a cleanup in case of failure to startup
+	defer func() {
+		if err != nil {
+			sc.Stop(ctx)
+		}
+	}()
+
+	// Create the Cluster Admin
+	if err = sc.createClusterAdmin(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-cluster-admin", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the Publisher
+	if err := sc.createPublisher(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-publisher", log.Fields{"error": err})
+		return err
+	}
+
+	if sc.consumerType == DefaultConsumerType {
+		// Create the master consumers
+		if err := sc.createConsumer(ctx); err != nil {
+			logger.Errorw(ctx, "Cannot-create-kafka-consumers", log.Fields{"error": err})
+			return err
+		}
+	}
+
+	// Create the topic to consumers/channel map
+	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
+
+	logger.Info(ctx, "kafka-sarama-client-started")
+
+	sc.started = true
+
+	return nil
+}
+
+func (sc *SaramaClient) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-sarama-client")
+
+	sc.started = false
+
+	//Send a message over the done channel to close all long running routines
+	sc.doneCh <- 1
+
+	if sc.producer != nil {
+		if err := sc.producer.Close(); err != nil {
+			logger.Errorw(ctx, "closing-producer-failed", log.Fields{"error": err})
+		}
+	}
+
+	if sc.consumer != nil {
+		if err := sc.consumer.Close(); err != nil {
+			logger.Errorw(ctx, "closing-partition-consumer-failed", log.Fields{"error": err})
+		}
+	}
+
+	for key, val := range sc.groupConsumers {
+		logger.Debugw(ctx, "closing-group-consumer", log.Fields{"topic": key})
+		if err := val.Close(); err != nil {
+			logger.Errorw(ctx, "closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+		}
+	}
+
+	if sc.cAdmin != nil {
+		if err := sc.cAdmin.Close(); err != nil {
+			logger.Errorw(ctx, "closing-cluster-admin-failed", log.Fields{"error": err})
+		}
+	}
+
+	//TODO: Clear the consumers map
+	//sc.clearConsumerChannelMap()
+
+	logger.Info(ctx, "sarama-client-stopped")
+}
+
+//createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
+// the invoking function must hold the lock
+func (sc *SaramaClient) createTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+	// Set the topic details
+	topicDetail := &sarama.TopicDetail{}
+	topicDetail.NumPartitions = int32(numPartition)
+	topicDetail.ReplicationFactor = int16(repFactor)
+	topicDetail.ConfigEntries = make(map[string]*string)
+	topicDetails := make(map[string]*sarama.TopicDetail)
+	topicDetails[topic.Name] = topicDetail
+
+	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
+		if err == sarama.ErrTopicAlreadyExists {
+			//	Not an error
+			logger.Debugw(ctx, "topic-already-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err})
+		return err
+	}
+	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
+	// do so.
+	logger.Debugw(ctx, "topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	return nil
+}
+
+//CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
+// ensure no two goroutines are performing operations on the same topic
+func (sc *SaramaClient) CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	return sc.createTopic(ctx, topic, numPartition, repFactor)
+}
+
+//DeleteTopic removes a topic from the kafka Broker
+func (sc *SaramaClient) DeleteTopic(ctx context.Context, topic *Topic) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	// Remove the topic from the broker
+	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
+		if err == sarama.ErrUnknownTopicOrPartition {
+			//	Not an error as the topic does not exist
+			logger.Debugw(ctx, "topic-not-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw(ctx, "delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+
+	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
+	if err := sc.clearTopicFromConsumerChannelMap(ctx, *topic); err != nil {
+		logger.Errorw(ctx, "failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+	return nil
+}
+
+// Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
+// messages from that topic
+func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw(ctx, "subscribe", log.Fields{"topic": topic.Name})
+
+	// If a consumer already exists for that topic then reuse it
+	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
+		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
+		// Create a channel specific to that consumer and add it to the consumer channel map
+		ch := make(chan *ic.InterContainerMessage)
+		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
+		return ch, nil
+	}
+
+	// Register for the topic and set it up
+	var consumerListeningChannel chan *ic.InterContainerMessage
+	var err error
+
+	// Use the consumerType option to figure out the type of consumer to launch
+	if sc.consumerType == PartitionConsumer {
+		if sc.autoCreateTopic {
+			if err = sc.createTopic(ctx, topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+				return nil, err
+			}
+		}
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(ctx, topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+	} else if sc.consumerType == GroupCustomer {
+		// TODO: create topic if auto create is on.  There is an issue with the sarama cluster library that
+		// does not consume from a precreated topic in some scenarios
+		//if sc.autoCreateTopic {
+		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
+		//		logger.Errorw(ctx, "create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		return nil, err
+		//	}
+		//}
+		//groupId := sc.consumerGroupName
+		groupId := getGroupId(kvArgs...)
+		// Include the group prefix
+		if groupId != "" {
+			groupId = sc.consumerGroupPrefix + groupId
+		} else {
+			// Need to use a unique group Id per topic
+			groupId = sc.consumerGroupPrefix + topic.Name
+		}
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(ctx, topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+			return nil, err
+		}
+
+	} else {
+		logger.Warnw(ctx, "unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		return nil, errors.New("unknown-consumer-type")
+	}
+
+	return consumerListeningChannel, nil
+}
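+
+// Editor's note, not part of this change: a sketch of subscribing and draining
+// the returned channel; the topic name is made up.
+//
+//	msgCh, err := sc.Subscribe(ctx, &Topic{Name: "rwcore"})
+//	if err == nil {
+//		go func() {
+//			for msg := range msgCh {
+//				// handle the *ic.InterContainerMessage
+//				_ = msg
+//			}
+//		}()
+//	}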
+
+//UnSubscribe unsubscribes a consumer from a given topic
+func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw(ctx, "unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	var err error
+	if err = sc.removeChannelFromConsumerChannelMap(ctx, *topic, ch); err != nil {
+		logger.Errorw(ctx, "failed-removing-channel", log.Fields{"error": err})
+	}
+	if err = sc.deleteFromGroupConsumers(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "failed-deleting-group-consumer", log.Fields{"error": err})
+	}
+	return err
+}
+
+func (sc *SaramaClient) SubscribeForMetadata(ctx context.Context, callback func(fromTopic string, timestamp time.Time)) {
+	sc.metadataCallback = callback
+}
+
+func (sc *SaramaClient) updateLiveness(ctx context.Context, alive bool) {
+	// Post a consistent stream of liveness data to the channel,
+	// so that in a live state, the core does not timeout and
+	// send a forced liveness message. Production of liveness
+	// events to the channel is rate-limited by livenessChannelInterval.
+	sc.livenessMutex.Lock()
+	defer sc.livenessMutex.Unlock()
+	if sc.liveness != nil {
+		if sc.alive != alive {
+			logger.Info(ctx, "update-liveness-channel-because-change")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
+			logger.Info(ctx, "update-liveness-channel-because-interval")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Only emit a log message when the state changes
+	if sc.alive != alive {
+		logger.Info(ctx, "set-client-alive", log.Fields{"alive": alive})
+		sc.alive = alive
+	}
+}
+
+// Once unhealthy, we never go back
+func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
+	sc.healthinessMutex.Lock()
+	defer sc.healthinessMutex.Unlock()
+	sc.healthy = false
+	if sc.healthiness != nil {
+		logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		sc.healthiness <- sc.healthy
+	}
+}
+
+func (sc *SaramaClient) isLivenessError(ctx context.Context, err error) bool {
+	// Sarama producers and consumers encapsulate the error inside
+	// a ProducerError or ConsumerError struct.
+	if prodError, ok := err.(*sarama.ProducerError); ok {
+		err = prodError.Err
+	} else if consumerError, ok := err.(*sarama.ConsumerError); ok {
+		err = consumerError.Err
+	}
+
+	// Sarama-Cluster wraps the error in a ClusterError struct, which we
+	// cannot compare by reference.  To handle that, the best we can do is
+	// compare the error strings.
+
+	switch err.Error() {
+	case context.DeadlineExceeded.Error():
+		logger.Info(ctx, "is-liveness-error-timeout")
+		return true
+	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
+		logger.Info(ctx, "is-liveness-error-no-brokers")
+		return true
+	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
+		logger.Info(ctx, "is-liveness-error-shutting-down")
+		return true
+	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
+		logger.Info(ctx, "is-liveness-error-not-available")
+		return true
+	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
+		logger.Info(ctx, "is-liveness-error-circuit-breaker-open")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
+		logger.Info(ctx, "is-liveness-error-connection-refused")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
+		logger.Info(ctx, "is-liveness-error-io-timeout")
+		return true
+	}
+
+	// Other errors shouldn't trigger a loss of liveness
+
+	logger.Infow(ctx, "is-liveness-error-ignored", log.Fields{"err": err})
+
+	return false
+}
+
+// Send formats and sends the request onto the kafka messaging bus.
+func (sc *SaramaClient) Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error {
+
+	// Assert message is a proto message
+	var protoMsg proto.Message
+	var ok bool
+	// ascertain the value interface type is a proto.Message
+	if protoMsg, ok = msg.(proto.Message); !ok {
+		logger.Warnw(ctx, "message-not-proto-message", log.Fields{"msg": msg})
+		return fmt.Errorf("not-a-proto-msg-%s", msg)
+	}
+
+	var marshalled []byte
+	var err error
+	//	Create the Sarama producer message
+	if marshalled, err = proto.Marshal(protoMsg); err != nil {
+		logger.Errorw(ctx, "marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		return err
+	}
+	key := ""
+	if len(keys) > 0 {
+		key = keys[0] // Only the first key is relevant
+	}
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: topic.Name,
+		Key:   sarama.StringEncoder(key),
+		Value: sarama.ByteEncoder(marshalled),
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw(ctx, "message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw(ctx, "error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
+		}
+		return notOk
+	}
+	return nil
+}
+
+// Enable the liveness monitor channel. This channel will report
+// a "true" or "false" on every publish, which indicates whether
+// or not the channel is still live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
+	if enable {
+		sc.livenessMutex.Lock()
+		defer sc.livenessMutex.Unlock()
+		if sc.liveness == nil {
+			logger.Info(ctx, "kafka-create-liveness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.liveness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.liveness <- sc.alive
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// liveness monitoring
+		panic("Turning off liveness reporting is not supported")
+	}
+	return sc.liveness
+}
+
+// Enable the Healthiness monitor channel. This channel will report "false"
+// if the kafka consumers die, or some other problem occurs which is
+// catastrophic that would require re-creating the client.
+func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+	if enable {
+		sc.healthinessMutex.Lock()
+		defer sc.healthinessMutex.Unlock()
+		if sc.healthiness == nil {
+			logger.Info(ctx, "kafka-create-healthiness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.healthiness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.healthiness <- sc.healthy
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// healthiness monitoring
+		panic("Turning off healthiness reporting is not supported")
+	}
+	return sc.healthiness
+}
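+
+// Editor's note, not part of this change: a sketch of a monitor goroutine
+// draining the liveness and healthiness channels to drive a readiness probe;
+// the probe wiring itself is left out.
+//
+//	live := sc.EnableLivenessChannel(ctx, true)
+//	healthy := sc.EnableHealthinessChannel(ctx, true)
+//	go func() {
+//		for {
+//			select {
+//			case ok := <-live:
+//				// ok reports whether kafka is currently reachable
+//				_ = ok
+//			case ok := <-healthy:
+//				// false means the client is broken and must be re-created
+//				_ = ok
+//			}
+//		}
+//	}()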
+
+// send an empty message on the liveness channel to check whether connectivity has
+// been restored.
+func (sc *SaramaClient) SendLiveness(ctx context.Context) error {
+	if !sc.started {
+		return fmt.Errorf("SendLiveness() called while not started")
+	}
+
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: "_liveness_test",
+		Value: sarama.StringEncoder(time.Now().Format(time.RFC3339)), // for debugging / informative use
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw(ctx, "liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw(ctx, "liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
+		}
+		return notOk
+	}
+	return nil
+}
+
+// getGroupId returns the group id from the key-value args.
+func getGroupId(kvArgs ...*KVArg) string {
+	for _, arg := range kvArgs {
+		if arg.Key == GroupIdKey {
+			return arg.Value.(string)
+		}
+	}
+	return ""
+}
+
+// getOffset returns the offset from the key-value args.
+func getOffset(kvArgs ...*KVArg) int64 {
+	for _, arg := range kvArgs {
+		if arg.Key == Offset {
+			return arg.Value.(int64)
+		}
+	}
+	return sarama.OffsetNewest
+}
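+
+// Editor's note, not part of this change: a sketch of passing a consumer group
+// id and a starting offset to Subscribe through KV args, matching the two
+// getters above; the topic and group names are made up.
+//
+//	msgCh, err := sc.Subscribe(ctx, &Topic{Name: "voltha-events"},
+//		&KVArg{Key: GroupIdKey, Value: "my-group"},
+//		&KVArg{Key: Offset, Value: sarama.OffsetOldest})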
+
+func (sc *SaramaClient) createClusterAdmin(ctx context.Context) error {
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+
+	// Create a cluster Admin
+	var cAdmin sarama.ClusterAdmin
+	var err error
+	if cAdmin, err = sarama.NewClusterAdmin([]string{sc.KafkaAddress}, config); err != nil {
+		logger.Errorw(ctx, "cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
+		return err
+	}
+	sc.cAdmin = cAdmin
+	return nil
+}
+
+func (sc *SaramaClient) lockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	if _, exist := sc.topicLockMap[topic.Name]; exist {
+		sc.lockOfTopicLockMap.Unlock()
+		sc.topicLockMap[topic.Name].Lock()
+	} else {
+		sc.topicLockMap[topic.Name] = &sync.RWMutex{}
+		sc.lockOfTopicLockMap.Unlock()
+		sc.topicLockMap[topic.Name].Lock()
+	}
+}
+
+func (sc *SaramaClient) unLockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	defer sc.lockOfTopicLockMap.Unlock()
+	if _, exist := sc.topicLockMap[topic.Name]; exist {
+		sc.topicLockMap[topic.Name].Unlock()
+	}
+}
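+
+// Editor's note, not part of this change: lockTopic/unLockTopic implement a
+// per-key mutex map guarded by an outer lock, so operations on distinct topics
+// never serialize against each other, e.g.:
+//
+//	sc.lockTopic(&Topic{Name: "t1"}) // blocks only concurrent "t1" operations
+//	defer sc.unLockTopic(&Topic{Name: "t1"})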
+
+func (sc *SaramaClient) addTopicToConsumerChannelMap(id string, arg *consumerChannels) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if _, exist := sc.topicToConsumerChannelMap[id]; !exist {
+		sc.topicToConsumerChannelMap[id] = arg
+	}
+}
+
+func (sc *SaramaClient) getConsumerChannel(topic *Topic) *consumerChannels {
+	sc.lockTopicToConsumerChannelMap.RLock()
+	defer sc.lockTopicToConsumerChannelMap.RUnlock()
+
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		return consumerCh
+	}
+	return nil
+}
+
+func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		consumerCh.channels = append(consumerCh.channels, ch)
+		return
+	}
+	logger.Warnw(ctx, "consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+}
+
+//closeConsumers closes a list of sarama consumers.  The consumers can be either partition consumers or group consumers
+func closeConsumers(ctx context.Context, consumers []interface{}) error {
+	var err error
+	for _, consumer := range consumers {
+		//	Is it a partition consumer?
+		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			if errTemp := partionConsumer.Close(); errTemp != nil {
+				logger.Debugw(ctx, "partition!!!", log.Fields{"err": errTemp})
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur on race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		} else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+			if errTemp := groupConsumer.Close(); errTemp != nil {
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur on race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		}
+	}
+	return err
+}
+
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		// Channel will be closed in the removeChannel method
+		consumerCh.channels = removeChannel(ctx, consumerCh.channels, ch)
+		// If there are no more channels then we can close the consumer itself
+		if len(consumerCh.channels) == 0 {
+			logger.Debugw(ctx, "closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(ctx, consumerCh.consumers)
+			//err := consumerCh.consumers.Close()
+			delete(sc.topicToConsumerChannelMap, topic.Name)
+			return err
+		}
+		return nil
+	}
+	logger.Warnw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return errors.New("topic-does-not-exist")
+}
+
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(ctx context.Context, topic Topic) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		for _, ch := range consumerCh.channels {
+			// Channel will be closed in the removeChannel method
+			removeChannel(ctx, consumerCh.channels, ch)
+		}
+		err := closeConsumers(ctx, consumerCh.consumers)
+		//if err == sarama.ErrUnknownTopicOrPartition {
+		//	// Not an error
+		//	err = nil
+		//}
+		//err := consumerCh.consumers.Close()
+		delete(sc.topicToConsumerChannelMap, topic.Name)
+		return err
+	}
+	logger.Debugw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return nil
+}
+
+//createPublisher creates the publisher which is used to send a message onto kafka
+func (sc *SaramaClient) createPublisher(ctx context.Context) error {
+	// This Creates the publisher
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.Producer.Partitioner = sarama.NewRandomPartitioner
+	config.Producer.Flush.Frequency = time.Duration(sc.producerFlushFrequency)
+	config.Producer.Flush.Messages = sc.producerFlushMessages
+	config.Producer.Flush.MaxMessages = sc.producerFlushMaxmessages
+	config.Producer.Return.Errors = sc.producerReturnErrors
+	config.Producer.Return.Successes = sc.producerReturnSuccess
+	//config.Producer.RequiredAcks = sarama.WaitForAll
+	config.Producer.RequiredAcks = sarama.WaitForLocal
+
+	brokers := []string{sc.KafkaAddress}
+
+	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
+		logger.Errorw(ctx, "error-starting-publisher", log.Fields{"error": err})
+		return err
+	} else {
+		sc.producer = producer
+	}
+	logger.Info(ctx, "Kafka-publisher-created")
+	return nil
+}
+
+func (sc *SaramaClient) createConsumer(ctx context.Context) error {
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.Consumer.Return.Errors = true
+	config.Consumer.Fetch.Min = 1
+	config.Consumer.MaxWaitTime = time.Duration(sc.consumerMaxwait) * time.Millisecond
+	config.Consumer.MaxProcessingTime = time.Duration(sc.maxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = sarama.OffsetNewest
+	config.Metadata.Retry.Max = sc.metadataMaxRetry
+	brokers := []string{sc.KafkaAddress}
+
+	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
+		logger.Errorw(ctx, "error-starting-consumers", log.Fields{"error": err})
+		return err
+	} else {
+		sc.consumer = consumer
+	}
+	logger.Info(ctx, "Kafka-consumers-created")
+	return nil
+}
+
+// createGroupConsumer creates a consumer group
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+	config := scc.NewConfig()
+	config.Version = sarama.V1_0_0_0
+	config.ClientID = uuid.New().String()
+	config.Group.Mode = scc.ConsumerModeMultiplex
+	config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
+	config.Consumer.Return.Errors = true
+	//config.Group.Return.Notifications = false
+	//config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
+	//config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = initialOffset
+	//config.Consumer.Offsets.Initial = sarama.OffsetOldest
+	brokers := []string{sc.KafkaAddress}
+
+	topics := []string{topic.Name}
+	var consumer *scc.Consumer
+	var err error
+
+	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
+		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		return nil, err
+	}
+	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+
+	//sc.groupConsumers[topic.Name] = consumer
+	sc.addToGroupConsumers(topic.Name, consumer)
+	return consumer, nil
+}
+
+// dispatchToConsumers sends the InterContainerMessage received on a given topic to all subscribers for that
+// topic via the unique channel each subscriber received during subscription
+func (sc *SaramaClient) dispatchToConsumers(consumerCh *consumerChannels, protoMessage *ic.InterContainerMessage) {
+	// Need to go over all channels and publish messages to them - do we need to copy msg?
+	sc.lockTopicToConsumerChannelMap.RLock()
+	for _, ch := range consumerCh.channels {
+		go func(c chan *ic.InterContainerMessage) {
+			c <- protoMessage
+		}(ch)
+	}
+	sc.lockTopicToConsumerChannelMap.RUnlock()
+
+	if callback := sc.metadataCallback; callback != nil {
+		ts, _ := ptypes.Timestamp(protoMessage.Header.Timestamp)
+		callback(protoMessage.Header.FromTopic, ts)
+	}
+}
+
+func (sc *SaramaClient) consumeFromAPartition(ctx context.Context, topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+					logger.Warnw(ctx, "partition-consumers-error", log.Fields{"error": err})
+				}
+			} else {
+				// Channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			//logger.Debugw(ctx, "message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			if !ok {
+				// channel is closed
+				break startloop
+			}
+			msgBody := msg.Value
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+		case <-sc.doneCh:
+			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow(ctx, "partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
+}
+
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+				}
+				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+			} else {
+				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				// channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			if !ok {
+				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				// Channel closed
+				break startloop
+			}
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			msgBody := msg.Value
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+			consumer.MarkOffset(msg, "")
+		case ntf := <-consumer.Notifications():
+			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
+		case <-sc.doneCh:
+			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
+}
+
+func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
+	logger.Debugw(ctx, "starting-consumers", log.Fields{"topic": topic.Name})
+	var consumerCh *consumerChannels
+	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
+		logger.Errorw(ctx, "consumers-not-exist", log.Fields{"topic": topic.Name})
+		return errors.New("consumers-not-exist")
+	}
+	// For each consumer listening for that topic, start a consumption loop
+	for _, consumer := range consumerCh.consumers {
+		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
+		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
+		} else {
+			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
+			return errors.New("invalid-consumer")
+		}
+	}
+	return nil
+}
+
+// setupPartitionConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels
+// map.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	var pConsumers []sarama.PartitionConsumer
+	var err error
+
+	if pConsumers, err = sc.createPartitionConsumers(ctx, topic, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	consumersIf := make([]interface{}, 0)
+	for _, pConsumer := range pConsumers {
+		consumersIf = append(consumersIf, pConsumer)
+	}
+
+	// Create the consumers/channel structure, set the consumers, and create a channel on that topic - for now
+	// unbuffered, to help surface race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: consumersIf,
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start a consumer to listen on that specific topic
+	go func() {
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
+
+	return consumerListeningChannel, nil
+}
+
+// setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels
+// map.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	var pConsumer *scc.Consumer
+	var err error
+	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+	// Create the consumers/channel structure, set the consumers, and create a channel on that topic - for now
+	// unbuffered, to help surface race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: []interface{}{pConsumer},
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start a consumer to listen on that specific topic
+	go func() {
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+				"topic": topic,
+				"error": err})
+		}
+	}()
+
+	return consumerListeningChannel, nil
+}
+
+func (sc *SaramaClient) createPartitionConsumers(ctx context.Context, topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw(ctx, "creating-partition-consumers", log.Fields{"topic": topic.Name})
+	partitionList, err := sc.consumer.Partitions(topic.Name)
+	if err != nil {
+		logger.Warnw(ctx, "get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	pConsumers := make([]sarama.PartitionConsumer, 0)
+	for _, partition := range partitionList {
+		var pConsumer sarama.PartitionConsumer
+		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
+			logger.Warnw(ctx, "consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+		pConsumers = append(pConsumers, pConsumer)
+	}
+	return pConsumers, nil
+}
+
+func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+	var i int
+	var channel chan *ic.InterContainerMessage
+	for i, channel = range channels {
+		if channel == ch {
+			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
+			close(channel)
+			logger.Debug(ctx, "channel-closed")
+			return channels[:len(channels)-1]
+		}
+	}
+	return channels
+}
+
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; !exist {
+		sc.groupConsumers[topic] = consumer
+	}
+}
+
+func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; exist {
+		consumer := sc.groupConsumers[topic]
+		delete(sc.groupConsumers, topic)
+		if err := consumer.Close(); err != nil {
+			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
new file mode 100644
index 0000000..bdc615f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/golang/protobuf/ptypes/any"
+	"strings"
+)
+
+const (
+	TopicSeparator = "_"
+	DeviceIdLength = 24
+)
+
+// A Topic definition - may be augmented with additional attributes eventually
+type Topic struct {
+	// The name of the topic. It must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`).
+	Name string
+}
+
+type KVArg struct {
+	Key   string
+	Value interface{}
+}
+
+type RpcMType int
+
+const (
+	RpcFormattingError RpcMType = iota
+	RpcSent
+	RpcReply
+	RpcTimeout
+	RpcTransportError
+	RpcSystemClosing
+)
+
+type RpcResponse struct {
+	MType RpcMType
+	Err   error
+	Reply *any.Any
+}
+
+func NewResponse(messageType RpcMType, err error, body *any.Any) *RpcResponse {
+	return &RpcResponse{
+		MType: messageType,
+		Err:   err,
+		Reply: body,
+	}
+}
+
+// TODO:  Remove and provide a better way to get the device id
+// GetDeviceIdFromTopic extracts the deviceId from the topic name.  The topic name is formatted either as:
+//			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
+func GetDeviceIdFromTopic(topic Topic) string {
+	pos := strings.LastIndex(topic.Name, TopicSeparator)
+	if pos == -1 {
+		return ""
+	}
+	adjustedPos := pos + len(TopicSeparator)
+	if adjustedPos >= len(topic.Name) {
+		return ""
+	}
+	deviceId := topic.Name[adjustedPos:]
+	if len(deviceId) != DeviceIdLength {
+		return ""
+	}
+	return deviceId
+}
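A hedged usage sketch of the parsing rules above; the 24-character device id below is made up for illustration:

    topic := Topic{Name: "openolt_0123456789abcdef01234567"}
    id := GetDeviceIdFromTopic(topic)                   // "0123456789abcdef01234567"
    none := GetDeviceIdFromTopic(Topic{Name: "rwcore"}) // "" (no separator present)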
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
new file mode 100644
index 0000000..b0ce81b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+var logger CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
new file mode 100644
index 0000000..7b1a123
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package-level logging - a Go package can register itself (RegisterPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (however, avoid using it)
+// 5. Allows key-value pairs to be added to a logger (UpdateLogger) or to all loggers (UpdateAllLoggers) at run time
+// 6. Adds to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
+//
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file).
+//    The log level can be changed and any number of default fields can be added as well. The log level specifies
+//    the lowest log level that will be in the output while the fields will be automatically added to all log printouts.
+//    However, as VOLTHA components re-initialize the log level of each registered package to the default initial log level
+//    passed as a CLI argument, the log level passed in the RegisterPackage call effectively has no effect.
+//
+//    var logger log.CLogger
+//    func init() {
+//              var err error
+//              logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//    }
+//
+// 2. In the calling package, use any of the publicly available functions of the local package-level logger instance created
+//    in the previous step.  Here is an example to write an Info log with additional fields:
+//
+//    logger.Infow(ctx, "An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use
+//          a) SetLogLevel from inside your package or
+//          b) SetPackageLogLevel from anywhere or
+//          c) SetAllLogLevel from anywhere.
+//
+//    The dynamic log level configuration feature also uses the SetPackageLogLevel method, based on triggers received when
+//    configured log levels change.
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path"
+	"runtime"
+	"strings"
+
+	zp "go.uber.org/zap"
+	zc "go.uber.org/zap/zapcore"
+)
+
+type LogLevel int8
+
+const (
+	// DebugLevel logs a message at debug level
+	DebugLevel = LogLevel(iota)
+	// InfoLevel logs a message at info level
+	InfoLevel
+	// WarnLevel logs a message at warning level
+	WarnLevel
+	// ErrorLevel logs a message at error level
+	ErrorLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using the json format, mostly for consumption by an automated logging system
+const JSON = "json"
+
+// CLogger is a context-aware abstract logging interface.  Any logging implementation used
+// will need to abide by this interface.
+type CLogger interface {
+	Debug(context.Context, ...interface{})
+	Debugln(context.Context, ...interface{})
+	Debugf(context.Context, string, ...interface{})
+	Debugw(context.Context, string, Fields)
+
+	Info(context.Context, ...interface{})
+	Infoln(context.Context, ...interface{})
+	Infof(context.Context, string, ...interface{})
+	Infow(context.Context, string, Fields)
+
+	Warn(context.Context, ...interface{})
+	Warnln(context.Context, ...interface{})
+	Warnf(context.Context, string, ...interface{})
+	Warnw(context.Context, string, Fields)
+
+	Error(context.Context, ...interface{})
+	Errorln(context.Context, ...interface{})
+	Errorf(context.Context, string, ...interface{})
+	Errorw(context.Context, string, Fields)
+
+	Fatal(context.Context, ...interface{})
+	Fatalln(context.Context, ...interface{})
+	Fatalf(context.Context, string, ...interface{})
+	Fatalw(context.Context, string, Fields)
+
+	With(Fields) CLogger
+
+	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+	//
+	Warning(context.Context, ...interface{})
+	Warningln(context.Context, ...interface{})
+	Warningf(context.Context, string, ...interface{})
+
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l LogLevel) bool
+
+	// GetLogLevel returns the log level of this specific logger
+	GetLogLevel() LogLevel
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *clogger
+var cfg zp.Config
+
+var loggers map[string]*clogger
+var cfgs map[string]zp.Config
+
+type clogger struct {
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
+}
+
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
+	switch l {
+	case DebugLevel:
+		return zp.NewAtomicLevelAt(zc.DebugLevel)
+	case InfoLevel:
+		return zp.NewAtomicLevelAt(zc.InfoLevel)
+	case WarnLevel:
+		return zp.NewAtomicLevelAt(zc.WarnLevel)
+	case ErrorLevel:
+		return zp.NewAtomicLevelAt(zc.ErrorLevel)
+	case FatalLevel:
+		return zp.NewAtomicLevelAt(zc.FatalLevel)
+	}
+	return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func logLevelToLevel(l LogLevel) zc.Level {
+	switch l {
+	case DebugLevel:
+		return zc.DebugLevel
+	case InfoLevel:
+		return zc.InfoLevel
+	case WarnLevel:
+		return zc.WarnLevel
+	case ErrorLevel:
+		return zc.ErrorLevel
+	case FatalLevel:
+		return zc.FatalLevel
+	}
+	return zc.ErrorLevel
+}
+
+func levelToLogLevel(l zc.Level) LogLevel {
+	switch l {
+	case zc.DebugLevel:
+		return DebugLevel
+	case zc.InfoLevel:
+		return InfoLevel
+	case zc.WarnLevel:
+		return WarnLevel
+	case zc.ErrorLevel:
+		return ErrorLevel
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func StringToLogLevel(l string) (LogLevel, error) {
+	switch strings.ToUpper(l) {
+	case "DEBUG":
+		return DebugLevel, nil
+	case "INFO":
+		return InfoLevel, nil
+	case "WARN":
+		return WarnLevel, nil
+	case "ERROR":
+		return ErrorLevel, nil
+	case "FATAL":
+		return FatalLevel, nil
+	}
+	return 0, errors.New("Given LogLevel is invalid : " + l)
+}
+
+func LogLevelToString(l LogLevel) (string, error) {
+	switch l {
+	case DebugLevel:
+		return "DEBUG", nil
+	case InfoLevel:
+		return "INFO", nil
+	case WarnLevel:
+		return "WARN", nil
+	case ErrorLevel:
+		return "ERROR", nil
+	case FatalLevel:
+		return "FATAL", nil
+	}
+	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+}
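The two conversions round-trip; a small illustrative sketch (matching on input is case-insensitive):

    lvl, _ := StringToLogLevel("debug") // DebugLevel
    s, _ := LogLevelToString(lvl)       // "DEBUG"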
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
+	return zp.Config{
+		Level:            logLevelToAtomicLevel(level),
+		Encoding:         outputType,
+		Development:      true,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stderr"},
+		InitialFields:    defaultFields,
+		EncoderConfig: zc.EncoderConfig{
+			LevelKey:       "level",
+			MessageKey:     "msg",
+			TimeKey:        "ts",
+			CallerKey:      "caller",
+			StacktraceKey:  "stacktrace",
+			LineEnding:     zc.DefaultLineEnding,
+			EncodeLevel:    zc.LowercaseLevelEncoder,
+			EncodeTime:     zc.ISO8601TimeEncoder,
+			EncodeDuration: zc.SecondsDurationEncoder,
+			EncodeCaller:   zc.ShortCallerEncoder,
+		},
+	}
+}
+
+func ConstructZapConfig(outputType string, level LogLevel, fields Fields) zp.Config {
+	return getDefaultConfig(outputType, level, fields)
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be used.  This function
+// initializes the default logger (zap's SugaredLogger)
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (CLogger, error) {
+	// Build a custom config using zap
+	cfg = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLogger = &clogger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+
+	return defaultLogger, nil
+}
+
+// RegisterPackage registers a package to the log map.  Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is CONSOLE or JSON, level is the lowest log level to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: RegisterPackage also returns a reference to the actual logger.  If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// The pkgNames parameter should be used for testing only, as this function detects the caller's package.
+func RegisterPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (CLogger, error) {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*clogger)
+	}
+
+	var pkgName string
+	if len(pkgNames) > 0 {
+		pkgName = pkgNames[0]
+	}
+	if pkgName == "" {
+		pkgName, _, _, _ = getCallerInfo()
+	}
+
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName], nil
+	}
+
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	loggers[pkgName] = &clogger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+// UpdateAllLoggers creates new loggers for all registered packages with the supplied defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build(zp.AddCallerSkip(1))
+		if err != nil {
+			return err
+		}
+
+		// Update the existing zap logger instance
+		loggers[pkgName].log = l.Sugar()
+		loggers[pkgName].parent = l
+	}
+	return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+	i := 0
+	keys := make([]string, len(loggers))
+	for k := range loggers {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// UpdateLogger updates the logger associated with a caller's package with supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; !exist {
+		return fmt.Errorf("package-%s-not-registered", pkgName)
+	}
+
+	// Build a new logger
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("config-%s-not-registered", pkgName)
+	}
+
+	cfg := cfgs[pkgName]
+	for k, v := range defaultFields {
+		if cfg.InitialFields == nil {
+			cfg.InitialFields = make(map[string]interface{})
+		}
+		cfg.InitialFields[k] = v
+	}
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return err
+	}
+
+	// Update the existing zap logger instance
+	loggers[pkgName].log = l.Sugar()
+	loggers[pkgName].parent = l
+
+	return nil
+}
+
+func setLevel(cfg zp.Config, level LogLevel) {
+	switch level {
+	case DebugLevel:
+		cfg.Level.SetLevel(zc.DebugLevel)
+	case InfoLevel:
+		cfg.Level.SetLevel(zc.InfoLevel)
+	case WarnLevel:
+		cfg.Level.SetLevel(zc.WarnLevel)
+	case ErrorLevel:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	case FatalLevel:
+		cfg.Level.SetLevel(zc.FatalLevel)
+	default:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	}
+}
+
+// SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level LogLevel) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		setLevel(cfg, level)
+	}
+}
+
+// SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level LogLevel) {
+	// Get proper config
+	for _, cfg := range cfgs {
+		setLevel(cfg, level)
+	}
+}
+
+// GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
+		return levelToLogLevel(cfg.Level.Level()), nil
+	}
+	return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+// GetDefaultLogLevel returns the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() LogLevel {
+	return levelToLogLevel(cfg.Level.Level())
+}
+
+// SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level LogLevel) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("unregistered-package-%s", pkgName)
+	}
+	cfg := cfgs[pkgName]
+	setLevel(cfg, level)
+	return nil
+}
+
+// SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level LogLevel) {
+	setLevel(cfg, level)
+}
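A hedged sketch of dynamic level control using the functions above; the package name is hypothetical and must match a registered package:

    SetAllLogLevel(InfoLevel)                                     // every registered package at Info
    SetPackageLogLevel("github.com/opencord/example", DebugLevel) // one package back at Debug
    lvl, err := GetPackageLogLevel("github.com/opencord/example") // DebugLevel when err == nil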
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	if defaultLogger != nil {
+		if defaultLogger.parent != nil {
+			if err := defaultLogger.parent.Sync(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+	// The caller of a log function sits one stack frame above the last log.go frame, so walk the
+	// stack until the first frame whose file is not log.go and take the caller info from that frame.
+	maxLevel := 3
+	skiplevel := 3 // Skip count with the most empirical success in landing near the last log.go stack frame.
+	pc := make([]uintptr, maxLevel)
+	n := runtime.Callers(skiplevel, pc)
+	packageName := ""
+	funcName := ""
+	fileName := ""
+	var line int
+	if n == 0 {
+		return packageName, fileName, funcName, line
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	var foundFrame runtime.Frame
+	more := true
+	for more {
+		frame, more = frames.Next()
+		_, fileName = path.Split(frame.File)
+		if fileName != "log.go" {
+			foundFrame = frame // First frame after log.go in the frame stack
+			break
+		}
+	}
+	parts := strings.Split(foundFrame.Function, ".")
+	pl := len(parts)
+	if pl >= 2 {
+		funcName = parts[pl-1]
+		if parts[pl-2][0] == '(' {
+			packageName = strings.Join(parts[0:pl-2], ".")
+		} else {
+			packageName = strings.Join(parts[0:pl-1], ".")
+		}
+	}
+
+	if strings.HasSuffix(packageName, ".init") {
+		packageName = strings.TrimSuffix(packageName, ".init")
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+
+	return packageName, fileName, funcName, foundFrame.Line
+}
+
+// With returns a logger initialized with the key-value pairs, preserving the package association
+// so that per-package level lookups keep working on the derived logger.
+func (l clogger) With(keysAndValues Fields) CLogger {
+	return clogger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent, packageName: l.packageName}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l clogger) Debug(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger, with a line feed (the default in any case).
+func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(DebugLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l clogger) Info(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger, with a line feed (the default in any case).
+func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(InfoLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l clogger) Warn(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger, with a line feed (the default in any case).
+func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(WarnLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l clogger) Error(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger, with a line feed (the default in any case).
+func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(ErrorLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger, with a line feed (the default in any case).
+func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(FatalLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l clogger) Warning(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger, with a line feed (the default in any case).
+func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l clogger) V(level LogLevel) bool {
+	return l.parent.Core().Enabled(logLevelToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l clogger) GetLogLevel() LogLevel {
+	return levelToLogLevel(cfgs[l.packageName].Level.Level())
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
new file mode 100644
index 0000000..82c3d7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// File contains utility functions to support Open Tracing in conjunction with
+// Enhanced Logging based on context propagation
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
+	"io"
+	"os"
+	"strings"
+	"sync"
+)
+
+const (
+	RootSpanNameKey = "op-name"
+)
+
+// LogFeaturesManager holds the global settings governing the log correlation and tracing features. It should only
+// be updated through the exposed public methods.
+type LogFeaturesManager struct {
+	isTracePublishingEnabled bool
+	isLogCorrelationEnabled  bool
+	componentName            string // Name of component extracted from ENV variable
+	activeTraceAgentAddress  string
+	lock                     sync.Mutex
+}
+
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+	return globalLFM
+}
+
+// ActiveTracerProxy is a wrapper that delegates to the currently active tracer instance. The middleware library used
+// for generating spans for gRPC API calls does not support dynamically setting the active tracer the way the
+// SetGlobalTracer method of the OpenTracing API does.
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+	return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	return opentracing.GlobalTracer().Extract(format, carrier)
+}
+
+// traceLogger is a Jaeger-compliant Logger instance that redirects Jaeger logs to the default logger
+type traceLogger struct {
+	logger *clogger
+}
+
+func (tl traceLogger) Error(msg string) {
+	tl.logger.Error(context.Background(), msg)
+}
+
+func (tl traceLogger) Infof(msg string, args ...interface{}) {
+	// Tracing logs should be performed only at Debug Verbosity
+	tl.logger.Debugf(context.Background(), msg, args...)
+}
+
+// traceCloser handles the correct Closer call at the time of process termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+	currentActiveTracer := opentracing.GlobalTracer()
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+
+	return nil
+}
+
+// InitTracingAndLogCorrelation initializes the Jaeger-based tracing client based on the initial status of trace publishing and log correlation
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+	lfm.componentName = os.Getenv("COMPONENT_NAME")
+	if lfm.componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	// Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+	if !tracePublishEnabled && !logCorrelationEnabled {
+		logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+		lfm.isTracePublishingEnabled = false
+		lfm.isLogCorrelationEnabled = false
+		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+		return traceCloser{}, nil
+	}
+
+	tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize variables representing Active Status
+	opentracing.SetGlobalTracer(tracer)
+	lfm.isTracePublishingEnabled = tracePublishEnabled
+	lfm.activeTraceAgentAddress = traceAgentAddress
+	lfm.isLogCorrelationEnabled = logCorrelationEnabled
+	return traceCloser{}, nil
+}
+
+// replaceActiveTracer replaces the active tracer and gracefully closes the previous one
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+	currentActiveTracer := opentracing.GlobalTracer()
+	opentracing.SetGlobalTracer(tracer)
+
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isLogCorrelationEnabled {
+		logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+		// Continue using the earlier tracer instance in case of any error
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+			tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+				return
+			}
+
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isLogCorrelationEnabled = true
+		logger.Info(context.Background(), "Log Correlation has been enabled")
+
+	} else {
+		// Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		}
+
+		lfm.isLogCorrelationEnabled = false
+		logger.Info(context.Background(), "Log Correlation has been disabled")
+	}
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isTracePublishingEnabled {
+		logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Tracing Publish has been enabled (even if a Jaeger instance is already active)
+		// This is needed to ensure that a fresh lookup of Jaeger Agent address is performed again while performing
+		// Disable-Enable of Tracing
+		tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+		if err != nil {
+			logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+			return
+		}
+		lfm.replaceActiveTracer(tracer)
+
+		lfm.isTracePublishingEnabled = true
+		logger.Info(context.Background(), "Tracing Publishing has been enabled")
+	} else {
+		// Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+		if !lfm.isLogCorrelationEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		} else {
+			// Else construct a new Jaeger Instance with publishing disabled
+			tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+				return
+			}
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isTracePublishingEnabled = false
+		logger.Info(context.Background(), "Tracing Publishing has been disabled")
+	}
+}
+
+// constructJaegerTracer constructs a new Jaeger tracer instance based on the given trace agent address and publish status.
+// The last parameter indicates whether to use the loopback IP for creating the Jaeger client when the DNS lookup
+// of the supplied trace agent address has failed. It is fine to fall back during the initialization step, but
+// not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+	cfg := jcfg.Configuration{ServiceName: lfm.componentName}
+
+	var err error
+	var jReporterConfig jcfg.ReporterConfig
+	var jReporterCfgOption jtracing.Reporter
+
+	logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
+
+	// Attempt Trace Agent Address first; will fallback to Loopback IP if it fails
+	jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+	jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+	if err != nil {
+		if !fallbackToLoopbackAllowed {
+			return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
+		}
+
+		logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+			Fields{"error": err, "address": traceAgentAddress})
+		// The Reporter initialization may fail due to an invalid agent address or a non-existent agent (DNS lookup failure).
+		// It is essential for the tracer instance to still start so that the span propagation needed for log correlation works.
+		// Thus, fall back to the loopback IP for Reporter initialization before returning any error
+		tracePublishEnabled = false
+
+		jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
+		jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+		if err != nil {
+			return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+		}
+	}
+
+	// To start with, we are using Constant Sampling type
+	samplerParam := 0 // 0: Do not publish span, 1: Publish
+	if tracePublishEnabled {
+		samplerParam = 1
+	}
+	jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
+	jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
+	if err != nil {
+		return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
+	}
+
+	return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+}
+
+func TerminateTracing(c io.Closer) {
+	err := c.Close()
+	if err != nil {
+		logger.Errorw(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+	}
+}
+
+// ExtractContextAttributes extracts details of the execution context as log fields from the tracing span injected
+// into the context instance. The following log fields are extracted:
+// 1. Operation Name : key 'op-name' with the span operation name as value
+// 2. Operation Id : key 'op-id' with the 64-bit trace id as a hex digit string
+//
+// Additionally, any tags present in the span are also extracted as log fields, e.g. device-id.
+//
+// If no span is found associated with the context, an empty slice is returned without any log fields.
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+	if !lfm.isLogCorrelationEnabled {
+		return make([]interface{}, 0)
+	}
+
+	attrMap := make(map[string]interface{})
+
+	if ctx != nil {
+		if span := opentracing.SpanFromContext(ctx); span != nil {
+			if jspan, ok := span.(*jtracing.Span); ok {
+				// Add Log fields for operation identified by Root Level Span (Trace)
+				opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
+				opName := jspan.BaggageItem(RootSpanNameKey)
+
+				taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
+				taskName := jspan.OperationName()
+
+				if opName == "" {
+					span.SetBaggageItem(RootSpanNameKey, taskName)
+					opName = taskName
+				}
+
+				attrMap["op-id"] = opId
+				attrMap["op-name"] = opName
+
+				// Add Log fields for task identified by Current Span, if it is different
+				// than operation
+				if taskId != opId {
+					attrMap["task-id"] = taskId
+					attrMap["task-name"] = taskName
+				}
+
+				for k, v := range jspan.Tags() {
+					// Ignore the special tags added by Jaeger, middleware (sampler.type, span.*) present in the span
+					if strings.HasPrefix(k, "sampler.") || strings.HasPrefix(k, "span.") || k == "component" {
+						continue
+					}
+
+					attrMap[k] = v
+				}
+
+				processBaggageItems := func(k, v string) bool {
+					if k != "rpc-span-name" {
+						attrMap[k] = v
+					}
+					return true
+				}
+
+				jspan.SpanContext().ForeachBaggageItem(processBaggageItems)
+			}
+		}
+	}
+
+	return serializeMap(attrMap)
+}
+
+// EnrichSpan injects additional log fields into the span, e.g. device-id. Note that values injected
+// into a root span are carried as baggage items and therefore must be strings.
+func EnrichSpan(ctx context.Context, keyAndValues ...Fields) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		if jspan, ok := span.(*jtracing.Span); ok {
+			// Inject as a BaggageItem when the Span is the Root Span so that it propagates
+			// across the components along with Root Span (called as Trace)
+			// Else, inject as a Tag so that it is attached to the Child Task
+			isRootSpan := false
+			if jspan.SpanContext().TraceID().String() == jspan.SpanContext().SpanID().String() {
+				isRootSpan = true
+			}
+
+			for _, field := range keyAndValues {
+				for k, v := range field {
+					if isRootSpan {
+						span.SetBaggageItem(k, v.(string))
+					} else {
+						span.SetTag(k, v)
+					}
+				}
+			}
+		}
+	}
+}
+
+// MarkSpanError injects the error into the span when an operation fails
+func MarkSpanError(ctx context.Context, err error) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		span.SetTag("error", true)
+		span.SetTag("err", err)
+	}
+}
+
+// CreateChildSpan creates a child span from the parent span embedded in the passed context. It should be used before
+// starting a new major operation in synchronous or asynchronous mode (go routine), such as:
+// 1. The start of all implemented external API methods, unless using an interceptor for auto-injection of spans (server-side impl)
+// 2. Just before calling a third-party lib which is invoking an external API (etcd, kafka)
+// 3. The start of a go routine responsible for performing a major task of significant duration
+// 4. Any method which is suspected to be time consuming
+func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+	childSpan, newCtx := opentracing.StartSpanFromContext(ctx, taskName)
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		childSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return childSpan, newCtx
+}
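A hedged caller-side sketch: wrap a potentially long-running task in a child span and pass the derived context down so nested calls attach to it; the task name and device id are illustrative, and baggage values must be strings:

    span, ctx := log.CreateChildSpan(ctx, "process-flows", log.Fields{"device-id": "test-device"})
    defer span.Finish()
    // ... perform the operation using ctx so downstream spans inherit this one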
+
+// CreateAsyncSpan creates an async child span with a Follows-From relationship from the parent span embedded in the
+// passed context. It should be used only in scenarios where
+// a) there is a discontinuation in execution, so the result of the child span does not affect the parent flow at all, and
+// b) the execution of the child span is guaranteed to start after the completion of the parent span.
+// In case of any confusion, use the CreateChildSpan method.
+// Situations where this method is suitable include Kafka async RPC calls, propagation of an event across
+// a channel, etc.
+func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	var asyncSpan opentracing.Span
+	var newCtx context.Context
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+
+	// An async span should always be created from a valid parent span. If there is none, create a child span instead
+	if parentSpan == nil {
+		logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
+	} else {
+		// Use the Background context as the base for the Follows-From case; otherwise the new span gets both Child and Follows-From relationships
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(context.Background(), taskName, opentracing.FollowsFrom(parentSpan.Context()))
+	}
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		asyncSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return asyncSpan, newCtx
+}
+
+// WithSpanFromContext extracts the span from the source context and injects it into the supplied target context.
+// This should be used in situations where we are calling a time-sensitive operation (e.g. an etcd update) and hence
+// use a context.Background() base to avoid any cancellation/timeout of the operation by the passed context.
+// This allows propagation of the span with a different base context (and not the original context).
+func WithSpanFromContext(targetCtx, sourceCtx context.Context) context.Context {
+	span := opentracing.SpanFromContext(sourceCtx)
+	return opentracing.ContextWithSpan(targetCtx, span)
+}
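A hedged sketch of the intended pattern: keep the span for correlation while detaching from the caller's cancellation before a write that must not be interrupted; the timeout value and the kvClient call are hypothetical:

    kvCtx, cancel := context.WithTimeout(log.WithSpanFromContext(context.Background(), ctx), 5*time.Second)
    defer cancel()
    // err := kvClient.Put(kvCtx, key, value) // hypothetical KV write, immune to the caller's cancellation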
+
+// serializeMap converts log Fields into the flat key/value slice expected by zap logger methods
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go
new file mode 100644
index 0000000..e058e48
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package meters
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go
new file mode 100644
index 0000000..d220c0b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package meters
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+)
+
+// GetTrafficShapingInfo returns the CIR, PIR and GIR values derived from the meter config bands
+func GetTrafficShapingInfo(ctx context.Context, meterConfig *ofp.OfpMeterConfig) (*tp_pb.TrafficShapingInfo, error) {
+	switch meterBandSize := len(meterConfig.Bands); {
+	case meterBandSize == 1:
+		band := meterConfig.Bands[0]
+		if band.BurstSize == 0 { // GIR, tcont type 1
+			return &tp_pb.TrafficShapingInfo{Gir: band.Rate}, nil
+		}
+		return &tp_pb.TrafficShapingInfo{Pir: band.Rate, Pbs: band.BurstSize}, nil // PIR, tcont type 4
+	case meterBandSize == 2:
+		firstBand, secondBand := meterConfig.Bands[0], meterConfig.Bands[1]
+		if firstBand.BurstSize == 0 && secondBand.BurstSize == 0 &&
+			firstBand.Rate == secondBand.Rate { // PIR == GIR, tcont type 1
+			return &tp_pb.TrafficShapingInfo{Pir: firstBand.Rate, Gir: secondBand.Rate}, nil
+		}
+		if firstBand.BurstSize > 0 && secondBand.BurstSize > 0 { // PIR, CIR, tcont type 2 or 3
+			if firstBand.Rate > secondBand.Rate { // always PIR >= CIR
+				return &tp_pb.TrafficShapingInfo{Pir: firstBand.Rate, Pbs: firstBand.BurstSize, Cir: secondBand.Rate, Cbs: secondBand.BurstSize}, nil
+			}
+			return &tp_pb.TrafficShapingInfo{Pir: secondBand.Rate, Pbs: secondBand.BurstSize, Cir: firstBand.Rate, Cbs: firstBand.BurstSize}, nil
+		}
+	case meterBandSize == 3: // PIR,CIR,GIR, tcont type 5
+		var count, girIndex int
+		for i, band := range meterConfig.Bands {
+			if band.BurstSize == 0 { // find GIR
+				count = count + 1
+				girIndex = i
+			}
+		}
+		if count == 1 {
+			bands := make([]*ofp.OfpMeterBandHeader, len(meterConfig.Bands))
+			copy(bands, meterConfig.Bands)
+			pirCirBands := append(bands[:girIndex], bands[girIndex+1:]...)
+			firstBand, secondBand := pirCirBands[0], pirCirBands[1]
+			if firstBand.Rate > secondBand.Rate {
+				return &tp_pb.TrafficShapingInfo{Pir: firstBand.Rate, Pbs: firstBand.BurstSize, Cir: secondBand.Rate, Cbs: secondBand.BurstSize, Gir: meterConfig.Bands[girIndex].Rate}, nil
+			}
+			return &tp_pb.TrafficShapingInfo{Pir: secondBand.Rate, Pbs: secondBand.BurstSize, Cir: firstBand.Rate, Cbs: firstBand.BurstSize, Gir: meterConfig.Bands[girIndex].Rate}, nil
+		}
+	default:
+		logger.Errorw(ctx, "invalid-meter-config", log.Fields{"meter-config": meterConfig})
+		return nil, fmt.Errorf("invalid-meter-config: %v", meterConfig)
+	}
+	return nil, fmt.Errorf("invalid-meter-config: %v", meterConfig)
+}
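A hedged sketch of the two-band case: both bands carry a burst size, so they are read as PIR/CIR (tcont type 2 or 3) with the higher rate taken as PIR; all numbers are illustrative:

    meter := &ofp.OfpMeterConfig{Bands: []*ofp.OfpMeterBandHeader{
        {Rate: 100000, BurstSize: 4096}, // higher rate -> PIR
        {Rate: 50000, BurstSize: 2048},  // lower rate -> CIR
    }}
    ts, err := GetTrafficShapingInfo(context.Background(), meter)
    // on success: ts.Pir == 100000, ts.Pbs == 4096, ts.Cir == 50000, ts.Cbs == 2048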
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics/performance_metrics.go
new file mode 100644
index 0000000..699e8f0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics/performance_metrics.go
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package pmmetrics
+
+import (
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+// PmMetrics structure holds metric and device info
+type PmMetrics struct {
+	deviceID          string
+	frequency         uint32
+	grouped           bool
+	frequencyOverride bool
+	metrics           map[string]*voltha.PmConfig
+}
+
+type PmMetricsOption func(*PmMetrics)
+
+// Frequency sets the interval at which stats are polled
+func Frequency(frequency uint32) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequency = frequency
+	}
+}
+
+// GetSubscriberMetrics will return the metrics subscribed for the device.
+func (pm *PmMetrics) GetSubscriberMetrics() map[string]*voltha.PmConfig {
+	if pm == nil {
+		return nil
+	}
+	return pm.metrics
+}
+
+func Grouped(grouped bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.grouped = grouped
+	}
+}
+
+func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequencyOverride = frequencyOverride
+	}
+}
+
+// UpdateFrequency will update the frequency.
+func (pm *PmMetrics) UpdateFrequency(frequency uint32) {
+	pm.frequency = frequency
+}
+
+// Metrics initializes the PM metric configs for the supplied metric names
+func Metrics(pmNames []string) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.metrics = make(map[string]*voltha.PmConfig)
+		for _, name := range pmNames {
+			args.metrics[name] = &voltha.PmConfig{
+				Name:    name,
+				Type:    voltha.PmConfig_COUNTER,
+				Enabled: true,
+			}
+		}
+	}
+}
+
+// NewPmMetrics returns a PmMetrics object for the device, applying the supplied options
+func NewPmMetrics(deviceID string, opts ...PmMetricsOption) *PmMetrics {
+	pm := &PmMetrics{}
+	pm.deviceID = deviceID
+	for _, option := range opts {
+		option(pm)
+	}
+	return pm
+}
+
+// ToPmConfigs converts the PmMetrics into a voltha.PmConfigs message carrying the defined metrics
+func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
+	pmConfigs := &voltha.PmConfigs{
+		Id:           pm.deviceID,
+		DefaultFreq:  pm.frequency,
+		Grouped:      pm.grouped,
+		FreqOverride: pm.frequencyOverride,
+	}
+	for _, v := range pm.metrics {
+		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
+	}
+	return pmConfigs
+}
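A hedged sketch of the functional-options construction provided by this package; the device id, polling frequency and metric names are illustrative:

    pm := NewPmMetrics("device-1234",
        Frequency(150),
        Grouped(false),
        FrequencyOverride(false),
        Metrics([]string{"rx_bytes", "tx_bytes"}))
    pmConfigs := pm.ToPmConfigs() // *voltha.PmConfigs ready to report upstream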
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go
new file mode 100644
index 0000000..76207a0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ponresourcemanager
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go
new file mode 100644
index 0000000..804a6f3
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go
@@ -0,0 +1,1367 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ponresourcemanager
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	bitmap "github.com/boljen/go-bitmap"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v5/pkg/techprofile"
+)
+
+const (
+	//Constants to identify resource pool
+	UNI_ID     = "UNI_ID"
+	ONU_ID     = "ONU_ID"
+	ALLOC_ID   = "ALLOC_ID"
+	GEMPORT_ID = "GEMPORT_ID"
+	FLOW_ID    = "FLOW_ID"
+
+	//Constants for passing command line arguments
+	OLT_MODEL_ARG = "--olt_model"
+
+	PATH_PREFIX = "%s/resource_manager/{%s}"
+
+	/*The path under which configuration data is stored is defined as technology/device agnostic.
+	  That means the path does not include any specific technology/device variable. Using a technology/device
+	  agnostic path also makes northbound applications, which need to write to this path,
+	  technology/device agnostic.
+
+	  The default kv client of PonResourceManager reads from/writes to the PATH_PREFIX defined above.
+	  That is why an additional kv client (named KVStoreForConfig) is defined to read from the config path.
+	*/
+
+	PATH_PREFIX_FOR_CONFIG = "%s/resource_manager/config"
+	/*The resource ranges for a given device model should be placed
+	  at the 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+	  path on the KV store.
+	  If resource range parameters are to be read from an external KV store,
+	  they are expected to be stored in the following format.
+	  Note: All parameters are MANDATORY for now.
+	  The constants below are used as keys to reference the resource range
+	  parameters in an external KV store.
+	*/
+	UNI_ID_START_IDX      = "uni_id_start"
+	UNI_ID_END_IDX        = "uni_id_end"
+	ONU_ID_START_IDX      = "onu_id_start"
+	ONU_ID_END_IDX        = "onu_id_end"
+	ONU_ID_SHARED_IDX     = "onu_id_shared"
+	ALLOC_ID_START_IDX    = "alloc_id_start"
+	ALLOC_ID_END_IDX      = "alloc_id_end"
+	ALLOC_ID_SHARED_IDX   = "alloc_id_shared"
+	GEMPORT_ID_START_IDX  = "gemport_id_start"
+	GEMPORT_ID_END_IDX    = "gemport_id_end"
+	GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+	FLOW_ID_START_IDX     = "flow_id_start"
+	FLOW_ID_END_IDX       = "flow_id_end"
+	FLOW_ID_SHARED_IDX    = "flow_id_shared"
+	NUM_OF_PON_PORT       = "pon_ports"
+
+	/*
+	   The KV store backend is initialized with a path prefix and we need to
+	   provide only the suffix.
+	*/
+	PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+
+	//resource path suffix
+	//Path on the KV store for storing alloc id ranges and resource pool for a given interface
+	//Format: <device_id>/alloc_id_pool/<pon_intf_id>
+	ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+	//Path on the KV store for storing gemport id ranges and resource pool for a given interface
+	//Format: <device_id>/gemport_id_pool/<pon_intf_id>
+	GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+	//Path on the KV store for storing onu id ranges and resource pool for a given interface
+	//Format: <device_id>/onu_id_pool/<pon_intf_id>
+	ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+	//Path on the KV store for storing flow id ranges and resource pool for a given interface
+	//Format: <device_id>/flow_id_pool/<pon_intf_id>
+	FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+
+	//Path on the KV store for storing list of alloc IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+	ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+
+	//Path on the KV store for storing list of gemport IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+	GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+
+	//Path on the KV store for storing list of Flow IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+	FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+
+	//Flow Id info: Use to store more metadata associated with the flow_id
+	FLOW_ID_INFO_PATH_PREFIX = "{%s}/flow_id_info"
+	//Format: <device_id>/flow_id_info/<(pon_intf_id, onu_id)>
+	FLOW_ID_INFO_PATH_INTF_ONU_PREFIX = "{%s}/flow_id_info/{%s}"
+	//Format: <device_id>/flow_id_info/<(pon_intf_id, onu_id)><flow_id>
+	FLOW_ID_INFO_PATH = FLOW_ID_INFO_PATH_PREFIX + "/{%s}/{%d}"
+
+	//Constants for internal usage.
+	PON_INTF_ID     = "pon_intf_id"
+	START_IDX       = "start_idx"
+	END_IDX         = "end_idx"
+	POOL            = "pool"
+	NUM_OF_PON_INTF = 16
+
+	KVSTORE_RETRY_TIMEOUT = 5 * time.Second
+	//Path on the KV store for storing reserved gem ports
+	//Format: reserved_gemport_ids
+	RESERVED_GEMPORT_IDS_PATH = "reserved_gemport_ids"
+)
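+
+// Illustrative expansion of the pool-path templates above (not part of the
+// upstream library); device ID and PON interface ID are substituted via
+// fmt.Sprintf, e.g. for a hypothetical device "dev-1" and PON port 0:
+//
+//	fmt.Sprintf(ONU_ID_POOL_PATH, "dev-1", 0)   // "{dev-1}/onu_id_pool/{0}"
+//	fmt.Sprintf(ALLOC_ID_POOL_PATH, "dev-1", 0) // "{dev-1}/alloc_id_pool/{0}"
+//	fmt.Sprintf(FLOW_ID_INFO_PATH, "dev-1", "(0,1)", 7)
+//	// => "{dev-1}/flow_id_info/{(0,1)}/{7}"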
+
+//type ResourceTypeIndex string
+//type ResourceType string
+
+type PONResourceManager struct {
+	//Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
+	Technology       string
+	DeviceType       string
+	DeviceID         string
+	Backend          string // ETCD only currently
+	Address          string // address of the KV store
+	OLTModel         string
+	KVStore          *db.Backend
+	KVStoreForConfig *db.Backend
+	TechProfileMgr   tp.TechProfileIf // create object of *tp.TechProfileMgr
+
+	// Below attribute, pon_resource_ranges, should be initialized
+	// by reading from KV store.
+	PonResourceRanges  map[string]interface{}
+	SharedResourceMgrs map[string]*PONResourceManager
+	SharedIdxByType    map[string]string
+	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
+}
+
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
+	switch storeType {
+	case "etcd":
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func SetKVClient(ctx context.Context, Technology string, Backend string, Addr string, configClient bool, basePathKvStore string) *db.Backend {
+	// TODO: Make sure a direct call to NewBackend works with the backend; currently there is an
+	// issue between the KV store and the backend, and the core does not call NewBackend directly.
+	kvClient, err := newKVClient(ctx, Backend, Addr, KVSTORE_RETRY_TIMEOUT)
+	if err != nil {
+		logger.Fatalw(ctx, "Failed to init KV client\n", log.Fields{"err": err})
+		return nil
+	}
+
+	var pathPrefix string
+	if configClient {
+		pathPrefix = fmt.Sprintf(PATH_PREFIX_FOR_CONFIG, basePathKvStore)
+	} else {
+		pathPrefix = fmt.Sprintf(PATH_PREFIX, basePathKvStore, Technology)
+	}
+
+	kvbackend := &db.Backend{
+		Client:     kvClient,
+		StoreType:  Backend,
+		Address:    Addr,
+		Timeout:    KVSTORE_RETRY_TIMEOUT,
+		PathPrefix: pathPrefix}
+
+	return kvbackend
+}
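+
+// Illustrative sketch (not part of the upstream library): assuming a
+// hypothetical basePathKvStore of "service/voltha", technology "xgspon", and a
+// PATH_PREFIX of the form "%s/resource_manager/%s" (defined earlier in this
+// file), the two clients end up with prefixes such as:
+//
+//	SetKVClient(ctx, "xgspon", "etcd", addr, false, "service/voltha")
+//	// PathPrefix: "service/voltha/resource_manager/xgspon"
+//	SetKVClient(ctx, "xgspon", "etcd", addr, true, "service/voltha")
+//	// PathPrefix: "service/voltha/resource_manager/config"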
+
+// NewPONResourceManager creates a new PON resource manager.
+func NewPONResourceManager(ctx context.Context, Technology string, DeviceType string, DeviceID string, Backend string, Address string, basePathKvStore string) (*PONResourceManager, error) {
+	var PONMgr PONResourceManager
+	PONMgr.Technology = Technology
+	PONMgr.DeviceType = DeviceType
+	PONMgr.DeviceID = DeviceID
+	PONMgr.Backend = Backend
+	PONMgr.Address = Address
+	PONMgr.KVStore = SetKVClient(ctx, Technology, Backend, Address, false, basePathKvStore)
+	if PONMgr.KVStore == nil {
+		logger.Error(ctx, "KV Client initilization failed")
+		return nil, errors.New("Failed to init KV client")
+	}
+	// init kv client to read from the config path
+	PONMgr.KVStoreForConfig = SetKVClient(ctx, Technology, Backend, Address, true, basePathKvStore)
+	if PONMgr.KVStoreForConfig == nil {
+		logger.Error(ctx, "KV Config Client initilization failed")
+		return nil, errors.New("Failed to init KV Config client")
+	}
+	// Initialize techprofile for this technology
+	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(ctx, &PONMgr, Backend, Address, basePathKvStore); PONMgr.TechProfileMgr == nil {
+		logger.Error(ctx, "Techprofile initialization failed")
+		return nil, errors.New("Failed to init tech profile")
+	}
+	PONMgr.PonResourceRanges = make(map[string]interface{})
+	PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+	PONMgr.SharedIdxByType = make(map[string]string)
+	PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+	PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+	PONMgr.OLTModel = DeviceType
+	return &PONMgr, nil
+}
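+
+// Illustrative usage sketch (not part of the upstream library), with a
+// hypothetical etcd endpoint kvAddr and base path "service/voltha":
+//
+//	rm, err := NewPONResourceManager(ctx, "xgspon", "XGS-PON-OLT", "dev-1",
+//		"etcd", kvAddr, "service/voltha")
+//	if err != nil { /* handle */ }
+//	// Seed defaults (the ranges here are examples only), then build the pools.
+//	rm.InitDefaultPONResourceRanges(ctx, 1, 127, 0, 1024, 2816, 0,
+//		1024, 8960, 0, 1, 65535, 0, 0, 4, 16, nil)
+//	if err := rm.InitDeviceResourcePool(ctx); err != nil { /* handle */ }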
+
+/*
+  Initialize PON resource ranges with config fetched from kv store.
+  return boolean: True if PON resource ranges initialized else false
+  Try to initialize the PON Resource Ranges from KV store based on the
+  OLT model key, if available
+*/
+
+func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore(ctx context.Context) bool {
+	if PONRMgr.OLTModel == "" {
+		logger.Error(ctx, "Failed to get OLT model")
+		return false
+	}
+	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+	//get resource from kv store
+	Result, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err != nil {
+		logger.Debugf(ctx, "Error in fetching resource %s from KV strore", Path)
+		return false
+	}
+	if Result == nil {
+		logger.Debug(ctx, "There may be no resources in the KV store in case of fresh bootup, return true")
+		return false
+	}
+	//update internal ranges from kv ranges. If there are missing
+	// values in the KV profile, continue to use the defaults
+	Value, err := ToByte(Result.Value)
+	if err != nil {
+		logger.Error(ctx, "Failed to convert kvpair to byte string")
+		return false
+	}
+	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+		logger.Error(ctx, "Failed to Unmarshal json byte")
+		return false
+	}
+	logger.Debug(ctx, "Init resource ranges from kvstore success")
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateRanges(ctx context.Context, StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+	/*
+	   Update the ranges for all resource types in the internal maps
+	   param: resource type start index
+	   param: start ID
+	   param: resource type end index
+	   param: end ID
+	   param: resource type shared index
+	   param: shared pool id
+	   param: global resource manager
+	*/
+	logger.Debugf(ctx, "update ranges for %s, %d", StartIDx, StartID)
+
+	if StartID != 0 {
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
+	}
+	if EndID != 0 {
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
+	}
+	//if SharedPoolID != 0 {
+	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+	//}
+	if RMgr != nil {
+		PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+	}
+}
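+
+// Worked example (illustrative): if the map currently holds onu_id_start = 1
+// and onu_id_end = 255, then
+//
+//	rm.UpdateRanges(ctx, ONU_ID_START_IDX, 8, ONU_ID_END_IDX, 127, ONU_ID_SHARED_IDX, 0, nil)
+//
+// narrows the pool to [8, 127]: the start only ever moves up and the end only
+// ever moves down, so KV-provided ranges can tighten defaults but never widen them.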
+
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ctx context.Context,
+	ONUIDStart uint32,
+	ONUIDEnd uint32,
+	ONUIDSharedPoolID uint32,
+	AllocIDStart uint32,
+	AllocIDEnd uint32,
+	AllocIDSharedPoolID uint32,
+	GEMPortIDStart uint32,
+	GEMPortIDEnd uint32,
+	GEMPortIDSharedPoolID uint32,
+	FlowIDStart uint32,
+	FlowIDEnd uint32,
+	FlowIDSharedPoolID uint32,
+	UNIIDStart uint32,
+	UNIIDEnd uint32,
+	NoOfPONPorts uint32,
+	IntfIDs []uint32) bool {
+
+	/*Initialize default PON resource ranges
+
+	  :param onu_id_start_idx: onu id start index
+	  :param onu_id_end_idx: onu id end index
+	  :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+	  :param alloc_id_start_idx: alloc id start index
+	  :param alloc_id_end_idx: alloc id end index
+	  :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+	  :param gemport_id_start_idx: gemport id start index
+	  :param gemport_id_end_idx: gemport id end index
+	  :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+	  :param flow_id_start_idx: flow id start index
+	  :param flow_id_end_idx: flow id end index
+	  :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+	  :param num_of_pon_ports: number of PON ports
+	  :param intf_ids: interfaces serviced by this manager
+	*/
+	PONRMgr.UpdateRanges(ctx, ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ctx, UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+	logger.Debug(ctx, "Initialize default range values")
+	var i uint32
+	if IntfIDs == nil {
+		for i = 0; i < NoOfPONPorts; i++ {
+			PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+		}
+	} else {
+		PONRMgr.IntfIDs = IntfIDs
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePool(ctx context.Context) error {
+
+	//Initialize resource pool for all PON ports.
+
+	logger.Debug(ctx, "Init resource ranges")
+
+	var err error
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ONU_ID,
+			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+			logger.Error(ctx, "Failed to init ONU ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ALLOC_ID,
+			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+			logger.Error(ctx, "Failed to init ALLOC ID resource pool ")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, GEMPORT_ID,
+			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+			logger.Error(ctx, "Failed to init GEMPORT ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, FLOW_ID,
+			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+			logger.Error(ctx, "Failed to init FLOW ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePoolForIntf(ctx context.Context, intfID uint32) error {
+
+	logger.Debug(ctx, "Init resource ranges for intf %d", intfID)
+
+	var err error
+
+	if err = PONRMgr.InitResourceIDPool(ctx, intfID, ONU_ID,
+		PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+		PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+		logger.Error(ctx, "Failed to init ONU ID resource pool")
+		return err
+	}
+
+	if err = PONRMgr.InitResourceIDPool(ctx, intfID, ALLOC_ID,
+		PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+		PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+		logger.Error(ctx, "Failed to init ALLOC ID resource pool ")
+		return err
+	}
+
+	if err = PONRMgr.InitResourceIDPool(ctx, intfID, GEMPORT_ID,
+		PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+		PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+		logger.Error(ctx, "Failed to init GEMPORT ID resource pool")
+		return err
+	}
+
+	if err = PONRMgr.InitResourceIDPool(ctx, intfID, FLOW_ID,
+		PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+		PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+		logger.Error(ctx, "Failed to init FLOW ID resource pool")
+		return err
+	}
+
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePool(ctx context.Context) error {
+
+	//Clear resource pool for all PON ports.
+
+	logger.Debug(ctx, "Clear resource ranges")
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ONU_ID); !status {
+			logger.Error(ctx, "Failed to clear ONU ID resource pool")
+			return errors.New("Failed to clear ONU ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ALLOC_ID); !status {
+			logger.Error(ctx, "Failed to clear ALLOC ID resource pool ")
+			return errors.New("Failed to clear ALLOC ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, GEMPORT_ID); !status {
+			logger.Error(ctx, "Failed to clear GEMPORT ID resource pool")
+			return errors.New("Failed to clear GEMPORT ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, FLOW_ID); !status {
+			logger.Error(ctx, "Failed to clear FLOW ID resource pool")
+			return errors.New("Failed to clear FLOW ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePoolForIntf(ctx context.Context, intfID uint32) error {
+
+	logger.Debugf(ctx, "Clear resource ranges for intf %d", intfID)
+
+	if status := PONRMgr.ClearResourceIDPool(ctx, intfID, ONU_ID); !status {
+		logger.Error(ctx, "Failed to clear ONU ID resource pool")
+		return errors.New("Failed to clear ONU ID resource pool")
+	}
+
+	if status := PONRMgr.ClearResourceIDPool(ctx, intfID, ALLOC_ID); !status {
+		logger.Error(ctx, "Failed to clear ALLOC ID resource pool ")
+		return errors.New("Failed to clear ALLOC ID resource pool")
+	}
+
+	if status := PONRMgr.ClearResourceIDPool(ctx, intfID, GEMPORT_ID); !status {
+		logger.Error(ctx, "Failed to clear GEMPORT ID resource pool")
+		return errors.New("Failed to clear GEMPORT ID resource pool")
+	}
+
+	if status := PONRMgr.ClearResourceIDPool(ctx, intfID, FLOW_ID); !status {
+		logger.Error(ctx, "Failed to clear FLOW ID resource pool")
+		return errors.New("Failed to clear FLOW ID resource pool")
+	}
+
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) InitResourceIDPool(ctx context.Context, Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
+
+	/*Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+	  :param pon_intf_id: OLT PON interface id
+	  :param resource_type: String to identify type of resource
+	  :param start_idx: start index for onu id pool
+	  :param end_idx: end index for onu id pool
+	  :return boolean: True if resource id pool initialized else false
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.InitResourceIDPool(ctx, Intf, ResourceType, StartID, EndID)
+	}
+
+	Path := PONRMgr.GetPath(ctx, Intf, ResourceType)
+	if Path == "" {
+		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
+		return fmt.Errorf("Failed to get path for resource type %s", ResourceType)
+	}
+
+	//In case of adapter reboot and reconciliation, the resource in the KV store
+	//is checked for its presence; if absent, the KV store is updated.
+	Res, err := PONRMgr.GetResource(ctx, Path)
+	if (err == nil) && (Res != nil) {
+		logger.Debugf(ctx, "Resource %s already present in store ", Path)
+		return nil
+	} else {
+		var excluded []uint32
+		if ResourceType == GEMPORT_ID {
+			//get gem port ids defined in the KV store, if any, and exclude them from the gem port id pool
+			if reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx); defined {
+				excluded = reservedGemPortIds
+				logger.Debugw(ctx, "Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
+			}
+		}
+		FormatResult, err := PONRMgr.FormatResource(ctx, Intf, StartID, EndID, excluded)
+		if err != nil {
+			logger.Errorf(ctx, "Failed to format resource")
+			return err
+		}
+		// Add resource as json in kv store.
+		err = PONRMgr.KVStore.Put(ctx, Path, FormatResult)
+		if err == nil {
+			logger.Debug(ctx, "Successfuly posted to kv store")
+			return err
+		}
+	}
+
+	logger.Debug(ctx, "Error initializing pool")
+
+	return err
+}
+
+func (PONRMgr *PONResourceManager) getReservedGemPortIdsFromKVStore(ctx context.Context) ([]uint32, bool) {
+	var reservedGemPortIds []uint32
+	// read reserved gem ports from the config path
+	KvPair, err := PONRMgr.KVStoreForConfig.Get(ctx, RESERVED_GEMPORT_IDS_PATH)
+	if err != nil {
+		logger.Errorw(ctx, "Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	if KvPair == nil || KvPair.Value == nil {
+		//no reserved gem port defined in the store
+		return reservedGemPortIds, false
+	}
+	Val, err := kvstore.ToByte(KvPair.Value)
+	if err != nil {
+		logger.Errorw(ctx, "Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	if err = json.Unmarshal(Val, &reservedGemPortIds); err != nil {
+		logger.Errorw(ctx, "Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	return reservedGemPortIds, true
+}
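+
+// Illustrative sketch (not part of the upstream library): the reserved list is
+// expected to be a JSON array of numbers stored via KVStoreForConfig, e.g. the
+// value
+//
+//	[1023, 1024, 1025]
+//
+// under the "reserved_gemport_ids" key keeps those IDs out of every GEM port
+// pool built by InitResourceIDPool.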
+
+func (PONRMgr *PONResourceManager) FormatResource(ctx context.Context, IntfID uint32, StartIDx uint32, EndIDx uint32,
+	Excluded []uint32) ([]byte, error) {
+	/*
+	   Format resource as json.
+	   :param pon_intf_id: OLT PON interface id
+	   :param start_idx: start index for id pool
+	   :param end_idx: end index for id pool
+	   :param excluded: ID values to be excluded from the pool
+	   :return dictionary: resource formatted as map
+	*/
+	// Format resource as json to be stored in backend store
+	Resource := make(map[string]interface{})
+	Resource[PON_INTF_ID] = IntfID
+	Resource[START_IDX] = StartIDx
+	Resource[END_IDX] = EndIDx
+	/*
+	   Resource pool stored in backend store as binary string.
+	   Tracking the resource allocation will be done by setting the bits
+	   in the byte array. The index set will be the resource number allocated.
+	*/
+	var TSData *bitmap.Threadsafe
+	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+		logger.Error(ctx, "Failed to create a bitmap")
+		return nil, errors.New("Failed to create bitmap")
+	}
+	for _, excludedID := range Excluded {
+		if excludedID < StartIDx || excludedID > EndIDx {
+			logger.Warnf(ctx, "Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
+				StartIDx, EndIDx)
+			continue
+		}
+		PONRMgr.reserveID(ctx, TSData, StartIDx, excludedID)
+	}
+	Resource[POOL] = TSData.Data(false) // pass false so the TSData library does not copy the data before returning it
+
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to marshall resource")
+		return nil, err
+	}
+	return Value, err
+}
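+
+// Illustrative example (not part of the upstream library) of the JSON produced
+// for interface 0 with range [1024, 2816] and no exclusions; the pool is the
+// bitmap's backing bytes, which json.Marshal base64-encodes:
+//
+//	{"end_idx":2816,"pon_intf_id":0,"pool":"AAAA...","start_idx":1024}
+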
+func (PONRMgr *PONResourceManager) GetResource(ctx context.Context, Path string) (map[string]interface{}, error) {
+	/*
+	   Get resource from kv store.
+
+	   :param path: path to get resource
+	   :return: resource if resource present in kv store else None
+	*/
+	//get resource from kv store
+
+	var Value []byte
+	Result := make(map[string]interface{})
+	var Str string
+
+	Resource, err := PONRMgr.KVStore.Get(ctx, Path)
+	if (err != nil) || (Resource == nil) {
+		logger.Debugf(ctx, "Resource  unavailable at %s", Path)
+		return nil, err
+	}
+
+	Value, err = ToByte(Resource.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	// decode resource fetched from backend store to dictionary
+	err = json.Unmarshal(Value, &Result)
+	if err != nil {
+		logger.Error(ctx, "Failed to decode resource")
+		return Result, err
+	}
+	/*
+	   The resource pool is stored in the backend store as a binary string;
+	   to access the pool to generate/release IDs it needs to be converted
+	   to a BitArray.
+	*/
+	Str, err = ToString(Result[POOL])
+	if err != nil {
+		logger.Error(ctx, "Failed to conver to kv pair to string")
+		return Result, err
+	}
+	Decode64, _ := base64.StdEncoding.DecodeString(Str)
+	Result[POOL], err = ToByte(Decode64)
+	if err != nil {
+		logger.Error(ctx, "Failed to convert resource pool to byte")
+		return Result, err
+	}
+
+	return Result, err
+}
+
+func (PONRMgr *PONResourceManager) GetPath(ctx context.Context, IntfID uint32, ResourceType string) string {
+	/*
+	   Get path for given resource type.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :return: path for given resource type
+	*/
+
+	/*
+	   Get the shared pool for the given resource type.
+	   all the resource ranges and the shared resource maps are initialized during the init.
+	*/
+	SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+	if SharedPoolID != 0 {
+		IntfID = SharedPoolID
+	}
+	var Path string
+	if ResourceType == ONU_ID {
+		Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == ALLOC_ID {
+		Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == GEMPORT_ID {
+		Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == FLOW_ID {
+		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else {
+		logger.Error(ctx, "Invalid resource pool identifier")
+	}
+	return Path
+}
+
+func (PONRMgr *PONResourceManager) GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
+	/*
+	   Create alloc/gemport/onu/flow id for given OLT PON interface.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :param num_of_id: required number of ids
+	   :return list/uint32/None: list, uint32 or None if resource type is
+	    alloc_id/gemport_id, onu_id or invalid type respectively
+	*/
+
+	logger.Debugw(ctx, "getting-resource-id", log.Fields{
+		"intf-id":       IntfID,
+		"resource-type": ResourceType,
+		"num":           NumIDs,
+	})
+
+	if NumIDs < 1 {
+		logger.Error(ctx, "Invalid number of resources requested")
+		return nil, fmt.Errorf("Invalid number of resources requested %d", NumIDs)
+	}
+	// delegate to the master instance if sharing enabled across instances
+
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
+	}
+	logger.Debugf(ctx, "Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+
+	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
+	if Path == "" {
+		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
+		return nil, fmt.Errorf("Failed to get path for resource type %s", ResourceType)
+	}
+	logger.Debugf(ctx, "Get resource for type %s on path %s", ResourceType, Path)
+	var Result []uint32
+	var NextID uint32
+	Resource, err := PONRMgr.GetResource(ctx, Path)
+	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
+		if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+			logger.Error(ctx, "Failed to Generate ID")
+			return Result, err
+		}
+		Result = append(Result, NextID)
+	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+		if NumIDs == 1 {
+			if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+				logger.Error(ctx, "Failed to Generate ID")
+				return Result, err
+			}
+			Result = append(Result, NextID)
+		} else {
+			for NumIDs > 0 {
+				if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
+					logger.Error(ctx, "Failed to Generate ID")
+					return Result, err
+				}
+				Result = append(Result, NextID)
+				NumIDs--
+			}
+		}
+	} else {
+		logger.Error(ctx, "get resource failed")
+		return Result, err
+	}
+
+	//Update resource in kv store
+	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		return nil, fmt.Errorf("Failed to update resource %s", Path)
+	}
+	return Result, nil
+}
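+
+// Illustrative allocation/release round trip (not part of the upstream library):
+//
+//	onuIDs, err := rm.GetResourceID(ctx, intfID, ONU_ID, 1)
+//	if err != nil { /* handle */ }
+//	onuID := onuIDs[0]
+//	// ... later, when the ONU is deleted:
+//	_ = rm.FreeResourceID(ctx, intfID, ONU_ID, []uint32{onuID})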
+
+func checkValidResourceType(ResourceType string) bool {
+	KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+
+	for _, v := range KnownResourceTypes {
+		if v == ResourceType {
+			return true
+		}
+	}
+	return false
+}
+
+func (PONRMgr *PONResourceManager) FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error {
+	/*
+	  Release alloc/gemport/onu/flow id for given OLT PON interface.
+	  :param pon_intf_id: OLT PON interface id
+	  :param resource_type: String to identify type of resource
+	  :param release_content: required number of ids
+	  :return boolean: True if all IDs in given release_content release else False
+	*/
+
+	logger.Debugw(ctx, "freeing-resource-id", log.Fields{
+		"intf-id":         IntfID,
+		"resource-type":   ResourceType,
+		"release-content": ReleaseContent,
+	})
+
+	if !checkValidResourceType(ResourceType) {
+		err := fmt.Errorf("Invalid resource type: %s", ResourceType)
+		logger.Error(ctx, err.Error())
+		return err
+	}
+	if ReleaseContent == nil {
+		err := fmt.Errorf("Nothing to release")
+		logger.Debug(ctx, err.Error())
+		return err
+	}
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
+	}
+	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
+	if Path == "" {
+		err := fmt.Errorf("Failed to get path for IntfId %d and ResourceType %s", IntfID, ResourceType)
+		logger.Error(ctx, err.Error())
+		return err
+	}
+	Resource, err := PONRMgr.GetResource(ctx, Path)
+	if err != nil {
+		logger.Error(ctx, err.Error())
+		return err
+	}
+	for _, Val := range ReleaseContent {
+		PONRMgr.ReleaseID(ctx, Resource, Val)
+	}
+	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
+		err := fmt.Errorf("Free resource for %s failed", Path)
+		logger.Errorf(ctx, err.Error())
+		return err
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) UpdateResource(ctx context.Context, Path string, Resource map[string]interface{}) error {
+	/*
+	   Update resource in resource kv store.
+	   :param path: path to update resource
+	   :param resource: resource need to be updated
+	   :return boolean: True if resource updated in kv store else False
+	*/
+	// TODO resource[POOL] = resource[POOL].bin
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		logger.Error(ctx, "failed to Marshal")
+		return err
+	}
+	err = PONRMgr.KVStore.Put(ctx, Path, Value)
+	if err != nil {
+		logger.Error(ctx, "failed to put data to kv store %s", Path)
+		return err
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearResourceIDPool(ctx context.Context, contIntfID uint32, ResourceType string) bool {
+	/*
+	   Clear Resource Pool for a given Resource Type on a given PON Port.
+	   :return boolean: True if removed else False
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.ClearResourceIDPool(ctx, contIntfID, ResourceType)
+	}
+	Path := PONRMgr.GetPath(ctx, contIntfID, ResourceType)
+	if Path == "" {
+		logger.Error(ctx, "Failed to get path")
+		return false
+	}
+
+	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
+		logger.Errorf(ctx, "Failed to delete resource %s", Path)
+		return false
+	}
+	logger.Debugf(ctx, "Cleared resource %s", Path)
+	return true
+}
+
+func (PONRMgr PONResourceManager) InitResourceMap(ctx context.Context, PONIntfONUID string) {
+	/*
+	   Initialize resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// initialize pon_intf_onu_id tuple to alloc_ids map
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var AllocIDs []byte
+	Result := PONRMgr.KVStore.Put(ctx, AllocIDPath, AllocIDs)
+	if Result != nil {
+		logger.Error(ctx, "Failed to update the KV store")
+		return
+	}
+	// initialize pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var GEMPortIDs []byte
+	Result = PONRMgr.KVStore.Put(ctx, GEMPortIDPath, GEMPortIDs)
+	if Result != nil {
+		logger.Error(ctx, "Failed to update the KV store")
+		return
+	}
+}
+
+func (PONRMgr PONResourceManager) RemoveResourceMap(ctx context.Context, PONIntfONUID string) bool {
+	/*
+	   Remove resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// remove pon_intf_onu_id tuple to alloc_ids map
+	var err error
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if err = PONRMgr.KVStore.Delete(ctx, AllocIDPath); err != nil {
+		logger.Errorf(ctx, "Failed to remove resource %s", AllocIDPath)
+		return false
+	}
+	// remove pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	err = PONRMgr.KVStore.Delete(ctx, GEMPortIDPath)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to remove resource %s", GEMPortIDPath)
+		return false
+	}
+
+	FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if FlowIDs, err := PONRMgr.KVStore.List(ctx, FlowIDPath); err == nil {
+		for _, Flow := range FlowIDs {
+			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
+			if err = PONRMgr.KVStore.Delete(ctx, FlowIDInfoPath); err != nil {
+				logger.Errorf(ctx, "Failed to remove resource %s", FlowIDInfoPath)
+				return false
+			}
+		}
+	}
+
+	if err = PONRMgr.KVStore.Delete(ctx, FlowIDPath); err != nil {
+		logger.Errorf(ctx, "Failed to remove resource %s", FlowIDPath)
+		return false
+	}
+
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of alloc_ids if available, else None
+	*/
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
+				return Data
+			}
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of gemport IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	logger.Debugf(ctx, "Getting current gemports for %s", Path)
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, _ := ToByte(Value.Value)
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	} else {
+		logger.Errorf(ctx, "Failed to get data from kvstore for %s", Path)
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured flow ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of Flow IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, _ := ToByte(Value.Value)
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				logger.Error(ctx, "Failed to unmarshal")
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(ctx context.Context, IntfONUID string, FlowID uint32, Data interface{}) error {
+	/*
+	   Get flow details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	   :param Data: Result
+	   :return error: nil if no error in getting from KV store
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				logger.Errorw(ctx, "Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				return err
+			}
+			if err = json.Unmarshal(Val, Data); err != nil {
+				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(ctx context.Context, IntfONUID string, FlowID uint32) bool {
+	/*
+	   Remove flow_id details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	*/
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
+		logger.Errorf(ctx, "Falied to remove resource %s", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) RemoveAllFlowIDInfo(ctx context.Context, IntfONUID string) bool {
+	/*
+	    Remove flow_id_info details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH_INTF_ONU_PREFIX, PONRMgr.DeviceID, IntfONUID)
+
+	if err := PONRMgr.KVStore.DeleteWithPrefix(ctx, Path); err != nil {
+		logger.Errorf(ctx, "Falied to remove resource %s", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(ctx context.Context, IntfONUID string, AllocIDs []uint32) error {
+	/*
+	   Update currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param alloc_ids: list of alloc ids
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	Value, err = json.Marshal(AllocIDs)
+	if err != nil {
+		logger.Error(ctx, "failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(ctx context.Context, IntfONUID string, GEMPortIDs []uint32) error {
+	/*
+	   Update currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param gemport_ids: list of gem port ids
+	*/
+
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	logger.Debugf(ctx, "Updating gemport ids for %s", Path)
+	Value, err = json.Marshal(GEMPortIDs)
+	if err != nil {
+		logger.Error(ctx, "failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
+	/*
+	   Check for a flow id in a given list of flow IDs.
+	   :param FlowIDList: List of Flow IDs
+	   :param FlowID: Flow ID to check for in the list
+	   :return: true and the index if present, false otherwise.
+	*/
+
+	for idx := range FlowIDList {
+		if FlowID == FlowIDList[idx] {
+			return true, uint32(idx)
+		}
+	}
+	return false, 0
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(ctx context.Context, IntfONUID string, FlowID uint32, Add bool) error {
+	/*
+	   Update the flow_id list of the ONU (add or remove flow_id from the list)
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: flow ID
+	   :param add: Boolean flag to indicate whether the flow_id should be
+	               added or removed from the list. Defaults to adding the flow.
+	*/
+	var Value []byte
+	var err error
+	var RetVal bool
+	var IDx uint32
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(ctx, IntfONUID)
+
+	if Add {
+		if RetVal, _ = checkForFlowIDInList(FlowIDs, FlowID); RetVal {
+			return nil
+		}
+		FlowIDs = append(FlowIDs, FlowID)
+	} else {
+		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); !RetVal {
+			return nil
+		}
+		// delete the index and shift
+		FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+	}
+	Value, err = json.Marshal(FlowIDs)
+	if err != nil {
+		logger.Error(ctx, "Failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(ctx context.Context, IntfONUID string, FlowID uint32, FlowData interface{}) error {
+	/*
+	   Update any metadata associated with the flow_id. The flow_data could be JSON
+	   or any other data structure; the resource manager doesn't care.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow ID
+	   :param flow_data: Flow data blob
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+	Value, err = json.Marshal(FlowData)
+	if err != nil {
+		logger.Error(ctx, "failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) GenerateNextID(ctx context.Context, Resource map[string]interface{}) (uint32, error) {
+	/*
+	   Generate unique id having OFFSET as start
+	   :param resource: resource used to generate ID
+	   :return uint32: generated id
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		logger.Error(ctx, "Failed to convert resource to byte array")
+		return 0, err
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		logger.Error(ctx, "Failed to get data from byte array")
+		return 0, errors.New("Failed to get data from byte array")
+	}
+
+	Len := Data.Len()
+	var Idx int
+	for Idx = 0; Idx < Len; Idx++ {
+		if !Data.Get(Idx) {
+			break
+		}
+	}
+	Data.Set(Idx, true)
+	res := uint32(Resource[START_IDX].(float64))
+	Resource[POOL] = Data.Data(false)
+	logger.Debugf(ctx, "Generated ID for %d", (uint32(Idx) + res))
+	return (uint32(Idx) + res), err
+}
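+
+// Worked example (illustrative): with start_idx 1024 and pool bits 0, 1 and 2
+// already set, the first clear index is 3, so the generated ID is
+// 1024 + 3 = 1027, and bit 3 is marked used before the pool is written back.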
+
+func (PONRMgr *PONResourceManager) ReleaseID(ctx context.Context, Resource map[string]interface{}, Id uint32) bool {
+	/*
+	   Release unique id having OFFSET as start index.
+	   :param resource: resource used to release ID
+	   :param unique_id: id need to be released
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		logger.Error(ctx, "Failed to convert resource to byte array")
+		return false
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		logger.Error(ctx, "Failed to get resource pool")
+		return false
+	}
+	Idx := Id - uint32(Resource[START_IDX].(float64))
+	Data.Set(int(Idx), false)
+	Resource[POOL] = Data.Data(false)
+
+	return true
+}
+
+/* Reserves a unique id in the specified resource pool.
+:param Resource: resource used to reserve ID
+:param Id: ID to be reserved
+*/
+func (PONRMgr *PONResourceManager) reserveID(ctx context.Context, TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
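+	// Note: TSData.Data(false) hands back the bitmap's underlying byte slice
+	// without copying, and TSFromData(..., false) wraps it in place, so setting
+	// a bit on Data below also marks it in the caller's TSData bitmap.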
+	Data := bitmap.TSFromData(TSData.Data(false), false)
+	if Data == nil {
+		logger.Error(ctx, "Failed to get resource pool")
+		return false
+	}
+	Idx := Id - StartIndex
+	Data.Set(int(Idx), true)
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+	return PONRMgr.Technology
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+	return ALLOC_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+	return GEMPORT_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeOnuID() string {
+	return ONU_ID
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
new file mode 100644
index 0000000..119d78e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
new file mode 100644
index 0000000..b66f398
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"net/http"
+	"sync"
+)
+
+// ProbeContextKeyType is the type of the context key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus typed values for service status
+type ServiceStatus int
+
+const (
+	// ServiceStatusUnknown initial state of services
+	ServiceStatusUnknown ServiceStatus = iota
+
+	// ServiceStatusPreparing to optionally be used for prep, such as connecting
+	ServiceStatusPreparing
+
+	// ServiceStatusPrepared to optionally be used when prep is complete, but before run
+	ServiceStatusPrepared
+
+	// ServiceStatusRunning service is functional
+	ServiceStatusRunning
+
+	// ServiceStatusStopped service has stopped, but not because of error
+	ServiceStatusStopped
+
+	// ServiceStatusFailed service has stopped because of an error
+	ServiceStatusFailed
+
+	// ServiceStatusNotReady service has started but is unable to accept requests
+	ServiceStatusNotReady
+)
+
+const (
+	// ProbeContextKey value of context key to fetch probe
+	ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String convert ServiceStatus values to strings
+func (s ServiceStatus) String() string {
+	switch s {
+	default:
+		fallthrough
+	case ServiceStatusUnknown:
+		return "Unknown"
+	case ServiceStatusPreparing:
+		return "Preparing"
+	case ServiceStatusPrepared:
+		return "Prepared"
+	case ServiceStatusRunning:
+		return "Running"
+	case ServiceStatusStopped:
+		return "Stopped"
+	case ServiceStatusFailed:
+		return "Failed"
+	case ServiceStatusNotReady:
+		return "NotReady"
+	}
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+	Name   string
+	Status ServiceStatus
+}
+
+// Probe receiver on which to implement probe capabilities
+type Probe struct {
+	readyFunc  func(map[string]ServiceStatus) bool
+	healthFunc func(map[string]ServiceStatus) bool
+
+	mutex     sync.RWMutex
+	status    map[string]ServiceStatus
+	isReady   bool
+	isHealthy bool
+}
+
+// WithReadyFunc override the default ready calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.readyFunc = readyFunc
+	return p
+}
+
+// WithHealthFunc override the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.healthFunc = healthFunc
+	return p
+}
+
+// RegisterService registers one or more service names with the probe; status will be tracked against the service name
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+	for _, name := range names {
+		if _, ok := p.status[name]; !ok {
+			p.status[name] = ServiceStatusUnknown
+			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
+		}
+	}
+
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+}
+
+// UpdateStatus utility function to send a service update to the probe
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	// if status hasn't changed, avoid doing useless work
+	existingStatus, ok := p.status[name]
+	if ok && (existingStatus == status) {
+		return
+	}
+
+	p.status[name] = status
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+	logger.Debugw(ctx, "probe-service-status-updated",
+		log.Fields{
+			"service-name": name,
+			"status":       status.String(),
+			"ready":        p.isReady,
+			"health":       p.isHealthy,
+		})
+}
+
+func (p *Probe) GetStatus(name string) ServiceStatus {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	currentStatus, ok := p.status[name]
+	if ok {
+		return currentStatus
+	}
+
+	return ServiceStatusUnknown
+}
+
+func GetProbeFromContext(ctx context.Context) *Probe {
+	if ctx != nil {
+		if value := ctx.Value(ProbeContextKey); value != nil {
+			if p, ok := value.(*Probe); ok {
+				return p
+			}
+		}
+	}
+	return nil
+}
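+
+// Illustrative sketch (not part of the upstream library): the component that
+// owns the probe stashes it in the context so downstream code can report
+// status without holding a reference:
+//
+//	p := &Probe{}
+//	ctx := context.WithValue(context.Background(), ProbeContextKey, p)
+//	UpdateStatusFromContext(ctx, "kv-store", ServiceStatusRunning)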
+
+// UpdateStatusFromContext is a convenience function that pulls the Probe reference from the
+// Context, if it exists, and then calls UpdateStatus on that Probe reference. If Context
+// is nil or if a Probe reference is not associated with the ProbeContextKey then nothing
+// happens
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+	p := GetProbeFromContext(ctx)
+	if p != nil {
+		p.UpdateStatus(ctx, name, status)
+	}
+}
+
+// pulled out into a function to make unit testing easier
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isReady {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isHealthy {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	ctx := context.Background()
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	if _, err := w.Write([]byte("{")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	comma := ""
+	for c, s := range p.status {
+		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		comma = ", "
+	}
+	if _, err := w.Write([]byte("}")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+// ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
+	mux := http.NewServeMux()
+
+	// Returns the result of the readyFunc calculation
+	mux.HandleFunc("/readz", p.readzFunc)
+
+	// Returns the result of the healthFunc calculation
+	mux.HandleFunc("/healthz", p.healthzFunc)
+
+	// Returns the details of the services and their status as JSON
+	mux.HandleFunc("/detailz", p.detailzFunc)
+	s := &http.Server{
+		Addr:    address,
+		Handler: mux,
+	}
+	logger.Fatal(ctx, s.ListenAndServe())
+}
+
+func (p *Probe) IsReady() bool {
+	return p.isReady
+}
+
+// defaultReadyFunc if all services are running then ready, else not
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status != ServiceStatusRunning {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultHealthFunc if no service is stopped or failed, then healthy, else not.
+// services start as Unknown, so they are considered healthy
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status == ServiceStatusStopped || status == ServiceStatusFailed {
+			return false
+		}
+	}
+	return true
+}
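+
+// Illustrative end-to-end usage (not part of the upstream library):
+//
+//	p := &Probe{}
+//	p.RegisterService(ctx, "kv-store", "message-bus")
+//	go p.ListenAndServe(ctx, "0.0.0.0:8080")
+//	// ... once the KV store connects:
+//	p.UpdateStatus(ctx, "kv-store", ServiceStatusRunning)
+//	// GET /readz returns 418 until every service is Running, then 200.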
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/4QueueHybridProfileMap1.json
new file mode 100644
index 0000000..d11f8e4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/4QueueHybridProfileMap1.json
@@ -0,0 +1,141 @@
+ {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/README.md
new file mode 100644
index 0000000..88d6564
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/README.md
@@ -0,0 +1,344 @@
+Technology Profile Management
+Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key is set to the JSON data that represents the technology profile values.
+
+NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
+
+Example JSON:
+
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Creating Technology Profiles
+Technology profiles are a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. JQ can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP POST operation using Curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster, you can access the etcd key/value store using kubectl via the pods named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 is used, but any of them can be used.
+
+The techprofile module uses etcd API version 3. Export this variable before running the etcdctl commands below: export ETCDCTL_API=3
+
+Assuming the technology profile template is stored in a local file 4QueueHybridProfileMap1.json, the following commands can be used to store or update the template at the proper location in the etcd key/value store:
+
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl put service/voltha/technology_profiles/XGS-PON/64
+
+jq -c . 4QueueHybridProfileMap1.json |  etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/XGS-PON/64
+
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/XGS-PON/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compact the JSON. Using this tool is not necessary; if you choose not to use it, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
+
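+For programmatic access from Go, the sketch below stores the same template using the etcd v3 Go client. This is a minimal illustration, not part of VOLTHA: the endpoint, key, and file name are placeholders taken from the examples above, and the client import path varies with the etcd client version in use.
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3" // import path depends on the etcd client version
+)
+
+func main() {
+	// Connect to etcd (placeholder endpoint from the curl example above).
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"10.233.53.161:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	// Read the technology profile template from the local file.
+	data, err := ioutil.ReadFile("4QueueHybridProfileMap1.json")
+	if err != nil {
+		panic(err)
+	}
+
+	// Store the template under the same key used in the etcdctl examples.
+	if _, err = cli.Put(context.Background(), "service/voltha/technology_profiles/XGS-PON/64", string(data)); err != nil {
+		panic(err)
+	}
+	fmt.Println("stored technology profile template")
+}
+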
+Listing Technology Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available technology profiles, etcdctl is easier and is therefore used in the examples. To list technology profiles, use etcdctl get; it can be combined with the --prefix option to list all profiles under a key prefix.
+
+
+# List a tech profile
+etcdctl --endpoints=<EtcdIPAddres>:2379 get  service/voltha/technology_profiles/XGS-PON/64
+
+
+A specified technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes and is not required.)
+
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/XGS-PON/64 | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/XGS-PON/64
+# Example output
+service/voltha/technology_profiles/XGS-PON/64/uni-1
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+# Display a specified Technology profile instance, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0} | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0}
+# Example output
+service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0}
+{"name":"Default_1tcont_1gem_Profile","subscriber_identifier":"pon-{0}/onu-{1}/uni-{0}","profile_type":"XGS-PON","version":1,"num_gem_ports":1,"instance_control":{"ONU":"multi-instance","uni":"single-instance","max_gem_payload_size":"auto"},"us_scheduler":{"alloc_id":1024,"direction":"UPSTREAM","additional_bw":"AdditionalBW_BestEffort","priority":0,"weight":0,"q_sched_policy":"Hybrid"},"ds_scheduler":{"alloc_id":1024,"direction":"DOWNSTREAM","additional_bw":"AdditionalBW_BestEffort","priority":0,"weight":0,"q_sched_policy":"Hybrid"},"upstream_gem_port_attribute_list":[{"gemport_id":1024,"max_q_size":"auto","pbit_map":"0b11111111","aes_encryption":"True","scheduling_policy":"WRR","priority_q":0,"weight":0,"discard_policy":"TailDrop","discard_config":{"min_threshold":0,"max_threshold":0,"max_probability":0},"is_multicast":"","dynamic_access_control_list":"","static_access_control_list":"","multicast_gem_id":0}],"downstream_gem_port_attribute_list":[{"gemport_id":1024,"max_q_size":"auto","pbit_map":"0b11111111","aes_encryption":"True","scheduling_policy":"WRR","priority_q":0,"weight":0,"discard_policy":"TailDrop","discard_config":{"min_threshold":0,"max_threshold":0,"max_probability":0},"is_multicast":"","dynamic_access_control_list":"","static_access_control_list":"","multicast_gem_id":0}]}
+
+Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl del.
+
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl del service/voltha/technology_profiles/XGS-PON/64
+
+# Remove all technology profiles associated with technology XGS-PON and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl del --prefix service/voltha/technology_profiles/XGS-PON/64
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json
new file mode 100644
index 0000000..4015251
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json
@@ -0,0 +1,61 @@
+{
+    "name": "SingleQueueEponProfile",
+    "profile_type": "EPON",
+    "version": 1,
+    "num_gem_ports": 1,
+    "instance_control": {
+      "onu": "multi-instance",
+      "uni": "single-instance",
+      "max_gem_payload_size": "auto"
+    },
+    "epon_attribute": {
+        "package_type": "B"
+    },
+    "upstream_queue_attribute_list": [
+      {
+        "pbit_map": "0b11111111",
+        "aes_encryption": "False",
+        "traffic_type": "BE",
+        "unsolicited_grant_size": 0,
+        "nominal_interval": 0,
+        "tolerated_poll_jitter": 0,
+        "request_transmission_policy": 0,
+        "num_q_sets": 2,
+        "q_thresholds": {
+          "q_threshold1":5500,
+          "q_threshold2":0,
+          "q_threshold3":0,
+          "q_threshold4":0,
+          "q_threshold5":0,
+          "q_threshold6":0,
+          "q_threshold7":0
+        },
+        "scheduling_policy": "StrictPriority",
+        "priority_q": 4,
+        "weight": 0,
+        "discard_policy": "TailDrop",
+        "max_q_size": "auto",
+        "discard_config": {
+          "min_threshold": 0,
+          "max_threshold": 0,
+          "max_probability": 0
+        }
+      }
+    ],
+    "downstream_queue_attribute_list": [
+      {
+        "pbit_map": "0b11111111",
+        "aes_encryption": "True",
+        "scheduling_policy": "StrictPriority",
+        "priority_q": 4,
+        "weight": 0,
+        "discard_policy": "TailDrop",
+        "max_q_size": "auto",
+        "discard_config": {
+          "min_threshold": 0,
+          "max_threshold": 0,
+          "max_probability": 0
+        }
+      }
+    ]
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go
new file mode 100644
index 0000000..1e89822
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go
new file mode 100644
index 0000000..d13a876
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"time"
+)
+
+// tech profile default constants
+const (
+	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
+	DEFAULT_TECH_PROFILE_TABLE_ID = 64
+	defaultVersion                = 1.0
+	defaultLogLevel               = 0
+	defaultGemportsCount          = 1
+	defaultPbits                  = "0b11111111"
+
+	defaultKVStoreTimeout = 5 * time.Second // KV store request timeout
+
+	// Tech profile path prefix in kv store (for the TP template)
+	// NOTE that this is hardcoded to service/voltha as the TP template is shared across stacks
+	defaultTpKvPathPrefix = "service/voltha/technology_profiles"
+
+	// Tech profile path prefix in kv store (for TP instances)
+	defaultKVPathPrefix = "%s/technology_profiles"
+
+	// Resource instance path prefix in KV store (for Resource Instances)
+	defaultResourceInstancePathPrefix = "%s/resource_instances"
+
+	// Tech profile path in kv store
+	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
+
+)
+
+// Tech-Profile JSON string keys
+// NOTE: A tech profile template JSON file should comply with the keys below
+const (
+	NAME                               = "name"
+	PROFILE_TYPE                       = "profile_type"
+	VERSION                            = "version"
+	NUM_GEM_PORTS                      = "num_gem_ports"
+	INSTANCE_CONTROL                   = "instance_control"
+	US_SCHEDULER                       = "us_scheduler"
+	DS_SCHEDULER                       = "ds_scheduler"
+	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
+	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
+	ONU                                = "onu"
+	UNI                                = "uni"
+	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
+	DIRECTION                          = "direction"
+	ADDITIONAL_BW                      = "additional_bw"
+	PRIORITY                           = "priority"
+	Q_SCHED_POLICY                     = "q_sched_policy"
+	WEIGHT                             = "weight"
+	PBIT_MAP                           = "pbit_map"
+	DISCARD_CONFIG                     = "discard_config"
+	MAX_THRESHOLD                      = "max_threshold"
+	MIN_THRESHOLD                      = "min_threshold"
+	MAX_PROBABILITY                    = "max_probability"
+	DISCARD_POLICY                     = "discard_policy"
+	PRIORITY_Q                         = "priority_q"
+	SCHEDULING_POLICY                  = "scheduling_policy"
+	MAX_Q_SIZE                         = "max_q_size"
+	AES_ENCRYPTION                     = "aes_encryption"
+	// String Keys for EPON
+	EPON_ATTRIBUTE              = "epon_attribute"
+	PACKAGE_TYPE                = "package_type"
+	TRAFFIC_TYPE                = "traffic type"
+	UNSOLICITED_GRANT_SIZE      = "unsolicited_grant_size"
+	NOMINAL_INTERVAL            = "nominal_interval"
+	TOLERATED_POLL_JITTER       = "tolerated_poll_jitter"
+	REQUEST_TRANSMISSION_POLICY = "request_transmission_policy"
+	NUM_Q_SETS                  = "num_q_sets"
+	Q_THRESHOLDS                = "q_thresholds"
+	Q_THRESHOLD1                = "q_threshold1"
+	Q_THRESHOLD2                = "q_threshold2"
+	Q_THRESHOLD3                = "q_threshold3"
+	Q_THRESHOLD4                = "q_threshold4"
+	Q_THRESHOLD5                = "q_threshold5"
+	Q_THRESHOLD6                = "q_threshold6"
+	Q_THRESHOLD7                = "q_threshold7"
+)
+
+// TechprofileFlags represents the set of configurations used
+type TechProfileFlags struct {
+	KVStoreAddress               string
+	KVStoreType                  string
+	KVStoreTimeout               time.Duration
+	KVBackend                    *db.Backend // this is the backend used to store TP instances
+	DefaultTpKVBackend           *db.Backend // this is the backend used to read the TP profile
+	ResourceInstanceKVBacked     *db.Backend // this is the backend used to read/write Resource Instances
+	TPKVPathPrefix               string
+	defaultTpKvPathPrefix        string
+	TPFileKVPath                 string
+	ResourceInstanceKVPathPrefix string
+	DefaultTPName                string
+	TPVersion                    uint32
+	NumGemPorts                  uint32
+	DefaultPbits                 []string
+	LogLevel                     int
+	DefaultTechProfileID         uint32
+	DefaultNumGemPorts           uint32
+}
+
+func NewTechProfileFlags(KVStoreType string, KVStoreAddress string, basePathKvStore string) *TechProfileFlags {
+	// initialize with default values
+	var techProfileFlags = TechProfileFlags{
+		KVBackend:                    nil,
+		KVStoreAddress:               KVStoreAddress,
+		KVStoreType:                  KVStoreType,
+		KVStoreTimeout:               defaultKVStoreTimeout,
+		DefaultTPName:                defaultTechProfileName,
+		TPKVPathPrefix:               fmt.Sprintf(defaultKVPathPrefix, basePathKvStore),
+		defaultTpKvPathPrefix:        defaultTpKvPathPrefix,
+		TPVersion:                    defaultVersion,
+		TPFileKVPath:                 defaultTechProfileKVPath,
+		ResourceInstanceKVPathPrefix: fmt.Sprintf(defaultResourceInstancePathPrefix, basePathKvStore),
+		DefaultTechProfileID:         DEFAULT_TECH_PROFILE_TABLE_ID,
+		DefaultNumGemPorts:           defaultGemportsCount,
+		DefaultPbits:                 []string{defaultPbits},
+		LogLevel:                     defaultLogLevel,
+	}
+
+	return &techProfileFlags
+}
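+
+// Illustrative usage (the base path is a placeholder): calling
+// NewTechProfileFlags("etcd", "etcd-host:2379", "service/voltha") yields
+// TPKVPathPrefix "service/voltha/technology_profiles" and
+// ResourceInstanceKVPathPrefix "service/voltha/resource_instances", while the
+// TP template prefix stays fixed at "service/voltha/technology_profiles".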
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go
new file mode 100644
index 0000000..757118a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go
@@ -0,0 +1,1499 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/opencord/voltha-protos/v4/go/openolt"
+	"regexp"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+)
+
+// Interface to pon resource manager APIs
+type iPonResourceMgr interface {
+	GetResourceID(ctx context.Context, intfID uint32, resourceType string, numIDs uint32) ([]uint32, error)
+	FreeResourceID(ctx context.Context, intfID uint32, resourceType string, ReleaseContent []uint32) error
+	GetResourceTypeAllocID() string
+	GetResourceTypeGemPortID() string
+	GetResourceTypeOnuID() string
+	GetTechnology() string
+}
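+
+// Illustrative sketch (not used by the library code in this file): a minimal
+// iPonResourceMgr stub, e.g. for unit tests. The IDs and resource-type names
+// returned here are invented placeholders.
+type stubPonRsrcMgr struct{}
+
+func (s *stubPonRsrcMgr) GetResourceID(ctx context.Context, intfID uint32, resourceType string, numIDs uint32) ([]uint32, error) {
+	// Hand out a contiguous block of example IDs starting at 1024.
+	ids := make([]uint32, numIDs)
+	for i := range ids {
+		ids[i] = 1024 + uint32(i)
+	}
+	return ids, nil
+}
+
+func (s *stubPonRsrcMgr) FreeResourceID(ctx context.Context, intfID uint32, resourceType string, ReleaseContent []uint32) error {
+	return nil // nothing to free in the stub
+}
+
+func (s *stubPonRsrcMgr) GetResourceTypeAllocID() string   { return "ALLOC_ID" }
+func (s *stubPonRsrcMgr) GetResourceTypeGemPortID() string { return "GEMPORT_ID" }
+func (s *stubPonRsrcMgr) GetResourceTypeOnuID() string     { return "ONU_ID" }
+func (s *stubPonRsrcMgr) GetTechnology() string            { return "XGS-PON" }
+
+// Compile-time check that the stub satisfies the interface.
+var _ iPonResourceMgr = (*stubPonRsrcMgr)(nil)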
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+// Required uniPortName format
+var uniPortNameFormatRegexp = regexp.MustCompile(`^olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
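+
+// For example, "olt-{deadbeef}/pon-{0}/onu-{1}/uni-{0}" conforms to the
+// required format, while "pon-{0}/onu-{1}/uni-{0}" (missing the olt
+// component) does not.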
+
+// instance control defaults
+const (
+	defaultOnuInstance    = "multi-instance"
+	defaultUniInstance    = "single-instance"
+	defaultGemPayloadSize = "auto"
+)
+
+// default discard config constants
+const (
+	defaultMinThreshold   = 0
+	defaultMaxThreshold   = 0
+	defaultMaxProbability = 0
+)
+
+// default scheduler constants
+const (
+	defaultPriority = 0
+	defaultWeight   = 0
+)
+
+// default GEM attribute constants
+const (
+	defaultAESEncryption     = "True"
+	defaultPriorityQueue     = 0
+	defaultQueueWeight       = 0
+	defaultMaxQueueSize      = "auto"
+	defaultIsMulticast       = "False"
+	defaultAccessControlList = "224.0.0.0-239.255.255.255"
+	defaultMcastGemID        = 4069
+)
+
+// Default EPON constants
+const (
+	defaultPakageType = "B"
+)
+const (
+	defaultTrafficType               = "BE"
+	defaultUnsolicitedGrantSize      = 0
+	defaultNominalInterval           = 0
+	defaultToleratedPollJitter       = 0
+	defaultRequestTransmissionPolicy = 0
+	defaultNumQueueSet               = 2
+)
+const (
+	defaultQThreshold1 = 5500
+	defaultQThreshold2 = 0
+	defaultQThreshold3 = 0
+	defaultQThreshold4 = 0
+	defaultQThreshold5 = 0
+	defaultQThreshold6 = 0
+	defaultQThreshold7 = 0
+)
+
+const (
+	xgspon = "XGS-PON"
+	xgpon  = "XGPON"
+	gpon   = "GPON"
+	epon   = "EPON"
+)
+
+const (
+	MaxUniPortPerOnu = 16 // TODO: Adapter uses its own constant for MaxUniPort. How to synchronize this and have a single source of truth?
+)
+
+type TechProfileMgr struct {
+	config                *TechProfileFlags
+	resourceMgr           iPonResourceMgr
+	OnuIDMgmtLock         sync.RWMutex
+	GemPortIDMgmtLock     sync.RWMutex
+	AllocIDMgmtLock       sync.RWMutex
+	tpInstanceMap         map[string]*tp_pb.TechProfileInstance // Map of tp path to tp instance
+	tpInstanceMapLock     sync.RWMutex
+	eponTpInstanceMap     map[string]*tp_pb.EponTechProfileInstance // Map of tp path to epon tp instance
+	epontpInstanceMapLock sync.RWMutex
+	tpMap                 map[uint32]*tp_pb.TechProfile // Map of tp id to tp
+	tpMapLock             sync.RWMutex
+	eponTpMap             map[uint32]*tp_pb.EponTechProfile // map of tp id to epon tp
+	eponTpMapLock         sync.RWMutex
+}
+
+func (t *TechProfileMgr) SetKVClient(ctx context.Context, pathPrefix string) *db.Backend {
+	kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-create-kv-client",
+			log.Fields{
+				"type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
+				"timeout": t.config.KVStoreTimeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+		return nil
+	}
+	return &db.Backend{
+		Client:     kvClient,
+		StoreType:  t.config.KVStoreType,
+		Address:    t.config.KVStoreAddress,
+		Timeout:    t.config.KVStoreTimeout,
+		PathPrefix: pathPrefix}
+
+	/* TODO: Make sure a direct call to NewBackend works with the backend; currently there is an
+	   issue between the kv store and the backend, and core is not calling NewBackend directly.
+	 kv := model.NewBackend(t.config.kvStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+								  t.config.KVStoreTimeout,  kvStoreTechProfilePathPrefix)
+	*/
+}
+
+func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, kvStoreType string, kvStoreAddress string, basePathKvStore string) (*TechProfileMgr, error) {
+	var techprofileObj TechProfileMgr
+	logger.Debug(ctx, "initializing-techprofile-mananger")
+	techprofileObj.config = NewTechProfileFlags(kvStoreType, kvStoreAddress, basePathKvStore)
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.TPKVPathPrefix)
+	techprofileObj.config.DefaultTpKVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.defaultTpKvPathPrefix)
+	if techprofileObj.config.KVBackend == nil {
+		logger.Error(ctx, "failed-to-initialize-backend")
+		return nil, errors.New("kv-backend-init-failed")
+	}
+	techprofileObj.config.ResourceInstanceKVBacked = techprofileObj.SetKVClient(ctx, techprofileObj.config.ResourceInstanceKVPathPrefix)
+	if techprofileObj.config.ResourceInstanceKVBacked == nil {
+		logger.Error(ctx, "failed-to-initialize-resource-instance-kv-backend")
+		return nil, errors.New("resource-instance-kv-backend-init-failed")
+	}
+	techprofileObj.resourceMgr = resourceMgr
+	techprofileObj.tpInstanceMap = make(map[string]*tp_pb.TechProfileInstance)
+	techprofileObj.eponTpInstanceMap = make(map[string]*tp_pb.EponTechProfileInstance)
+	techprofileObj.tpMap = make(map[uint32]*tp_pb.TechProfile)
+	techprofileObj.eponTpMap = make(map[uint32]*tp_pb.EponTechProfile)
+	logger.Debug(ctx, "reconcile-tp-instance-cache-start")
+	if err := techprofileObj.reconcileTpInstancesToCache(ctx); err != nil {
+		logger.Errorw(ctx, "failed-to-reconcile-tp-instances", log.Fields{"err": err})
+		return nil, err
+	}
+	logger.Debug(ctx, "reconcile-tp-instance-cache-end")
+	logger.Debug(ctx, "initializing-tech-profile-manager-object-success")
+	return &techprofileObj, nil
+}
+
+// GetTechProfileInstanceKey returns the tp instance key that is used to reference TP Instance Map
+func (t *TechProfileMgr) GetTechProfileInstanceKey(ctx context.Context, tpID uint32, uniPortName string) string {
+	logger.Debugw(ctx, "get-tp-instance-kv-key", log.Fields{
+		"uniPortName": uniPortName,
+		"tpId":        tpID,
+	})
+	// Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+	if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+		logger.Warnw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+	}
+	// The key path prefix (like service/voltha/technology_profiles or service/voltha_voltha/technology_profiles)
+	// is expected to be attached by the components that use this path as part of the KVBackend configuration.
+	resourceInstanceKvPathSuffix := "%s/%d/%s" // <technology>/<tpID>/<uni-port-name>
+	// <uni-port-name> must be of the format pon-{\d+}/onu-{\d+}/uni-{\d+}
+	return fmt.Sprintf(resourceInstanceKvPathSuffix, t.resourceMgr.GetTechnology(), tpID, uniPortName)
+}
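+
+// For example, assuming a resource manager whose technology is "XGS-PON",
+// tpID 64 and uniPortName "olt-{abc}/pon-{0}/onu-{1}/uni-{0}" (a hypothetical
+// OLT ID) produce the key "XGS-PON/64/olt-{abc}/pon-{0}/onu-{1}/uni-{0}".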
+
+// GetTPInstance gets TP instance from cache if found
+func (t *TechProfileMgr) GetTPInstance(ctx context.Context, path string) (interface{}, error) {
+	tech := t.resourceMgr.GetTechnology()
+	switch tech {
+	case xgspon, xgpon, gpon:
+		t.tpInstanceMapLock.RLock()
+		defer t.tpInstanceMapLock.RUnlock()
+		tpInst, ok := t.tpInstanceMap[path]
+		if !ok {
+			return nil, fmt.Errorf("tp-instance-not-found-tp-path-%v", path)
+		}
+		return tpInst, nil
+	case epon:
+		t.epontpInstanceMapLock.RLock()
+		defer t.epontpInstanceMapLock.RUnlock()
+		tpInst, ok := t.eponTpInstanceMap[path]
+		if !ok {
+			return nil, fmt.Errorf("tp-instance-not-found-tp-path-%v", path)
+		}
+		return tpInst, nil
+	default:
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech})
+		return nil, fmt.Errorf("unknown-tech-%s-tp-path-%v", tech, path)
+	}
+}
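+
+// Caller-side sketch (hypothetical variable names): since GetTPInstance
+// returns an interface{}, callers type-assert based on the technology, e.g.
+//
+//	inst, err := tpMgr.GetTPInstance(ctx, path)
+//	if tpInst, ok := inst.(*tp_pb.TechProfileInstance); ok { /* GPON family */ }
+//	if eponInst, ok := inst.(*tp_pb.EponTechProfileInstance); ok { /* EPON */ }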
+
+// CreateTechProfileInstance creates a new TP instance.
+func (t *TechProfileMgr) CreateTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string, intfID uint32) (interface{}, error) {
+	var tpInstance *tp_pb.TechProfileInstance
+	var eponTpInstance *tp_pb.EponTechProfileInstance
+
+	logger.Infow(ctx, "creating-tp-instance", log.Fields{"tpID": tpID, "uni": uniPortName, "intId": intfID})
+
+	// Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+	if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+		logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		return nil, fmt.Errorf("uni-port-name-not-confirming-to-format-%s", uniPortName)
+	}
+	tpInstancePathSuffix := t.GetTechProfileInstanceKey(ctx, tpID, uniPortName)
+
+	if t.resourceMgr.GetTechnology() == epon {
+		tp := t.getEponTPFromKVStore(ctx, tpID)
+		if tp != nil {
+			if err := t.validateInstanceControlAttr(ctx, *tp.InstanceControl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr-using-default-tp")
+				tp = t.getDefaultEponProfile(ctx)
+			} else {
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpID": tpID})
+			}
+		} else {
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+			tp = t.getDefaultEponProfile(ctx)
+		}
+		// Store TP in cache
+		t.eponTpMapLock.Lock()
+		t.eponTpMap[tpID] = tp
+		t.eponTpMapLock.Unlock()
+
+		if eponTpInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfID, tpInstancePathSuffix); eponTpInstance == nil {
+			logger.Error(ctx, "tp-instance-allocation-failed")
+			return nil, errors.New("tp-instance-allocation-failed")
+		}
+		t.epontpInstanceMapLock.Lock()
+		t.eponTpInstanceMap[tpInstancePathSuffix] = eponTpInstance
+		t.epontpInstanceMapLock.Unlock()
+		resInst := tp_pb.ResourceInstance{
+			TpId:                 tpID,
+			ProfileType:          eponTpInstance.ProfileType,
+			SubscriberIdentifier: eponTpInstance.SubscriberIdentifier,
+			AllocId:              eponTpInstance.AllocId,
+		}
+		for _, usQAttr := range eponTpInstance.UpstreamQueueAttributeList {
+			resInst.GemportIds = append(resInst.GemportIds, usQAttr.GemportId)
+		}
+
+		logger.Infow(ctx, "epon-tp-instance-created-successfully",
+			log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+		if err := t.addResourceInstanceToKVStore(ctx, tpID, uniPortName, resInst); err != nil {
+			logger.Errorw(ctx, "failed-to-update-resource-instance-to-kv-store--freeing-up-resources", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+			allocIDs := make([]uint32, 0)
+			allocIDs = append(allocIDs, resInst.AllocId)
+			errList := make([]error, 0)
+			errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), allocIDs))
+			errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), resInst.GemportIds))
+			if len(errList) > 0 {
+				logger.Errorw(ctx, "failed-to-free-up-resources-on-kv-store--system-behavior-has-become-erratic", log.Fields{"tpID": tpID, "uniPortName": uniPortName, "errList": errList})
+			}
+			return nil, err
+		}
+		return eponTpInstance, nil
+	} else {
+		tp := t.getTPFromKVStore(ctx, tpID)
+		if tp != nil {
+			if err := t.validateInstanceControlAttr(ctx, *tp.InstanceControl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
+				tp = t.getDefaultTechProfile(ctx)
+			} else {
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpID": tpID})
+			}
+		} else {
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+			tp = t.getDefaultTechProfile(ctx)
+		}
+		// Store TP in cache
+		t.tpMapLock.Lock()
+		t.tpMap[tpID] = tp
+		t.tpMapLock.Unlock()
+
+		if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfID, tpInstancePathSuffix); tpInstance == nil {
+			logger.Error(ctx, "tp-instance-allocation-failed")
+			return nil, errors.New("tp-instance-allocation-failed")
+		}
+		t.tpInstanceMapLock.Lock()
+		t.tpInstanceMap[tpInstancePathSuffix] = tpInstance
+		t.tpInstanceMapLock.Unlock()
+
+		resInst := tp_pb.ResourceInstance{
+			TpId:                 tpID,
+			ProfileType:          tpInstance.ProfileType,
+			SubscriberIdentifier: tpInstance.SubscriberIdentifier,
+			AllocId:              tpInstance.UsScheduler.AllocId,
+		}
+		for _, usQAttr := range tpInstance.UpstreamGemPortAttributeList {
+			resInst.GemportIds = append(resInst.GemportIds, usQAttr.GemportId)
+		}
+
+		logger.Infow(ctx, "tp-instance-created-successfully",
+			log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+		if err := t.addResourceInstanceToKVStore(ctx, tpID, uniPortName, resInst); err != nil {
+			logger.Errorw(ctx, "failed-to-update-resource-instance-to-kv-store--freeing-up-resources", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+			allocIDs := make([]uint32, 0)
+			allocIDs = append(allocIDs, resInst.AllocId)
+			errList := make([]error, 0)
+			errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), allocIDs))
+			errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), resInst.GemportIds))
+			if len(errList) > 0 {
+				logger.Fatalw(ctx, "failed-to-free-up-resources-on-kv-store--system-behavior-has-become-erratic", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+			}
+			return nil, err
+		}
+
+		logger.Infow(ctx, "resource-instance-added-to-kv-store-successfully",
+			log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+		return tpInstance, nil
+	}
+}
+
+// DeleteTechProfileInstance deletes the TP instance from the local cache as well as deletes the corresponding
+// resource instance from the KV store.
+func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string) error {
+	// Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+	if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+		logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		return fmt.Errorf("uni-port-name-not-confirming-to-format--%s", uniPortName)
+	}
+	path := t.GetTechProfileInstanceKey(ctx, tpID, uniPortName)
+	logger.Infow(ctx, "delete-tp-instance-from-cache", log.Fields{"key": path})
+	t.tpInstanceMapLock.Lock()
+	delete(t.tpInstanceMap, path)
+	t.tpInstanceMapLock.Unlock()
+	if err := t.removeResourceInstanceFromKVStore(ctx, tpID, uniPortName); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance) []*tp_pb.TrafficQueue {
+	var encryp bool
+	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
+	for Count := 0; Count < NumGemPorts; Count++ {
+		if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+			continue
+		}
+		if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+			encryp = true
+		} else {
+			encryp = false
+		}
+		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
+			Direction:     tp_pb.Direction_DOWNSTREAM,
+			GemportId:     tp.DownstreamGemPortAttributeList[Count].MulticastGemId,
+			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+			AesEncryption: encryp,
+			SchedPolicy:   tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy,
+			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQ,
+			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+			DiscardPolicy: tp.DownstreamGemPortAttributeList[Count].DiscardPolicy,
+		})
+	}
+	logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	return mcastTrafficQueues
+}
+
+func (t *TechProfileMgr) GetGemportForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) interface{} {
+	// Get the GEM port (GPON family) or queue (EPON) attribute entry mapped to the given p-bit.
+	switch tp := tp.(type) {
+	case *tp_pb.TechProfileInstance:
+		if dir == tp_pb.Direction_UPSTREAM {
+			// upstream GEM ports
+			numGemPorts := len(tp.UpstreamGemPortAttributeList)
+			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+				lenOfPbitMap := len(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap)
+				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+					if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+							logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportId})
+							return tp.UpstreamGemPortAttributeList[gemCnt]
+						}
+					}
+				}
+			}
+		} else if dir == tp_pb.Direction_DOWNSTREAM {
+			//downstream GEM ports
+			numGemPorts := len(tp.DownstreamGemPortAttributeList)
+			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+				lenOfPbitMap := len(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap)
+				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+					if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+							logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportId})
+							return tp.DownstreamGemPortAttributeList[gemCnt]
+						}
+					}
+				}
+			}
+		}
+		logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	case *openolt.EponTechProfileInstance:
+		if dir == tp_pb.Direction_UPSTREAM {
+			// upstream GEM ports
+			numGemPorts := len(tp.UpstreamQueueAttributeList)
+			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+				lenOfPbitMap := len(tp.UpstreamQueueAttributeList[gemCnt].PbitMap)
+				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+					if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+							logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportId})
+							return tp.UpstreamQueueAttributeList[gemCnt]
+						}
+					}
+				}
+			}
+		} else if dir == tp_pb.Direction_DOWNSTREAM {
+			//downstream GEM ports
+			numGemPorts := len(tp.DownstreamQueueAttributeList)
+			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+				lenOfPbitMap := len(tp.DownstreamQueueAttributeList[gemCnt].PbitMap)
+				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+					if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+							logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportId})
+							return tp.DownstreamQueueAttributeList[gemCnt]
+						}
+					}
+				}
+			}
+		}
+		logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	default:
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
+	}
+	return nil
+}
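+
+// pbitMapHasPbit is an illustrative helper (not used by the library code
+// above) that captures the indexing arithmetic of GetGemportForPbit: given a
+// pbit map string such as "0b00101000", it reports whether p-bit 0..7 is set.
+// The first two characters are the "0b" prefix; the last character maps to
+// p-bit 0 and the character right after the prefix maps to p-bit 7.
+func pbitMapHasPbit(pbitMap string, pbit uint32) bool {
+	if len(pbitMap) != 10 || pbit > 7 {
+		return false
+	}
+	return pbitMap[len(pbitMap)-1-int(pbit)] == '1'
+}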
+
+// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
+func (t *TechProfileMgr) FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, intfID uint32, onuID uint32) interface{} {
+	onuTpInstancePathSuffix := fmt.Sprintf("%s/%d/olt-{%s}/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), tpID, oltDeviceID, intfID, onuID)
+	tech := t.resourceMgr.GetTechnology()
+	if tech == xgspon || tech == xgpon || tech == gpon {
+		t.tpInstanceMapLock.RLock()
+		defer t.tpInstanceMapLock.RUnlock()
+		tpInstancesTech := make([]tp_pb.TechProfileInstance, 0)
+		for i := 0; i < MaxUniPortPerOnu; i++ {
+			key := onuTpInstancePathSuffix + fmt.Sprintf("/uni-{%d}", i)
+			if tpInst, ok := t.tpInstanceMap[key]; ok {
+				tpInstancesTech = append(tpInstancesTech, *tpInst)
+			}
+		}
+		return tpInstancesTech
+	} else if tech == epon {
+		t.epontpInstanceMapLock.RLock()
+		defer t.epontpInstanceMapLock.RUnlock()
+		tpInstancesTech := make([]tp_pb.EponTechProfileInstance, 0)
+		for i := 0; i < MaxUniPortPerOnu; i++ {
+			key := onuTpInstancePathSuffix + fmt.Sprintf("/uni-{%d}", i)
+			if tpInst, ok := t.eponTpInstanceMap[key]; ok {
+				tpInstancesTech = append(tpInstancesTech, *tpInst)
+			}
+		}
+		return tpInstancesTech
+	} else {
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech, "tpID": tpID, "onuID": onuID, "intfID": intfID})
+	}
+	return nil
+}
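+
+// For example, with oltDeviceID "abc" (hypothetical), tpID 64, intfID 0 and
+// onuID 1 on an XGS-PON system, the cache is probed with the keys
+// "XGS-PON/64/olt-{abc}/pon-{0}/onu-{1}/uni-{0}" through "...uni-{15}".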
+
+func (t *TechProfileMgr) GetResourceID(ctx context.Context, intfID uint32, resourceType string, numIDs uint32) ([]uint32, error) {
+	logger.Debugw(ctx, "getting-resource-id", log.Fields{
+		"intf-id":       intfID,
+		"resource-type": resourceType,
+		"num":           numIDs,
+	})
+	var err error
+	var ids []uint32
+	switch resourceType {
+	case t.resourceMgr.GetResourceTypeAllocID():
+		t.AllocIDMgmtLock.Lock()
+		ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+		t.AllocIDMgmtLock.Unlock()
+	case t.resourceMgr.GetResourceTypeGemPortID():
+		t.GemPortIDMgmtLock.Lock()
+		ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+		t.GemPortIDMgmtLock.Unlock()
+	case t.resourceMgr.GetResourceTypeOnuID():
+		t.OnuIDMgmtLock.Lock()
+		ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+		t.OnuIDMgmtLock.Unlock()
+	default:
+		return nil, fmt.Errorf("resourceType %s not supported", resourceType)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+func (t *TechProfileMgr) FreeResourceID(ctx context.Context, intfID uint32, resourceType string, ReleaseContent []uint32) error {
+	logger.Debugw(ctx, "freeing-resource-id", log.Fields{
+		"intf-id":         intfID,
+		"resource-type":   resourceType,
+		"release-content": ReleaseContent,
+	})
+	var err error
+	switch resourceType {
+	case t.resourceMgr.GetResourceTypeAllocID():
+		t.AllocIDMgmtLock.Lock()
+		err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+		t.AllocIDMgmtLock.Unlock()
+	case t.resourceMgr.GetResourceTypeGemPortID():
+		t.GemPortIDMgmtLock.Lock()
+		err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+		t.GemPortIDMgmtLock.Unlock()
+	case t.resourceMgr.GetResourceTypeOnuID():
+		t.OnuIDMgmtLock.Lock()
+		err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+		t.OnuIDMgmtLock.Unlock()
+	default:
+		return fmt.Errorf("resourceType %s not supported", resourceType)
+	}
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+	return &tp_pb.SchedulerConfig{
+		Direction:    tpInstance.UsScheduler.Direction,
+		AdditionalBw: tpInstance.UsScheduler.AdditionalBw,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
+		SchedPolicy:  tpInstance.UsScheduler.QSchedPolicy}
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+	return &tp_pb.SchedulerConfig{
+		Direction:    tpInstance.DsScheduler.Direction,
+		AdditionalBw: tpInstance.DsScheduler.AdditionalBw,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
+		SchedPolicy:  tpInstance.DsScheduler.QSchedPolicy}
+}
+
+func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *tp_pb.TechProfileInstance, SchedCfg *tp_pb.SchedulerConfig,
+	ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+
+	tSched := &tp_pb.TrafficScheduler{
+		Direction:          SchedCfg.Direction,
+		AllocId:            tpInstance.UsScheduler.AllocId,
+		TrafficShapingInfo: ShapingCfg,
+		Scheduler:          SchedCfg}
+
+	return tSched
+}
+
+func (t *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance, direction tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+
+	var encryp bool
+	if direction == tp_pb.Direction_UPSTREAM {
+		// upstream GEM ports
+		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     direction,
+				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportId,
+				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy,
+				Priority:      tp.UpstreamGemPortAttributeList[Count].PriorityQ,
+				Weight:        tp.UpstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp.UpstreamGemPortAttributeList[Count].DiscardPolicy,
+			})
+		}
+		logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	} else if direction == tp_pb.Direction_DOWNSTREAM {
+		//downstream GEM ports
+		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+				//do not take multicast GEM ports. They are handled separately.
+				continue
+			}
+			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     direction,
+				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportId,
+				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy,
+				Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQ,
+				Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp.DownstreamGemPortAttributeList[Count].DiscardPolicy,
+			})
+		}
+		logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	}
+
+	logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", direction)
+	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", direction)
+}
+
+func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl tp_pb.InstanceControl) error {
+	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
+		logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		return errors.New("invalid-onu-instance-ctl-attr")
+	}
+
+	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
+		logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		return errors.New("invalid-uni-instance-ctl-attr")
+	}
+
+	if instCtl.Uni == "multi-instance" {
+		logger.Error(ctx, "uni-multi-instance-tp-not-supported")
+		return errors.New("uni-multi-instance-tp-not-supported")
+	}
+
+	return nil
+}
+
+// allocateTPInstance for GPON, XGPON and XGS-PON technology
+func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *tp_pb.TechProfile, intfID uint32, tpInstPathSuffix string) *tp_pb.TechProfileInstance {
+
+	var usGemPortAttributeList []*tp_pb.GemPortAttributes
+	var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+	var dsMulticastGemAttributeList []*tp_pb.GemPortAttributes
+	var dsUnicastGemAttributeList []*tp_pb.GemPortAttributes
+	var tcontIDs []uint32
+	var gemPorts []uint32
+	var err error
+
+	logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfID": intfID, "numGem": tp.NumGemPorts})
+
+	if tp.InstanceControl.Onu == "multi-instance" {
+		tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1)
+		if err != nil {
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"err": err, "intfID": intfID})
+			return nil
+		}
+	} else { // "single-instance"
+		if tpInst := t.getSingleInstanceTp(ctx, tpInstPathSuffix); tpInst == nil {
+			// No "single-instance" tp found on one any uni port for the given TP ID
+			// Allocate a new TcontID or AllocID
+			tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1)
+			if err != nil {
+				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"err": err, "intfID": intfID})
+				return nil
+			}
+		} else {
+			// Use the alloc-id from the existing TpInstance
+			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocId)
+		}
+	}
+	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	gemPorts, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
+	if err != nil {
+		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"err": err, "intfID": intfID, "numGemports": tp.NumGemPorts})
+		return nil
+	}
+	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			&tp_pb.GemPortAttributes{GemportId: gemPorts[index],
+				MaxQSize:         tp.UpstreamGemPortAttributeList[index].MaxQSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQ:        tp.UpstreamGemPortAttributeList[index].PriorityQ,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+	}
+
+	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	//put multicast and unicast downstream GEM port attributes in different lists first
+	for index := 0; index < len(tp.DownstreamGemPortAttributeList); index++ {
+		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+				&tp_pb.GemPortAttributes{
+					MulticastGemId:           tp.DownstreamGemPortAttributeList[index].MulticastGemId,
+					MaxQSize:                 tp.DownstreamGemPortAttributeList[index].MaxQSize,
+					PbitMap:                  tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:            tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy:         tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQ:                tp.DownstreamGemPortAttributeList[index].PriorityQ,
+					Weight:                   tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:            tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:            tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+					IsMulticast:              tp.DownstreamGemPortAttributeList[index].IsMulticast,
+					DynamicAccessControlList: tp.DownstreamGemPortAttributeList[index].DynamicAccessControlList,
+					StaticAccessControlList:  tp.DownstreamGemPortAttributeList[index].StaticAccessControlList})
+		} else {
+			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+				&tp_pb.GemPortAttributes{
+					MaxQSize:         tp.DownstreamGemPortAttributeList[index].MaxQSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQ:        tp.DownstreamGemPortAttributeList[index].PriorityQ,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+		}
+	}
+	//add unicast downstream GEM ports to dsGemPortAttributeList
+	if dsUnicastGemAttributeList != nil {
+		for index := 0; index < int(tp.NumGemPorts); index++ {
+			dsGemPortAttributeList = append(dsGemPortAttributeList,
+				&tp_pb.GemPortAttributes{GemportId: gemPorts[index],
+					MaxQSize:         dsUnicastGemAttributeList[index].MaxQSize,
+					PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
+					AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
+					SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+					PriorityQ:        dsUnicastGemAttributeList[index].PriorityQ,
+					Weight:           dsUnicastGemAttributeList[index].Weight,
+					DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
+					DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
+		}
+	}
+	//add multicast GEM ports to dsGemPortAttributeList afterwards
+	for k := range dsMulticastGemAttributeList {
+		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+	}
+
+	return &tp_pb.TechProfileInstance{
+		SubscriberIdentifier: uniPortName,
+		Name:                 tp.Name,
+		ProfileType:          tp.ProfileType,
+		Version:              tp.Version,
+		NumGemPorts:          tp.NumGemPorts,
+		InstanceControl:      tp.InstanceControl,
+		UsScheduler: &tp_pb.SchedulerAttributes{
+			AllocId:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+		DsScheduler: &tp_pb.SchedulerAttributes{
+			AllocId:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
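The allocation above forks on tp.InstanceControl.Onu: "multi-instance" draws a fresh alloc-ID for every UNI, while "single-instance" reuses the alloc-ID of an instance already present on a sibling UNI of the same ONU. A minimal sketch of the calling pattern, assuming a TechProfileMgr t and a TP template tp are in scope (the uniPort value and the gemIDsOf helper are hypothetical):

	uniPort := "olt-{1234}/pon-{0}/onu-{1}/uni-{0}"
	suffix := fmt.Sprintf("%s/%d/%s", "XGS-PON", 64, uniPort)
	if inst := t.allocateTPInstance(ctx, uniPort, tp, 0, suffix); inst != nil {
		// Persist only the slim ResourceInstance (alloc-ID plus GEM port IDs);
		// the full instance is rebuilt from the template on adapter restart
		// (see buildTpInstanceFromResourceInstance further down).
		_ = t.addResourceInstanceToKVStore(ctx, 64, uniPort, tp_pb.ResourceInstance{
			TpId:                 64,
			SubscriberIdentifier: uniPort,
			AllocId:              inst.UsScheduler.AllocId,
			GemportIds:           gemIDsOf(inst), // hypothetical: collects GemportId of each upstream GEM attribute
		})
	}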
+
+// allocateEponTPInstance allocates a TP instance for EPON technology
+func (t *TechProfileMgr) allocateEponTPInstance(ctx context.Context, uniPortName string, tp *tp_pb.EponTechProfile, intfID uint32, tpInstPath string) *tp_pb.EponTechProfileInstance {
+
+	var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+	var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+	var tcontIDs []uint32
+	var gemPorts []uint32
+	var err error
+
+	logger.Infow(ctx, "allocating-tp-instance-from-tp-template", log.Fields{"uniPortName": uniPortName, "intfID": intfID, "numGem": tp.NumGemPorts})
+
+	if tp.InstanceControl.Onu == "multi-instance" {
+		if tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+			logger.Errorw(ctx, "error-getting-alloc-id-from-resource-mgr", log.Fields{"err": err, "intfID": intfID})
+			return nil
+		}
+	} else { // "single-instance"
+		if tpInst := t.getSingleInstanceEponTp(ctx, tpInstPath); tpInst == nil {
+			// No "single-instance" TP instance found on any UNI port for the given TP ID
+			// Allocate a new TcontID or AllocID
+			if tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+				logger.Errorw(ctx, "error-getting-alloc-id-from-resource-mgr", log.Fields{"err": err, "intfID": intfID})
+				return nil
+			}
+		} else {
+			// Use the alloc-id from the existing TpInstance
+			tcontIDs = append(tcontIDs, tpInst.AllocId)
+		}
+	}
+	logger.Debugw(ctx, "num-gem-ports-in-tp", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	if gemPorts, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+		logger.Errorw(ctx, "error-getting-gemport-id-from-resource-mgr", log.Fields{"err": err, "intfID": intfID, "numGemports": tp.NumGemPorts})
+		return nil
+	}
+	logger.Infow(ctx, "allocated-alloc-id-and-gemport-successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usQueueAttributeList = append(usQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{GemportId: gemPorts[index],
+				MaxQSize:                  tp.UpstreamQueueAttributeList[index].MaxQSize,
+				PbitMap:                   tp.UpstreamQueueAttributeList[index].PbitMap,
+				AesEncryption:             tp.UpstreamQueueAttributeList[index].AesEncryption,
+				TrafficType:               tp.UpstreamQueueAttributeList[index].TrafficType,
+				UnsolicitedGrantSize:      tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
+				NominalInterval:           tp.UpstreamQueueAttributeList[index].NominalInterval,
+				ToleratedPollJitter:       tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
+				RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
+				NumQSets:                  tp.UpstreamQueueAttributeList[index].NumQSets,
+				QThresholds:               tp.UpstreamQueueAttributeList[index].QThresholds,
+				SchedulingPolicy:          tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
+				PriorityQ:                 tp.UpstreamQueueAttributeList[index].PriorityQ,
+				Weight:                    tp.UpstreamQueueAttributeList[index].Weight,
+				DiscardPolicy:             tp.UpstreamQueueAttributeList[index].DiscardPolicy,
+				DiscardConfig:             tp.UpstreamQueueAttributeList[index].DiscardConfig})
+	}
+
+	logger.Infow(ctx, "length-of-downstream-queue-attribute-list", log.Fields{"length": len(tp.DownstreamQueueAttributeList)})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		dsQueueAttributeList = append(dsQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{GemportId: gemPorts[index],
+				MaxQSize:         tp.DownstreamQueueAttributeList[index].MaxQSize,
+				PbitMap:          tp.DownstreamQueueAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamQueueAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
+				PriorityQ:        tp.DownstreamQueueAttributeList[index].PriorityQ,
+				Weight:           tp.DownstreamQueueAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamQueueAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamQueueAttributeList[index].DiscardConfig})
+	}
+
+	return &tp_pb.EponTechProfileInstance{
+		SubscriberIdentifier:         uniPortName,
+		Name:                         tp.Name,
+		ProfileType:                  tp.ProfileType,
+		Version:                      tp.Version,
+		NumGemPorts:                  tp.NumGemPorts,
+		InstanceControl:              tp.InstanceControl,
+		PackageType:                  tp.PackageType,
+		AllocId:                      tcontIDs[0],
+		UpstreamQueueAttributeList:   usQueueAttributeList,
+		DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+// getSingleInstanceTp returns another TpInstance (GPON, XGPON, XGS-PON) for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceTp(ctx context.Context, tpPathSuffix string) *tp_pb.TechProfileInstance {
+
+	// For example:
+	// tpPathSuffix like "XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}/uni-{1}"
+	// is broken into ["XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}" ""]
+	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPathSuffix, 2)
+
+	t.tpInstanceMapLock.RLock()
+	defer t.tpInstanceMapLock.RUnlock()
+	for i := 0; i < MaxUniPortPerOnu; i++ {
+		key := fmt.Sprintf("%s/uni-{%d}", uniPathSlice[0], i)
+		if tpInst, ok := t.tpInstanceMap[key]; ok {
+			logger.Debugw(ctx, "found-single-instance-tp", log.Fields{"key": key})
+			return tpInst
+		}
+	}
+	return nil
+}
+
+// getSingleInstanceEponTp returns another TpInstance (EPON) for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceEponTp(ctx context.Context, tpPathSuffix string) *tp_pb.EponTechProfileInstance {
+	// For example:
+	// tpPathSuffix like "EPON/64/olt-{1234}/pon-{0}/onu-{1}/uni-{1}"
+	// is broken into ["EPON/64/olt-{1234}/pon-{0}/onu-{1}" ""]
+	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPathSuffix, 2)
+
+	t.epontpInstanceMapLock.RLock()
+	defer t.epontpInstanceMapLock.RUnlock()
+	for i := 0; i < MaxUniPortPerOnu; i++ {
+		key := fmt.Sprintf("%s/uni-{%d}", uniPathSlice[0], i)
+		if tpInst, ok := t.eponTpInstanceMap[key]; ok {
+			logger.Debugw(ctx, "found-single-instance-tp", log.Fields{"key": key})
+			return tpInst
+		}
+	}
+	return nil
+}
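Both single-instance lookups rely on the same path surgery: strip the trailing /uni-{N} and probe every possible UNI index against the in-memory cache. A self-contained illustration of the split, using only the standard library:

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		path := "XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}/uni-{1}"
		slice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(path, 2)
		fmt.Printf("%q\n", slice) // ["XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}" ""]
		for i := 0; i < 2; i++ { // the real code iterates up to MaxUniPortPerOnu
			fmt.Printf("%s/uni-{%d}\n", slice[0], i) // keys probed in tpInstanceMap
		}
	}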
+
+// getDefaultTechProfile returns a default TechProfile for GPON, XGPON, XGS-PON
+func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *tp_pb.TechProfile {
+	var usGemPortAttributeList []*tp_pb.GemPortAttributes
+	var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+
+	for _, pbit := range t.config.DefaultPbits {
+		logger.Debugw(ctx, "creating-gem-port-profile", log.Fields{"pbit": pbit})
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			&tp_pb.GemPortAttributes{
+				MaxQSize:         defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+				PriorityQ:        defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    tp_pb.DiscardPolicy_TailDrop,
+				DiscardConfigV2: &tp_pb.DiscardConfig{
+					DiscardPolicy: tp_pb.DiscardPolicy_Red,
+					DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+						RedDiscardConfig: &tp_pb.RedDiscardConfig{
+							MinThreshold:   defaultMinThreshold,
+							MaxThreshold:   defaultMaxThreshold,
+							MaxProbability: defaultMaxProbability,
+						},
+					},
+				},
+				DiscardConfig: &tp_pb.RedDiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability,
+				},
+			})
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			&tp_pb.GemPortAttributes{
+				MaxQSize:         defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+				PriorityQ:        defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    tp_pb.DiscardPolicy_TailDrop,
+				DiscardConfigV2: &tp_pb.DiscardConfig{
+					DiscardPolicy: tp_pb.DiscardPolicy_Red,
+					DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+						RedDiscardConfig: &tp_pb.RedDiscardConfig{
+							MinThreshold:   defaultMinThreshold,
+							MaxThreshold:   defaultMaxThreshold,
+							MaxProbability: defaultMaxProbability,
+						},
+					},
+				},
+				DiscardConfig: &tp_pb.RedDiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability,
+				},
+				IsMulticast:              defaultIsMulticast,
+				DynamicAccessControlList: defaultAccessControlList,
+				StaticAccessControlList:  defaultAccessControlList,
+				MulticastGemId:           defaultMcastGemID})
+	}
+	return &tp_pb.TechProfile{
+		Name:        t.config.DefaultTPName,
+		ProfileType: t.resourceMgr.GetTechnology(),
+		Version:     t.config.TPVersion,
+		NumGemPorts: uint32(len(usGemPortAttributeList)),
+		InstanceControl: &tp_pb.InstanceControl{
+			Onu:               defaultOnuInstance,
+			Uni:               defaultUniInstance,
+			MaxGemPayloadSize: defaultGemPayloadSize},
+		UsScheduler: &tp_pb.SchedulerAttributes{
+			Direction:    tp_pb.Direction_UPSTREAM,
+			AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_BestEffort,
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: tp_pb.SchedulingPolicy_Hybrid},
+		DsScheduler: &tp_pb.SchedulerAttributes{
+			Direction:    tp_pb.Direction_DOWNSTREAM,
+			AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_BestEffort,
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: tp_pb.SchedulingPolicy_Hybrid},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
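Note that the default profile sizes itself from configuration: one upstream/downstream GEM port pair is generated per entry in t.config.DefaultPbits, so NumGemPorts equals the number of configured p-bit maps. For instance, under the assumption of a single all-priorities map:

	pbits := []string{"0b11111111"} // assumed t.config.DefaultPbits value
	numGem := uint32(len(pbits))    // NumGemPorts of the default profile == 1
	_ = numGem                      // that one GEM port carries all eight p-bits in each direction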
+
+// getDefaultEponProfile returns a default TechProfile for EPON
+func (t *TechProfileMgr) getDefaultEponProfile(ctx context.Context) *tp_pb.EponTechProfile {
+
+	var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+	var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+
+	for _, pbit := range t.config.DefaultPbits {
+		logger.Debugw(ctx, "creating-queue", log.Fields{"pbit": pbit})
+		usQueueAttributeList = append(usQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{
+				MaxQSize:                  defaultMaxQueueSize,
+				PbitMap:                   pbit,
+				AesEncryption:             defaultAESEncryption,
+				TrafficType:               defaultTrafficType,
+				UnsolicitedGrantSize:      defaultUnsolicitedGrantSize,
+				NominalInterval:           defaultNominalInterval,
+				ToleratedPollJitter:       defaultToleratedPollJitter,
+				RequestTransmissionPolicy: defaultRequestTransmissionPolicy,
+				NumQSets:                  defaultNumQueueSet,
+				QThresholds: &tp_pb.QThresholds{
+					QThreshold1: defaultQThreshold1,
+					QThreshold2: defaultQThreshold2,
+					QThreshold3: defaultQThreshold3,
+					QThreshold4: defaultQThreshold4,
+					QThreshold5: defaultQThreshold5,
+					QThreshold6: defaultQThreshold6,
+					QThreshold7: defaultQThreshold7},
+				SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+				PriorityQ:        defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    tp_pb.DiscardPolicy_TailDrop,
+				DiscardConfigV2: &tp_pb.DiscardConfig{
+					DiscardPolicy: tp_pb.DiscardPolicy_Red,
+					DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+						RedDiscardConfig: &tp_pb.RedDiscardConfig{
+							MinThreshold:   defaultMinThreshold,
+							MaxThreshold:   defaultMaxThreshold,
+							MaxProbability: defaultMaxProbability,
+						},
+					},
+				},
+				DiscardConfig: &tp_pb.RedDiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability,
+				}})
+		dsQueueAttributeList = append(dsQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{
+				MaxQSize:         defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+				PriorityQ:        defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    tp_pb.DiscardPolicy_TailDrop,
+				DiscardConfigV2: &tp_pb.DiscardConfig{
+					DiscardPolicy: tp_pb.DiscardPolicy_Red,
+					DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+						RedDiscardConfig: &tp_pb.RedDiscardConfig{
+							MinThreshold:   defaultMinThreshold,
+							MaxThreshold:   defaultMaxThreshold,
+							MaxProbability: defaultMaxProbability,
+						},
+					},
+				},
+				DiscardConfig: &tp_pb.RedDiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability,
+				}})
+	}
+	return &tp_pb.EponTechProfile{
+		Name:        t.config.DefaultTPName,
+		ProfileType: t.resourceMgr.GetTechnology(),
+		Version:     t.config.TPVersion,
+		NumGemPorts: uint32(len(usQueueAttributeList)),
+		InstanceControl: &tp_pb.InstanceControl{
+			Onu:               defaultOnuInstance,
+			Uni:               defaultUniInstance,
+			MaxGemPayloadSize: defaultGemPayloadSize},
+		PackageType:                  defaultPakageType,
+		UpstreamQueueAttributeList:   usQueueAttributeList,
+		DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+// isMulticastGem returns true if the IsMulticast attribute of a GEM port is set to "true", "True" or "TRUE"; false otherwise
+func isMulticastGem(isMulticastAttrValue string) bool {
+	return isMulticastAttrValue != "" &&
+		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
+}
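The helper accepts exactly three spellings; a mixed-case value such as "tRuE" is not treated as multicast. A quick sanity check (runs in this package, where fmt is assumed imported):

	for _, v := range []string{"true", "True", "TRUE", "tRuE", "false", ""} {
		fmt.Println(v, isMulticastGem(v)) // true, true, true, then false for the rest
	}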
+
+func (t *TechProfileMgr) addResourceInstanceToKVStore(ctx context.Context, tpID uint32, uniPortName string, resInst tp_pb.ResourceInstance) error {
+	logger.Debugw(ctx, "adding-resource-instance-to-kv-store", log.Fields{"tpID": tpID, "uniPortName": uniPortName, "resInst": resInst})
+	val, err := proto.Marshal(&resInst)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-marshal-resource-instance", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName, "resInst": resInst})
+		return err
+	}
+	err = t.config.ResourceInstanceKVBacked.Put(ctx, fmt.Sprintf("%s/%d/%s", t.resourceMgr.GetTechnology(), tpID, uniPortName), val)
+	return err
+}
+
+func (t *TechProfileMgr) removeResourceInstanceFromKVStore(ctx context.Context, tpID uint32, uniPortName string) error {
+	logger.Debugw(ctx, "removing-resource-instance-from-kv-store", log.Fields{"tpID": tpID, "uniPortName": uniPortName})
+	if err := t.config.ResourceInstanceKVBacked.Delete(ctx, fmt.Sprintf("%s/%d/%s", t.resourceMgr.GetTechnology(), tpID, uniPortName)); err != nil {
+		logger.Errorw(ctx, "error-removing-resource-instance-from-kv-store", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+		return err
+	}
+	return nil
+}
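Resource instances are keyed as <technology>/<tpID>/<uniPortName> and stored as compact protobuf rather than the full JSON TP instance, which is where the bulk of the etcd footprint reduction comes from. A sketch of the round trip (all values are examples):

	uni := "olt-{1234}/pon-{0}/onu-{1}/uni-{0}"
	key := fmt.Sprintf("%s/%d/%s", "XGS-PON", 64, uni)
	resInst := tp_pb.ResourceInstance{TpId: 64, SubscriberIdentifier: uni, AllocId: 1024, GemportIds: []uint32{1025}}
	blob, _ := proto.Marshal(&resInst) // what addResourceInstanceToKVStore writes under key
	var back tp_pb.ResourceInstance
	_ = proto.Unmarshal(blob, &back) // what reconcileTpInstancesToCache reads back
	_ = key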
+
+func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, tpID uint32) *tp_pb.TechProfile {
+	t.tpMapLock.RLock()
+	tp, ok := t.tpMap[tpID]
+	t.tpMapLock.RUnlock()
+	if ok {
+		logger.Debugw(ctx, "found-tp-in-cache", log.Fields{"tpID": tpID})
+		return tp
+	}
+	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), tpID)
+	logger.Debugw(ctx, "getting-tp-from-kv-store", log.Fields{"tpID": tpID, "Key": key})
+	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
+	if err != nil {
+		logger.Errorw(ctx, "error-fetching-from-kv-store", log.Fields{"err": err, "key": key})
+		return nil
+	}
+	if kvresult != nil {
+		// The backend returns the value as a string; convert it to []byte before unmarshalling.
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			lTp := &tp_pb.TechProfile{}
+			reader := bytes.NewReader(value)
+			if err = jsonpb.Unmarshal(reader, lTp); err != nil {
+				logger.Errorw(ctx, "error-unmarshalling-tp-from-kv-store", log.Fields{"err": err, "tpID": tpID})
+				return nil
+			}
+
+			logger.Debugw(ctx, "successfully-fetched-tp-from-kv-store", log.Fields{"tpID": tpID, "value": *lTp})
+			return lTp
+		} else {
+			logger.Errorw(ctx, "error-decoding-tp", log.Fields{"err": err, "tpID": tpID})
+			// Fall through and return nil; the caller creates a default profile in this case.
+		}
+	}
+
+	return nil
+}
+
+func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, tpID uint32) *tp_pb.EponTechProfile {
+	t.eponTpMapLock.RLock()
+	eponTp, ok := t.eponTpMap[tpID]
+	t.eponTpMapLock.RUnlock()
+	if ok {
+		logger.Debugw(ctx, "found-tp-in-cache", log.Fields{"tpID": tpID})
+		return eponTp
+	}
+	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), tpID)
+	logger.Debugw(ctx, "getting-epon-tp-from-kv-store", log.Fields{"tpID": tpID, "Key": key})
+	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
+	if err != nil {
+		logger.Errorw(ctx, "error-fetching-from-kv-store", log.Fields{"err": err, "key": key})
+		return nil
+	}
+	if kvresult != nil {
+		// The backend returns the value as a string; convert it to []byte before unmarshalling.
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			lEponTp := &tp_pb.EponTechProfile{}
+			reader := bytes.NewReader(value)
+			if err = jsonpb.Unmarshal(reader, lEponTp); err != nil {
+				logger.Errorw(ctx, "error-unmarshalling-epon-tp-from-kv-store", log.Fields{"err": err, "tpID": tpID})
+				return nil
+			}
+
+			logger.Debugw(ctx, "successfully-fetched-epon-tp-from-kv-store", log.Fields{"tpID": tpID, "value": *lEponTp})
+			return lEponTp
+		} else {
+			logger.Errorw(ctx, "error-decoding-epon-tp", log.Fields{"err": err, "tpID": tpID})
+		}
+	}
+	return nil
+}
+
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+
+	logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
+	switch storeType {
+	case "etcd":
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
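A minimal sketch of obtaining the etcd-backed client (address and timeout are placeholder values; Close taking a context is per this library version's kvstore.Client interface):

	kvClient, err := newKVClient(ctx, "etcd", "etcd.voltha.svc:2379", 5*time.Second)
	if err != nil {
		// only "etcd" is supported here; any other store type yields "unsupported-kv-store"
		logger.Fatalw(ctx, "kv-client-setup-failed", log.Fields{"err": err})
	}
	defer kvClient.Close(ctx)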
+
+// buildTpInstanceFromResourceInstance for GPON, XGPON and XGS-PON technology - build TpInstance from TechProfile template and ResourceInstance
+func (t *TechProfileMgr) buildTpInstanceFromResourceInstance(ctx context.Context, tp *tp_pb.TechProfile, resInst *tp_pb.ResourceInstance) *tp_pb.TechProfileInstance {
+
+	var usGemPortAttributeList []*tp_pb.GemPortAttributes
+	var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+	var dsMulticastGemAttributeList []*tp_pb.GemPortAttributes
+	var dsUnicastGemAttributeList []*tp_pb.GemPortAttributes
+
+	if len(resInst.GemportIds) != int(tp.NumGemPorts) {
+		logger.Errorw(ctx, "mismatch-in-number-of-gemports-between-template-and-resource-instance",
+			log.Fields{"tpID": resInst.TpId, "totalResInstGemPortIDs": len(resInst.GemportIds), "totalTpTemplateGemPorts": tp.NumGemPorts})
+		return nil
+	}
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			&tp_pb.GemPortAttributes{GemportId: resInst.GemportIds[index],
+				MaxQSize:         tp.UpstreamGemPortAttributeList[index].MaxQSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQ:        tp.UpstreamGemPortAttributeList[index].PriorityQ,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+	}
+
+	//put multicast and unicast downstream GEM port attributes in different lists first
+	for index := 0; index < len(tp.DownstreamGemPortAttributeList); index++ {
+		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+				&tp_pb.GemPortAttributes{
+					MulticastGemId:           tp.DownstreamGemPortAttributeList[index].MulticastGemId,
+					MaxQSize:                 tp.DownstreamGemPortAttributeList[index].MaxQSize,
+					PbitMap:                  tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:            tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy:         tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQ:                tp.DownstreamGemPortAttributeList[index].PriorityQ,
+					Weight:                   tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:            tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:            tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+					IsMulticast:              tp.DownstreamGemPortAttributeList[index].IsMulticast,
+					DynamicAccessControlList: tp.DownstreamGemPortAttributeList[index].DynamicAccessControlList,
+					StaticAccessControlList:  tp.DownstreamGemPortAttributeList[index].StaticAccessControlList})
+		} else {
+			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+				&tp_pb.GemPortAttributes{
+					MaxQSize:         tp.DownstreamGemPortAttributeList[index].MaxQSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQ:        tp.DownstreamGemPortAttributeList[index].PriorityQ,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+		}
+	}
+	//add unicast downstream GEM ports to dsGemPortAttributeList
+	if dsUnicastGemAttributeList != nil {
+		for index := 0; index < int(tp.NumGemPorts); index++ {
+			dsGemPortAttributeList = append(dsGemPortAttributeList,
+				&tp_pb.GemPortAttributes{GemportId: resInst.GemportIds[index],
+					MaxQSize:         dsUnicastGemAttributeList[index].MaxQSize,
+					PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
+					AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
+					SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+					PriorityQ:        dsUnicastGemAttributeList[index].PriorityQ,
+					Weight:           dsUnicastGemAttributeList[index].Weight,
+					DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
+					DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
+		}
+	}
+	//add multicast GEM ports to dsGemPortAttributeList afterwards
+	for k := range dsMulticastGemAttributeList {
+		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+	}
+
+	return &tp_pb.TechProfileInstance{
+		SubscriberIdentifier: resInst.SubscriberIdentifier,
+		Name:                 tp.Name,
+		ProfileType:          tp.ProfileType,
+		Version:              tp.Version,
+		NumGemPorts:          tp.NumGemPorts,
+		InstanceControl:      tp.InstanceControl,
+		UsScheduler: &tp_pb.SchedulerAttributes{
+			AllocId:      resInst.AllocId,
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+		DsScheduler: &tp_pb.SchedulerAttributes{
+			AllocId:      resInst.AllocId,
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
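This rebuild is the restart-time counterpart of the slim persistence shown earlier: TP template plus ResourceInstance yields the full instance, so only IDs ever live in etcd. A hedged sketch (field values hypothetical):

	resInst := &tp_pb.ResourceInstance{ // as read back from etcd
		TpId:                 64,
		SubscriberIdentifier: "olt-{1234}/pon-{0}/onu-{1}/uni-{0}",
		AllocId:              1024,
		GemportIds:           []uint32{1025},
	}
	if tp := t.getTPFromKVStore(ctx, resInst.TpId); tp != nil {
		inst := t.buildTpInstanceFromResourceInstance(ctx, tp, resInst)
		_ = inst // fully populated TechProfileInstance; nothing but the IDs came from etcd
	} // getTpInstanceFromResourceInstance falls back to getDefaultTechProfile when tp is nil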
+
+// buildEponTpInstanceFromResourceInstance for EPON technology - build EponTpInstance from EponTechProfile template and ResourceInstance
+func (t *TechProfileMgr) buildEponTpInstanceFromResourceInstance(ctx context.Context, tp *tp_pb.EponTechProfile, resInst *tp_pb.ResourceInstance) *tp_pb.EponTechProfileInstance {
+
+	var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+	var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+
+	if len(resInst.GemportIds) != int(tp.NumGemPorts) {
+		logger.Errorw(ctx, "mismatch-in-number-of-gemports-between-epon-tp-template-and-resource-instance",
+			log.Fields{"tpID": resInst.TpId, "totalResInstGemPortIDs": len(resInst.GemportIds), "totalTpTemplateGemPorts": tp.NumGemPorts})
+		return nil
+	}
+
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usQueueAttributeList = append(usQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{GemportId: resInst.GemportIds[index],
+				MaxQSize:                  tp.UpstreamQueueAttributeList[index].MaxQSize,
+				PbitMap:                   tp.UpstreamQueueAttributeList[index].PbitMap,
+				AesEncryption:             tp.UpstreamQueueAttributeList[index].AesEncryption,
+				TrafficType:               tp.UpstreamQueueAttributeList[index].TrafficType,
+				UnsolicitedGrantSize:      tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
+				NominalInterval:           tp.UpstreamQueueAttributeList[index].NominalInterval,
+				ToleratedPollJitter:       tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
+				RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
+				NumQSets:                  tp.UpstreamQueueAttributeList[index].NumQSets,
+				QThresholds:               tp.UpstreamQueueAttributeList[index].QThresholds,
+				SchedulingPolicy:          tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
+				PriorityQ:                 tp.UpstreamQueueAttributeList[index].PriorityQ,
+				Weight:                    tp.UpstreamQueueAttributeList[index].Weight,
+				DiscardPolicy:             tp.UpstreamQueueAttributeList[index].DiscardPolicy,
+				DiscardConfig:             tp.UpstreamQueueAttributeList[index].DiscardConfig})
+	}
+
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		dsQueueAttributeList = append(dsQueueAttributeList,
+			&tp_pb.EPONQueueAttributes{GemportId: resInst.GemportIds[index],
+				MaxQSize:         tp.DownstreamQueueAttributeList[index].MaxQSize,
+				PbitMap:          tp.DownstreamQueueAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamQueueAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
+				PriorityQ:        tp.DownstreamQueueAttributeList[index].PriorityQ,
+				Weight:           tp.DownstreamQueueAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamQueueAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamQueueAttributeList[index].DiscardConfig})
+	}
+
+	return &tp_pb.EponTechProfileInstance{
+		SubscriberIdentifier:         resInst.SubscriberIdentifier,
+		Name:                         tp.Name,
+		ProfileType:                  tp.ProfileType,
+		Version:                      tp.Version,
+		NumGemPorts:                  tp.NumGemPorts,
+		InstanceControl:              tp.InstanceControl,
+		PackageType:                  tp.PackageType,
+		AllocId:                      resInst.AllocId,
+		UpstreamQueueAttributeList:   usQueueAttributeList,
+		DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+func (t *TechProfileMgr) getTpInstanceFromResourceInstance(ctx context.Context, resInst *tp_pb.ResourceInstance) *tp_pb.TechProfileInstance {
+	if resInst == nil {
+		logger.Error(ctx, "resource-instance-nil")
+		return nil
+	}
+	tp := t.getTPFromKVStore(ctx, resInst.TpId)
+	if tp == nil {
+		logger.Warnw(ctx, "tp-not-found-on-kv--creating-default-tp", log.Fields{"tpID": resInst.TpId})
+		tp = t.getDefaultTechProfile(ctx)
+	}
+	return t.buildTpInstanceFromResourceInstance(ctx, tp, resInst)
+}
+
+func (t *TechProfileMgr) getEponTpInstanceFromResourceInstance(ctx context.Context, resInst *tp_pb.ResourceInstance) *tp_pb.EponTechProfileInstance {
+	if resInst == nil {
+		logger.Error(ctx, "resource-instance-nil")
+		return nil
+	}
+	eponTp := t.getEponTPFromKVStore(ctx, resInst.TpId)
+	if eponTp == nil {
+		logger.Warnw(ctx, "tp-not-found-on-kv--creating-default-tp", log.Fields{"tpID": resInst.TpId})
+		eponTp = t.getDefaultEponProfile(ctx)
+	}
+	return t.buildEponTpInstanceFromResourceInstance(ctx, eponTp, resInst)
+}
+
+func (t *TechProfileMgr) reconcileTpInstancesToCache(ctx context.Context) error {
+
+	tech := t.resourceMgr.GetTechnology()
+	newCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+	kvPairs, err := t.config.ResourceInstanceKVBacked.List(newCtx, tech)
+	if err != nil {
+		logger.Errorw(ctx, "error-listing-resource-instances-from-kv-store", log.Fields{"err": err, "tech": tech})
+		return err
+	}
+
+	if tech == xgspon || tech == xgpon || tech == gpon {
+		for keyPath, kvPair := range kvPairs {
+			logger.Debugw(ctx, "attempting-to-reconcile-tp-instance-from-resource-instance", log.Fields{"resourceInstPath": keyPath})
+			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+				var resInst tp_pb.ResourceInstance
+				if err = proto.Unmarshal(value, &resInst); err != nil {
+					logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"err": err, "keyPath": keyPath, "value": value})
+					continue
+				} else {
+					if tpInst := t.getTpInstanceFromResourceInstance(ctx, &resInst); tpInst != nil {
+						// Trim the default prefix from the kv path; the remaining suffix is the key into the internal cache
+						keySuffixSlice := regexp.MustCompile(t.config.ResourceInstanceKVPathPrefix+"/").Split(keyPath, 2)
+						if len(keySuffixSlice) == 2 {
+							keySuffixFormatRegexp := regexp.MustCompile(`^[a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+							// Make sure the key suffix matches the expected <tech>/<tpID>/olt-{...}/pon-{...}/onu-{...}/uni-{...} format
+							if !keySuffixFormatRegexp.MatchString(keySuffixSlice[1]) {
+								logger.Errorw(ctx, "kv-path-not-conforming-to-format", log.Fields{"kvPath": keySuffixSlice[1]})
+								continue
+							}
+						} else {
+							logger.Errorw(ctx, "kv-instance-key-path-not-in-the-expected-format", log.Fields{"kvPath": keyPath})
+							continue
+						}
+						t.tpInstanceMapLock.Lock()
+						t.tpInstanceMap[keySuffixSlice[1]] = tpInst
+						t.tpInstanceMapLock.Unlock()
+						logger.Debugw(ctx, "reconciled-tp-success", log.Fields{"keyPath": keyPath})
+					}
+				}
+			} else {
+				logger.Errorw(ctx, "error-converting-kv-pair-value-to-byte", log.Fields{"err": err})
+			}
+		}
+	} else if tech == epon {
+		for keyPath, kvPair := range kvPairs {
+			logger.Debugw(ctx, "attempting-to-reconcile-epon-tp-instance", log.Fields{"keyPath": keyPath})
+			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+				var resInst tp_pb.ResourceInstance
+				if err = proto.Unmarshal(value, &resInst); err != nil {
+					logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"err": err, "keyPath": keyPath, "value": value})
+					continue
+				} else {
+					if eponTpInst := t.getEponTpInstanceFromResourceInstance(ctx, &resInst); eponTpInst != nil {
+						// Trim the default prefix from the kv path; the remaining suffix is the key into the internal cache
+						keySuffixSlice := regexp.MustCompile(t.config.ResourceInstanceKVPathPrefix+"/").Split(keyPath, 2)
+						if len(keySuffixSlice) == 2 {
+							keySuffixFormatRegexp := regexp.MustCompile(`^[a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+							// Make sure the key suffix matches the expected <tech>/<tpID>/olt-{...}/pon-{...}/onu-{...}/uni-{...} format
+							if !keySuffixFormatRegexp.MatchString(keySuffixSlice[1]) {
+								logger.Errorw(ctx, "kv-path-not-conforming-to-format", log.Fields{"kvPath": keySuffixSlice[1]})
+								continue
+							}
+						} else {
+							logger.Errorw(ctx, "kv-instance-key-path-not-in-the-expected-format", log.Fields{"kvPath": keyPath})
+							continue
+						}
+						t.epontpInstanceMapLock.Lock()
+						t.eponTpInstanceMap[keySuffixSlice[1]] = eponTpInst
+						t.epontpInstanceMapLock.Unlock()
+						logger.Debugw(ctx, "reconciled-epon-tp-success", log.Fields{"keyPath": keyPath})
+					}
+				}
+			} else {
+				logger.Errorw(ctx, "error-converting-kv-pair-value-to-byte", log.Fields{"err": err})
+			}
+		}
+	} else {
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech})
+		return fmt.Errorf("unknown-tech-%v", tech)
+	}
+
+	return nil
+}
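Reconciliation admits a cache entry only when its key suffix matches the strict <tech>/<tpID>/olt-{...}/pon-{...}/onu-{...}/uni-{...} shape. The following standalone check mirrors that regexp:

	re := regexp.MustCompile(`^[a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
	fmt.Println(re.MatchString("XGS-PON/64/olt-{deadbeef}/pon-{0}/onu-{1}/uni-{0}")) // true
	fmt.Println(re.MatchString("XGS-PON/64/olt-{DEADBEEF}/pon-{0}/onu-{1}/uni-{0}")) // false: the olt id charset is lower-case only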
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..5622345
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"context"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+)
+
+type TechProfileIf interface {
+	SetKVClient(ctx context.Context, pathPrefix string) *db.Backend
+	GetTechProfileInstanceKey(ctx context.Context, tpID uint32, uniPortName string) string
+	GetTPInstance(ctx context.Context, path string) (interface{}, error)
+	CreateTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string, intfID uint32) (interface{}, error)
+	DeleteTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string) error
+	GetUsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig
+	GetDsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig
+	GetTrafficScheduler(tpInstance *tp_pb.TechProfileInstance, SchedCfg *tp_pb.SchedulerConfig, ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+	GetTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance) []*tp_pb.TrafficQueue
+	GetGemportForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) interface{}
+	FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, ponIntf uint32, onuID uint32) interface{}
+	GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+	FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
+}
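Because GetTPInstance and CreateTechProfileInstance return interface{} (the concrete instance may belong to the GPON family or to EPON), callers are expected to type-switch on the result. A hedged usage sketch; allocIDOf is a hypothetical helper:

	func allocIDOf(ctx context.Context, tpMgr techprofile.TechProfileIf, path string) (uint32, error) {
		inst, err := tpMgr.GetTPInstance(ctx, path)
		if err != nil {
			return 0, err
		}
		switch v := inst.(type) {
		case *tp_pb.TechProfileInstance: // GPON / XGPON / XGS-PON
			return v.UsScheduler.AllocId, nil
		case *tp_pb.EponTechProfileInstance: // EPON
			return v.AllocId, nil
		default:
			return 0, fmt.Errorf("unexpected tech profile instance type %T", inst)
		}
	}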
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
new file mode 100644
index 0000000..49c0b10
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package version
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Default build-time variables.
+// These values can (and should) be overridden via ldflags when built
+// with `make`.
+var (
+	version   = "unknown-version"
+	goVersion = "unknown-goversion"
+	vcsRef    = "unknown-vcsref"
+	vcsDirty  = "unknown-vcsdirty"
+	buildTime = "unknown-buildtime"
+	os        = "unknown-os"
+	arch      = "unknown-arch"
+)
+
+type VersionInfoType struct {
+	Version   string `json:"version"`
+	GoVersion string `json:"goversion"`
+	VcsRef    string `json:"vcsref"`
+	VcsDirty  string `json:"vcsdirty"`
+	BuildTime string `json:"buildtime"`
+	Os        string `json:"os"`
+	Arch      string `json:"arch"`
+}
+
+var VersionInfo VersionInfoType
+
+func init() {
+	VersionInfo = VersionInfoType{
+		Version:   version,
+		VcsRef:    vcsRef,
+		VcsDirty:  vcsDirty,
+		GoVersion: goVersion,
+		Os:        os,
+		Arch:      arch,
+		BuildTime: buildTime,
+	}
+}
+
+func (v VersionInfoType) String(indent string) string {
+	builder := strings.Builder{}
+
+	builder.WriteString(fmt.Sprintf("%sVersion:      %s\n", indent, v.Version))
+	builder.WriteString(fmt.Sprintf("%sGoVersion:    %s\n", indent, v.GoVersion))
+	builder.WriteString(fmt.Sprintf("%sVCS Ref:      %s\n", indent, v.VcsRef))
+	builder.WriteString(fmt.Sprintf("%sVCS Dirty:    %s\n", indent, v.VcsDirty))
+	builder.WriteString(fmt.Sprintf("%sBuilt:        %s\n", indent, v.BuildTime))
+	builder.WriteString(fmt.Sprintf("%sOS/Arch:      %s/%s\n", indent, v.Os, v.Arch))
+	return builder.String()
+}
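The zero values above are replaced at link time via the standard Go -X mechanism, e.g. -ldflags "-X github.com/opencord/voltha-lib-go/v5/pkg/version.version=1.2.3" (the exact flag set comes from the build system). At runtime the populated struct prints itself; for example:

	import (
		"fmt"

		"github.com/opencord/voltha-lib-go/v5/pkg/version"
	)

	func printVersion() {
		fmt.Print(version.VersionInfo.String("  ")) // two-space indent on every line
	}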