VOL-1596 Add Support for handling multicast groups in OpenOLT Adapter.
VOL-1595 Add Support for handling multicast flows in OpenOLT Adapter.

Depends on voltha-protos from the patch below:
https://gerrit.opencord.org/#/c/16690/

Change-Id: I1cc9900bd6400bb31aed11beda674138838a21d2
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
new file mode 100644
index 0000000..de5cfc0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/golang/protobuf/proto"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
+
+// AdapterProxy is the interface for the adapter proxy implementation.
+type AdapterProxy interface {
+	SendInterAdapterMessage(ctx context.Context,
+		msg proto.Message,
+		msgType ic.InterAdapterMessageType_Types,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string,
+		messageID string) error
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
new file mode 100644
index 0000000..dbf3418
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+// CoreProxy is the interface for the voltha-go core proxy.
+type CoreProxy interface {
+	UpdateCoreReference(deviceID string, coreReference string)
+	DeleteCoreReference(deviceID string)
+	// getCoreTopic(deviceID string) kafka.Topic
+	//GetAdapterTopic(args ...string) kafka.Topic
+	// getAdapterTopic(args ...string) kafka.Topic
+	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
+	DeviceUpdate(ctx context.Context, device *voltha.Device) error
+	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
+	PortsStateUpdate(ctx context.Context, deviceID string, operStatus voltha.OperStatus_Types) error
+	DeleteAllPorts(ctx context.Context, deviceID string) error
+	DeviceStateUpdate(ctx context.Context, deviceID string,
+		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
+
+	DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error
+	ChildDeviceDetected(ctx context.Context, parentDeviceID string, parentPortNo int,
+		childDeviceType string, channelID int, vendorID string, serialNumber string, onuID int64) (*voltha.Device, error)
+
+	ChildDevicesLost(ctx context.Context, parentDeviceID string) error
+	ChildDevicesDetected(ctx context.Context, parentDeviceID string) error
+	GetDevice(ctx context.Context, parentDeviceID string, deviceID string) (*voltha.Device, error)
+	GetChildDevice(ctx context.Context, parentDeviceID string, kwargs map[string]interface{}) (*voltha.Device, error)
+	GetChildDevices(ctx context.Context, parentDeviceID string) (*voltha.Devices, error)
+	SendPacketIn(ctx context.Context, deviceID string, port uint32, pktPayload []byte) error
+	DeviceReasonUpdate(ctx context.Context, deviceID string, deviceReason string) error
+	PortStateUpdate(ctx context.Context, deviceID string, pType voltha.Port_PortType, portNo uint32,
+		operStatus voltha.OperStatus_Types) error
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
new file mode 100644
index 0000000..c144935
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapterif
+
+import (
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+// EventProxy is the interface for the event proxy.
+type EventProxy interface {
+	SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+	SendKpiEvent(id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+		subCategory EventSubCategory, raisedTs int64) error
+}
+
+const (
+	EventTypeVersion = "0.1"
+)
+
+type (
+	EventType        = voltha.EventType_Types
+	EventCategory    = voltha.EventCategory_Types
+	EventSubCategory = voltha.EventSubCategory_Types
+)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
new file mode 100644
index 0000000..b302214
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
+
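+// AdapterProxy is the proxy through which adapters exchange inter-adapter messages over the Kafka inter-container bus.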
+type AdapterProxy struct {
+	kafkaICProxy *kafka.InterContainerProxy
+	adapterTopic string
+	coreTopic    string
+}
+
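+// NewAdapterProxy returns an AdapterProxy bound to the given inter-container proxy, adapter topic and core topic.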
+func NewAdapterProxy(kafkaProxy *kafka.InterContainerProxy, adapterTopic string, coreTopic string) *AdapterProxy {
+	var proxy AdapterProxy
+	proxy.kafkaICProxy = kafkaProxy
+	proxy.adapterTopic = adapterTopic
+	proxy.coreTopic = coreTopic
+	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	return &proxy
+}
+
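+// SendInterAdapterMessage marshals msg into an InterAdapterMessage and invokes the
+// process_inter_adapter_message RPC on the destination adapter topic.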
+func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
+	msg proto.Message,
+	msgType ic.InterAdapterMessageType_Types,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string,
+	messageId string) error {
+	logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	//Marshal the message
+	var marshalledMsg *any.Any
+	var err error
+	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
+		logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
+		return err
+	}
+
+	//Build the inter adapter message
+	header := &ic.InterAdapterHeader{
+		Type:          msgType,
+		FromTopic:     fromAdapter,
+		ToTopic:       toAdapter,
+		ToDeviceId:    toDeviceId,
+		ProxyDeviceId: proxyDeviceId,
+	}
+	if messageId != "" {
+		header.Id = messageId
+	} else {
+		header.Id = uuid.New().String()
+	}
+	header.Timestamp = time.Now().Unix()
+	iaMsg := &ic.InterAdapterMessage{
+		Header: header,
+		Body:   marshalledMsg,
+	}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: iaMsg,
+	}
+
+	// Set up the required rpc arguments
+	topic := kafka.Topic{Name: toAdapter}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_inter_adapter_message"
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(rpc, "", success, result)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
new file mode 100644
index 0000000..acf818c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "common"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
new file mode 100644
index 0000000..9b46c28
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"sync"
+
+	"github.com/golang/protobuf/ptypes"
+	a "github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
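+// CoreProxy is the proxy through which adapters invoke RPCs on the VOLTHA core; it tracks, per device, which core topic owns the device.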
+type CoreProxy struct {
+	kafkaICProxy        *kafka.InterContainerProxy
+	adapterTopic        string
+	coreTopic           string
+	deviceIdCoreMap     map[string]string
+	lockDeviceIdCoreMap sync.RWMutex
+}
+
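+// NewCoreProxy returns a CoreProxy bound to the given inter-container proxy, adapter topic and core topic.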
+func NewCoreProxy(kafkaProxy *kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+	var proxy CoreProxy
+	proxy.kafkaICProxy = kafkaProxy
+	proxy.adapterTopic = adapterTopic
+	proxy.coreTopic = coreTopic
+	proxy.deviceIdCoreMap = make(map[string]string)
+	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
+	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+
+	return &proxy
+}
+
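+// unPackResponse turns a failed RPC response into a gRPC status error; it returns nil when the RPC succeeded.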
+func unPackResponse(rpc string, deviceId string, success bool, response *a.Any) error {
+	if success {
+		return nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
+	}
+}
+
+// UpdateCoreReference adds or updates a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	ap.deviceIdCoreMap[deviceId] = coreReference
+}
+
+// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	delete(ap.deviceIdCoreMap, deviceId)
+}
+
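+// getCoreTopic returns the core topic registered for the given device, falling back to the default core topic.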
+func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+
+	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
+		return kafka.Topic{Name: t}
+	}
+
+	return kafka.Topic{Name: ap.coreTopic}
+}
+
+func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
+	return kafka.Topic{Name: ap.adapterTopic}
+}
+
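+// RegisterAdapter registers the adapter and its supported device types with the core.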
+func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
+	logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	rpc := "Register"
+	topic := kafka.Topic{Name: ap.coreTopic}
+	replyToTopic := ap.getAdapterTopic()
+	args := make([]*kafka.KVArg, 2)
+	args[0] = &kafka.KVArg{
+		Key:   "adapter",
+		Value: adapter,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "deviceTypes",
+		Value: deviceTypes,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
+	logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(rpc, "", success, result)
+}
+
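+// DeviceUpdate pushes an updated device object to the core owning the device.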
+func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
+	logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
+	rpc := "DeviceUpdate"
+	toTopic := ap.getCoreTopic(device.Id)
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
+	return unPackResponse(rpc, device.Id, success, result)
+}
+
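+// PortCreated notifies the core that a port has been created on the device.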
+func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
+	logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
+	rpc := "PortCreated"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: port,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
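+// PortsStateUpdate updates the operational status of all ports of the device in the core.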
+func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
+	rpc := "PortsStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
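+// DeleteAllPorts requests that the core delete all ports of the device.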
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
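+// DeviceStateUpdate reports the connect and operational status of the device to the core.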
+func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
+	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+	rpc := "DeviceStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+	cStatus := &ic.IntType{Val: int64(connStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "connect_status",
+		Value: cStatus,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
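+// ChildDeviceDetected notifies the core that a child device (an ONU) has been detected and returns the device the core created for it.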
+func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
+	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
+	logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
+	rpc := "ChildDeviceDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 7)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+	ppn := &ic.IntType{Val: int64(parentPortNo)}
+	args[1] = &kafka.KVArg{
+		Key:   "parent_port_no",
+		Value: ppn,
+	}
+	cdt := &ic.StrType{Val: childDeviceType}
+	args[2] = &kafka.KVArg{
+		Key:   "child_device_type",
+		Value: cdt,
+	}
+	channel := &ic.IntType{Val: int64(channelId)}
+	args[3] = &kafka.KVArg{
+		Key:   "channel_id",
+		Value: channel,
+	}
+	vId := &ic.StrType{Val: vendorId}
+	args[4] = &kafka.KVArg{
+		Key:   "vendor_id",
+		Value: vId,
+	}
+	sNo := &ic.StrType{Val: serialNumber}
+	args[5] = &kafka.KVArg{
+		Key:   "serial_number",
+		Value: sNo,
+	}
+	oId := &ic.IntType{Val: int64(onuId)}
+	args[6] = &kafka.KVArg{
+		Key:   "onu_id",
+		Value: oId,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO: Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+
+}
+
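+// ChildDevicesLost notifies the core that connectivity to all child devices of the parent device has been lost.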
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
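+// ChildDevicesDetected notifies the core that the child devices of the parent device have been detected again.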
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
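+// GetDevice retrieves a device from the core owning the parent device.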
+func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
+	logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
+	rpc := "GetDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
+
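+// GetChildDevice retrieves a child device from the core, selected by the kwargs keys serial_number, onu_id or parent_port_no.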
+func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
+	logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+	rpc := "GetChildDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 4)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	var cnt uint8
+	for k, v := range kwargs {
+		cnt++
+		if k == "serial_number" {
+			val := &ic.StrType{Val: v.(string)}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "onu_id" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "parent_port_no" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		}
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
+
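+// GetChildDevices retrieves all child devices of the given parent device from the core.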
+func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
+	logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	rpc := "GetChildDevices"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevices := &voltha.Devices{}
+		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return volthaDevices, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
+
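+// SendPacketIn forwards a packet received on the given port to the core as a packet-in.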
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
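+// DeviceReasonUpdate updates the device reason string in the core.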
+func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
+	logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
+	rpc := "DeviceReasonUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	reason := &ic.StrType{Val: deviceReason}
+	args[1] = &kafka.KVArg{
+		Key:   "device_reason",
+		Value: reason,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
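+// DevicePMConfigUpdate pushes updated PM configurations for a device to the core.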
+func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
+	logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	rpc := "DevicePMConfigUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(pmConfigs.Id)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device_pm_config",
+		Value: pmConfigs,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
+	return unPackResponse(rpc, pmConfigs.Id, success, result)
+}
+
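+// ReconcileChildDevices requests that the core reconcile the child devices of the given parent device.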
+func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	rpc := "ReconcileChildDevices"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{
+		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
+}
+
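+// PortStateUpdate updates the operational status of a single port of the device in the core.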
+func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
+	operStatus voltha.OperStatus_Types) error {
+	logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	rpc := "PortStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 4)
+	deviceID := &voltha.ID{Id: deviceId}
+	portNo := &ic.IntType{Val: int64(portNum)}
+	portType := &ic.IntType{Val: int64(pType)}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: deviceID,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "port_type",
+		Value: portType,
+	}
+	args[3] = &kafka.KVArg{
+		Key:   "port_no",
+		Value: portNo,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
new file mode 100644
index 0000000..034de8e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
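+// EventProxy publishes device and KPI events on the configured Kafka event topic.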
+type EventProxy struct {
+	kafkaClient kafka.Client
+	eventTopic  kafka.Topic
+}
+
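+// NewEventProxy returns an EventProxy configured through the given options, e.g. (illustrative):
+//
+//	ep := NewEventProxy(MsgClient(kafkaClient), MsgTopic(kafka.Topic{Name: "voltha.events"}))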
+func NewEventProxy(opts ...EventProxyOption) *EventProxy {
+	var proxy EventProxy
+	for _, option := range opts {
+		option(&proxy)
+	}
+	return &proxy
+}
+
+type EventProxyOption func(*EventProxy)
+
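+// MsgClient sets the Kafka client used to publish events.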
+func MsgClient(client kafka.Client) EventProxyOption {
+	return func(args *EventProxy) {
+		args.kafkaClient = client
+	}
+}
+
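+// MsgTopic sets the Kafka topic events are published to.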
+func MsgTopic(topic kafka.Topic) EventProxyOption {
+	return func(args *EventProxy) {
+		args.eventTopic = topic
+	}
+}
+
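+// formatId builds a unique event id from the event name and the current time in nanoseconds.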
+func (ep *EventProxy) formatId(eventName string) string {
+	return fmt.Sprintf("Voltha.openolt.%s.%s", eventName, strconv.FormatInt(time.Now().UnixNano(), 10))
+}
+
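+// getEventHeader populates an event header from the event name, category, sub-category, type and raised timestamp.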
+func (ep *EventProxy) getEventHeader(eventName string, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, eventType adapterif.EventType, raisedTs int64) *voltha.EventHeader {
+	var header voltha.EventHeader
+	if strings.Contains(eventName, "_") {
+		eventName = strings.Join(strings.Split(eventName, "_")[:len(strings.Split(eventName, "_"))-2], "_")
+	} else {
+		eventName = "UNKNOWN_EVENT"
+	}
+	/* Populating event header */
+	header.Id = ep.formatId(eventName)
+	header.Category = category
+	header.SubCategory = subCategory
+	header.Type = eventType
+	header.TypeVersion = adapterif.EventTypeVersion
+	header.RaisedTs = float32(raisedTs)
+	header.ReportedTs = float32(time.Now().UnixNano())
+	return &header
+}
+
+// SendDeviceEvent sends a device event out on the Kafka event topic.
+func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+	if deviceEvent == nil {
+		logger.Error("Recieved empty device event")
+		return errors.New("Device event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_DeviceEvent
+	de.DeviceEvent = deviceEvent
+	event.Header = ep.getEventHeader(deviceEvent.DeviceEventName, category, subCategory, voltha.EventType_DEVICE_EVENT, raisedTs)
+	event.EventType = &de
+	if err := ep.sendEvent(&event); err != nil {
+		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+		return err
+	}
+	logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
+		"DeviceEventName": deviceEvent.DeviceEventName})
+
+	return nil
+
+}
+
+// SendKpiEvent sends KPI events to the voltha.event topic.
+func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+	if kpiEvent == nil {
+		logger.Error("Recieved empty kpi event")
+		return errors.New("KPI event nil")
+	}
+	var event voltha.Event
+	var de voltha.Event_KpiEvent2
+	de.KpiEvent2 = kpiEvent
+	event.Header = ep.getEventHeader(id, category, subCategory, voltha.EventType_KPI_EVENT2, raisedTs)
+	event.EventType = &de
+	if err := ep.sendEvent(&event); err != nil {
+		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+		return err
+	}
+	logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
+		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
+
+	return nil
+
+}
+
+
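+// sendEvent publishes the event on the configured Kafka event topic.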
+func (ep *EventProxy) sendEvent(event *voltha.Event) error {
+	if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
+		return err
+	}
+	logger.Debugw("Sent event to kafka", log.Fields{"event": event})
+
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
new file mode 100644
index 0000000..7697c05
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
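+// PmMetrics holds the performance-monitoring configuration of a device.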
+type PmMetrics struct {
+	deviceId          string
+	frequency         uint32
+	grouped           bool
+	frequencyOverride bool
+	metrics           map[string]*voltha.PmConfig
+}
+
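+// PmMetricsOption configures a PmMetrics instance.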
+type PmMetricsOption func(*PmMetrics)
+
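+// Frequency sets the default sampling frequency.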
+func Frequency(frequency uint32) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequency = frequency
+	}
+}
+
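+// Grouped sets whether the metrics are grouped.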
+func Grouped(grouped bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.grouped = grouped
+	}
+}
+
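+// FrequencyOverride sets whether the default sampling frequency can be overridden.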
+func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequencyOverride = frequencyOverride
+	}
+}
+
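+// Metrics initializes the named metrics as enabled counters.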
+func Metrics(pmNames []string) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.metrics = make(map[string]*voltha.PmConfig)
+		for _, name := range pmNames {
+			args.metrics[name] = &voltha.PmConfig{
+				Name:    name,
+				Type:    voltha.PmConfig_COUNTER,
+				Enabled: true,
+			}
+		}
+	}
+}
+
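+// NewPmMetrics returns the PM metrics configuration for a device, built from the given options, e.g. (illustrative):
+//
+//	pm := NewPmMetrics(deviceId, Frequency(150), Grouped(false), FrequencyOverride(false), Metrics(pmNames))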
+func NewPmMetrics(deviceId string, opts ...PmMetricsOption) *PmMetrics {
+	pm := &PmMetrics{deviceId: deviceId}
+	for _, option := range opts {
+		option(pm)
+	}
+	return pm
+}
+
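+// ToPmConfigs converts the PmMetrics into the voltha.PmConfigs protobuf message.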
+func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
+	pmConfigs := &voltha.PmConfigs{
+		Id:           pm.deviceId,
+		DefaultFreq:  pm.frequency,
+		Grouped:      pm.grouped,
+		FreqOverride: pm.frequencyOverride,
+	}
+	for _, v := range pm.metrics {
+		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
+	}
+	return pmConfigs
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
new file mode 100644
index 0000000..414116b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
@@ -0,0 +1,615 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"errors"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
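+// RequestHandlerProxy dispatches inter-container requests from the core to the adapter implementation.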
+type RequestHandlerProxy struct {
+	TestMode       bool
+	coreInstanceId string
+	adapter        adapters.IAdapter
+	coreProxy      adapterif.CoreProxy
+}
+
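+// NewRequestHandlerProxy returns a RequestHandlerProxy for the given core instance, adapter and core proxy.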
+func NewRequestHandlerProxy(coreInstanceId string, iadapter adapters.IAdapter, cProxy adapterif.CoreProxy) *RequestHandlerProxy {
+	var proxy RequestHandlerProxy
+	proxy.coreInstanceId = coreInstanceId
+	proxy.adapter = iadapter
+	proxy.coreProxy = cProxy
+	return &proxy
+}
+
+func (rhp *RequestHandlerProxy) Adapter_descriptor() (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Device_types() (*voltha.DeviceTypes, error) {
+	return nil, nil
+}
+
+func (rhp *RequestHandlerProxy) Health() (*voltha.HealthStatus, error) {
+	return nil, nil
+}
+
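+// Adopt_device unpacks the device and topic arguments, records the owning core and asks the adapter to adopt the device.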
+func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
+
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the adopt device on the adapter
+	if err := rhp.adapter.Adopt_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
+
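+// Reconcile_device unpacks the arguments and invokes the adapter's reconcile handling for the device.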
+func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+
+	//Invoke the reconcile device API on the adapter
+	if err := rhp.adapter.Reconcile_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Abandon_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
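+// Disable_device unpacks the arguments and invokes the adapter's disable handling for the device.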
+func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Disable_device API on the adapter
+	if err := rhp.adapter.Disable_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
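+// Reenable_device unpacks the arguments and invokes the adapter's re-enable handling for the device.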
+func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reenable_device API on the adapter
+	if err := rhp.adapter.Reenable_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
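+// Reboot_device unpacks the arguments and invokes the adapter's reboot handling for the device.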
+func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the Reboot_device API on the adapter
+	if err := rhp.adapter.Reboot_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+
+}
+
+func (rhp *RequestHandlerProxy) Self_test_device(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
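+// Delete_device unpacks the arguments and invokes the adapter's delete handling for the device.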
+func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	fromTopic := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.FromTopic:
+			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	//Update the core reference for that device
+	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
+	//Invoke the delete_device API on the adapter
+	if err := rhp.adapter.Delete_device(device); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_device_details(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
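+// Update_flows_bulk unpacks the device, flows, groups and flow metadata and invokes the adapter's bulk flow update.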
+func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_flows_bulk")
+	if len(args) < 5 {
+		logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &voltha.Flows{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &voltha.FlowGroups{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flows":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "groups":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the bulk flow update API of the adapter
+	if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
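+// Update_flows_incrementally unpacks the flow and group changes and invokes the adapter's incremental flow update.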
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_flows_incrementally")
+	if len(args) < 5 {
+		logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	flows := &openflow_13.FlowChanges{}
+	flowMetadata := &voltha.FlowMetadata{}
+	groups := &openflow_13.FlowGroupChanges{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				return nil, err
+			}
+		case "group_changes":
+			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				return nil, err
+			}
+		case "flow_metadata":
+			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	//Invoke the incremental flow update API of the adapter
+	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
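+// Update_pm_config unpacks the device and PM configurations and invokes the adapter's PM config update.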
+func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_pm_config")
+	if len(args) < 2 {
+		logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	pmConfigs := &voltha.PmConfigs{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "pm_configs":
+			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
+				logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+	//Invoke the pm config update API of the adapter
+	if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw("Receive_packet_out", log.Fields{"args": args})
+	if len(args) < 3 {
+		logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &ic.StrType{}
+	egressPort := &ic.IntType{}
+	packet := &openflow_13.OfpPacketOut{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "deviceId":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				return nil, err
+			}
+		case "outPort":
+			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
+				logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	//Invoke the packet-out API of the adapter
+	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Suppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Unsuppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
+	return new(empty.Empty), nil
+}
+
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
+	if len(args) < 2 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+
+	capability, err := rhp.adapter.Get_ofp_device_info(device)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	logger.Debugw("Get_ofp_device_info", log.Fields{"cap": capability})
+	return capability, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_ofp_port_info(args []*ic.Argument) (*ic.PortCapability, error) {
+	if len(args) < 3 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	pNo := &ic.IntType{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device":
+			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case "port_no":
+			if err := ptypes.UnmarshalAny(arg.Value, pNo); err != nil {
+				logger.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+	logger.Debugw("Get_ofp_port_info", log.Fields{"deviceId": device.Id, "portNo": pNo.Val})
+	capability, err := rhp.adapter.Get_ofp_port_info(device, pNo.Val)
+	if err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+	return capability, nil
+}
+
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
+	if len(args) < 2 {
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaMsg := &ic.InterAdapterMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+
+	//Invoke the inter adapter API on the handler
+	if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	}
+
+	return new(empty.Empty), nil
+}
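+
+// Illustrative sketch (assumed, not from this library): an adapter's
+// Process_inter_adapter_message typically dispatches on the header type, e.g.
+//
+//	switch msg.Header.Type {
+//	case ic.InterAdapterMessageType_OMCI_REQUEST:
+//		return handleOmci(msg) // handleOmci is a hypothetical helper
+//	default:
+//		return fmt.Errorf("unhandled-message-type-%v", msg.Header.Type)
+//	}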
+
+func (rhp *RequestHandlerProxy) Download_image(args []*ic.Argument) (*voltha.ImageDownload, error) {
+	return &voltha.ImageDownload{}, nil
+}
+
+func (rhp *RequestHandlerProxy) Get_image_download_status(args []*ic.Argument) (*voltha.ImageDownload, error) {
+	return &voltha.ImageDownload{}, nil
+}
+
+func (rhp *RequestHandlerProxy) Cancel_image_download(args []*ic.Argument) (*voltha.ImageDownload, error) {
+	return &voltha.ImageDownload{}, nil
+}
+
+func (rhp *RequestHandlerProxy) Activate_image_update(args []*ic.Argument) (*voltha.ImageDownload, error) {
+	return &voltha.ImageDownload{}, nil
+}
+
+func (rhp *RequestHandlerProxy) Revert_image_update(args []*ic.Argument) (*voltha.ImageDownload, error) {
+	return &voltha.ImageDownload{}, nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
new file mode 100644
index 0000000..d3c562a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+//GetRandomSerialNumber returns a serial number formatted as "HOST:PORT"
+func GetRandomSerialNumber() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%d.%d.%d.%d:%d",
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(255),
+		rand.Intn(9000)+1000,
+	)
+}
+
+//GetRandomMacAddress returns a random MAC address
+func GetRandomMacAddress() string {
+	rand.Seed(time.Now().UnixNano())
+	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+		rand.Intn(128),
+	)
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+var src = rand.NewSource(time.Now().UnixNano())
+
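+//GetRandomString returns a random string of n letters drawn from letterBytes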
+func GetRandomString(n int) string {
+	b := make([]byte, n)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return string(b)
+}
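+
+// Usage sketch (illustrative): the masking loop above consumes letterIdxBits
+// (6) bits per letter, so one 63-bit word from src yields up to letterIdxMax
+// (10) letters without another rand call, e.g.
+//
+//	id := GetRandomString(12) // e.g. "kXbGtRqwMzPa"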
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
new file mode 100644
index 0000000..3b86ac5
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package adapters
+
+import (
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+//IAdapter represents the set of APIs a voltha adapter has to support.
+type IAdapter interface {
+	Adapter_descriptor() error
+	Device_types() (*voltha.DeviceTypes, error)
+	Health() (*voltha.HealthStatus, error)
+	Adopt_device(device *voltha.Device) error
+	Reconcile_device(device *voltha.Device) error
+	Abandon_device(device *voltha.Device) error
+	Disable_device(device *voltha.Device) error
+	Reenable_device(device *voltha.Device) error
+	Reboot_device(device *voltha.Device) error
+	Self_test_device(device *voltha.Device) error
+	Delete_device(device *voltha.Device) error
+	Get_device_details(device *voltha.Device) error
+	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(filter *voltha.EventFilter) error
+	Unsuppress_event(filter *voltha.EventFilter) error
+	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
+	Get_ofp_port_info(device *voltha.Device, port_no int64) (*ic.PortCapability, error)
+	Process_inter_adapter_message(msg *ic.InterAdapterMessage) error
+	Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+}
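+
+// A minimal sketch (assumptions: a hypothetical openOLT type and activate
+// helper) of how an adapter starts satisfying IAdapter:
+//
+//	type openOLT struct{ /* ... */ }
+//
+//	func (o *openOLT) Adopt_device(device *voltha.Device) error {
+//		// activate asynchronously so the Core request returns quickly
+//		go o.activate(device)
+//		return nil
+//	}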
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
new file mode 100644
index 0000000..23ad5a0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Default minimum interval for posting the alive state of the backend kvstore on the liveness channel
+	DefaultLivenessChannelInterval = time.Second * 30
+)
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	sync.RWMutex
+	Client                  kvstore.Client
+	StoreType               string
+	Host                    string
+	Port                    int
+	Timeout                 int
+	PathPrefix              string
+	alive                   bool          // Is this backend connection alive?
+	liveness                chan bool     // channel to post alive state
+	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+	lastLivenessTime        time.Time     // Instant of last alive state push
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(storeType string, host string, port int, timeout int, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:               storeType,
+		Host:                    host,
+		Port:                    port,
+		Timeout:                 timeout,
+		LivenessChannelInterval: DefaultLivenessChannelInterval,
+		PathPrefix:              pathPrefix,
+		alive:                   false, // connection considered down at start
+	}
+
+	address := host + ":" + strconv.Itoa(port)
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		logger.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "host": host, "port": port,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
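+
+// Usage sketch (illustrative values): an adapter typically builds one Backend
+// and namespaces all of its keys under PathPrefix, e.g.
+//
+//	kv := NewBackend("etcd", "etcd.default.svc", 2379, 5, "service/voltha/openolt")
+//	err := kv.Put("devices/"+deviceID, blob) // stored under service/voltha/openolt/devices/<id>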
+
+func (b *Backend) newClient(address string, timeout int) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+func (b *Backend) updateLiveness(alive bool) {
+	// Periodically push a stream of liveness data to the channel,
+	// so that in a live state, the core does not time out and
+	// send a forced liveness message. Push the alive state if the
+	// last push to the channel was beyond livenessChannelInterval
+	if b.liveness != nil {
+
+		if b.alive != alive {
+			logger.Debug("update-liveness-channel-reason-change")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
+			logger.Debug("update-liveness-channel-reason-interval")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Emit log message only for alive state change
+	if b.alive != alive {
+		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		b.alive = alive
+	}
+}
+
+// PerformLivenessCheck performs a dummy key lookup on the kvstore to test
+// connection liveness and posts the result on the liveness channel
+func (b *Backend) PerformLivenessCheck(timeout int) bool {
+	alive := b.Client.IsConnectionUp(timeout)
+	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+	b.updateLiveness(alive)
+	return alive
+}
+
+// EnableLivenessChannel enables the liveness monitor channel. This channel
+// reports "true" or "false" on every kvstore operation to indicate whether
+// or not the connection is still live. The channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update its readiness status
+// and/or take other actions.
+func (b *Backend) EnableLivenessChannel() chan bool {
+	logger.Debug("enable-kvstore-liveness-channel")
+
+	if b.liveness == nil {
+		logger.Debug("create-kvstore-liveness-channel")
+
+		// Channel size of 10 to avoid any possibility of blocking in Load conditions
+		b.liveness = make(chan bool, 10)
+
+		// Post initial alive state
+		b.liveness <- b.alive
+		b.lastLivenessTime = time.Now()
+	}
+
+	return b.liveness
+}
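+
+// Usage sketch (illustrative): a service drains the channel to keep its
+// readiness probe in sync with the kvstore connection, e.g.
+//
+//	liveness := backend.EnableLivenessChannel()
+//	go func() {
+//		for alive := range liveness {
+//			setKvstoreReady(alive) // setKvstoreReady is a hypothetical readiness hook
+//		}
+//	}()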
+
+// isErrorIndicatingAliveKvstore extracts the alive status of the kvstore based on the type of error
+	// Alive unless observed an error indicating so
+	alive := true
+
+	if err != nil {
+
+		// timeout indicates kvstore not reachable/alive
+		if err == context.DeadlineExceeded {
+			alive = false
+		}
+
+		// Need to analyze client-specific errors based on backend type
+		if b.StoreType == "etcd" {
+
+			// For the etcd backend, consider not-alive only for errors indicating
+			// a timed-out request or an unavailable/corrupted cluster. For all remaining
+			// error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+			// we do not infer a not-alive backend because such an error may also
+			// occur due to bad client requests or sequences of operations
+			switch status.Code(err) {
+			case codes.DeadlineExceeded:
+				fallthrough
+			case codes.Unavailable:
+				fallthrough
+			case codes.DataLoss:
+				alive = false
+			}
+
+			//} else {
+			// TODO: Implement for consul backend; would it be needed ever?
+		}
+	}
+
+	return alive
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(key string) (map[string]*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.List(formattedPath, b.Timeout)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(key string) (*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.Get(formattedPath, b.Timeout)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Put stores an item value under the specified key
+func (b *Backend) Put(key string, value interface{}) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
+
+	err := b.Client.Put(formattedPath, value, b.Timeout)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(key string) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Delete(formattedPath, b.Timeout)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(key string) chan *kvstore.Event {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(formattedPath)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(formattedPath, ch)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
new file mode 100644
index 0000000..a5a79ae
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package db
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
new file mode 100644
index 0000000..088593a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+const (
+	// Default timeout in seconds when making a kvstore request
+	defaultKVGetTimeout = 5
+	// Maximum channel buffer between publisher/subscriber goroutines
+	maxClientChannelBufferSize = 10
+)
+
+// These constants represent the event types returned by the KV client
+const (
+	PUT = iota
+	DELETE
+	CONNECTIONDOWN
+	UNKNOWN
+)
+
+// KVPair is a common wrapper for key-value pairs returned from the KV store
+type KVPair struct {
+	Key     string
+	Value   interface{}
+	Version int64
+	Session string
+	Lease   int64
+}
+
+// NewKVPair creates a new KVPair object
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
+	kv := new(KVPair)
+	kv.Key = key
+	kv.Value = value
+	kv.Session = session
+	kv.Lease = lease
+	kv.Version = version
+	return kv
+}
+
+// Event is generated by the KV client when a key change is detected
+type Event struct {
+	EventType int
+	Key       interface{}
+	Value     interface{}
+	Version   int64
+}
+
+// NewEvent creates a new Event object
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
+	evnt := new(Event)
+	evnt.EventType = eventType
+	evnt.Key = key
+	evnt.Value = value
+	evnt.Version = version
+
+	return evnt
+}
+
+// Client represents the set of APIs a KV Client must implement
+type Client interface {
+	List(key string, timeout int) (map[string]*KVPair, error)
+	Get(key string, timeout int) (*KVPair, error)
+	Put(key string, value interface{}, timeout int) error
+	Delete(key string, timeout int) error
+	Reserve(key string, value interface{}, ttl int64) (interface{}, error)
+	ReleaseReservation(key string) error
+	ReleaseAllReservations() error
+	RenewReservation(key string) error
+	Watch(key string) chan *Event
+	AcquireLock(lockName string, timeout int) error
+	ReleaseLock(lockName string) error
+	IsConnectionUp(timeout int) bool // timeout in seconds
+	CloseWatch(key string, ch chan *Event)
+	Close()
+}
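+
+// Illustrative sketch: callers should program against this interface so the
+// consul and etcd implementations stay interchangeable, e.g.
+//
+//	func fetch(kv Client, key string) ([]byte, error) {
+//		pair, err := kv.Get(key, defaultKVGetTimeout)
+//		if err != nil || pair == nil {
+//			return nil, err // a nil pair means the key does not exist
+//		}
+//		return pair.Value.([]byte), nil // both backends store values as []byte
+//	}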
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..2d2a6a6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
new file mode 100644
index 0000000..e391293
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	consulapi "github.com/hashicorp/consul/api"
+	log "github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+type channelContextMap struct {
+	ctx     context.Context
+	channel chan *Event
+	cancel  context.CancelFunc
+}
+
+// ConsulClient represents the consul KV store client
+type ConsulClient struct {
+	session                *consulapi.Session
+	sessionID              string
+	consul                 *consulapi.Client
+	doneCh                 *chan int
+	keyReservations        map[string]interface{}
+	watchedChannelsContext map[string][]*channelContextMap
+	writeLock              sync.Mutex
+}
+
+// NewConsulClient returns a new client for the Consul KV store
+func NewConsulClient(addr string, timeout int) (*ConsulClient, error) {
+
+	duration := GetDuration(timeout)
+
+	config := consulapi.DefaultConfig()
+	config.Address = addr
+	config.WaitTime = duration
+	consul, err := consulapi.NewClient(config)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+
+	doneCh := make(chan int, 1)
+	wChannelsContext := make(map[string][]*channelContextMap)
+	reservations := make(map[string]interface{})
+	return &ConsulClient{consul: consul, doneCh: &doneCh, watchedChannelsContext: wChannelsContext, keyReservations: reservations}, nil
+}
+
+// IsConnectionUp returns whether the connection to the Consul KV store is up (not implemented; always returns false)
+func (c *ConsulClient) IsConnectionUp(timeout int) bool {
+	logger.Error("Unimplemented function")
+	return false
+}
+
+// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) List(key string, timeout int) (map[string]*KVPair, error) {
+	duration := GetDuration(timeout)
+
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = duration
+	// For now we ignore the metadata
+	kvps, _, err := kv.List(key, &queryOptions)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, kvp := range kvps {
+		m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Get(key string, timeout int) (*KVPair, error) {
+
+	duration := GetDuration(timeout)
+
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = duration
+	// For now we ignore the metadata
+	kvp, _, err := kv.Get(key, &queryOptions)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	if kvp != nil {
+		return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1), nil
+	}
+
+	return nil, nil
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the consul API
+// accepts only a []byte as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Put(key string, value interface{}, timeout int) error {
+
+	// Validate that we can create a byte array from the value as consul API expects a byte array
+	var val []byte
+	var er error
+	if val, er = ToByte(value); er != nil {
+		logger.Error(er)
+		return er
+	}
+
+	// Create a key value pair
+	kvp := consulapi.KVPair{Key: key, Value: val}
+	kv := c.consul.KV()
+	var writeOptions consulapi.WriteOptions
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	_, err := kv.Put(&kvp, &writeOptions)
+	if err != nil {
+		logger.Error(err)
+		return err
+	}
+	return nil
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Delete(key string, timeout int) error {
+	kv := c.consul.KV()
+	var writeOptions consulapi.WriteOptions
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	_, err := kv.Delete(key, &writeOptions)
+	if err != nil {
+		logger.Error(err)
+		return err
+	}
+	return nil
+}
+
+func (c *ConsulClient) deleteSession() {
+	if c.sessionID != "" {
+		logger.Debug("cleaning-up-session")
+		session := c.consul.Session()
+		_, err := session.Destroy(c.sessionID, nil)
+		if err != nil {
+			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+		}
+	}
+	c.sessionID = ""
+	c.session = nil
+}
+
+func (c *ConsulClient) createSession(ttl int64, retries int) (*consulapi.Session, string, error) {
+	session := c.consul.Session()
+	entry := &consulapi.SessionEntry{
+		Behavior: consulapi.SessionBehaviorDelete,
+		TTL:      "10s", // strconv.FormatInt(ttl, 10) + "s", // disable ttl
+	}
+
+	for {
+		id, meta, err := session.Create(entry, nil)
+		if err != nil {
+			logger.Errorw("create-session-error", log.Fields{"error": err})
+			if retries == 0 {
+				return nil, "", err
+			}
+		} else if meta.RequestTime == 0 {
+			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			if retries == 0 {
+				return nil, "", errors.New("bad-meta-data")
+			}
+		} else if id == "" {
+			logger.Error("create-session-nil-id")
+			if retries == 0 {
+				return nil, "", errors.New("ID-nil")
+			}
+		} else {
+			return session, id, nil
+		}
+		// If retry param is -1 we will retry indefinitely
+		if retries > 0 {
+			retries--
+		}
+		logger.Debug("retrying-session-create-after-a-second-delay")
+		time.Sleep(time.Duration(1) * time.Second)
+	}
+}
+
+// isEqual verifies whether the contents of two interface values are the same. The focus is on
+// []byte and string types
+func isEqual(val1 interface{}, val2 interface{}) bool {
+	b1, err := ToByte(val1)
+	b2, er := ToByte(val2)
+	if err == nil && er == nil {
+		return bytes.Equal(b1, b2)
+	}
+	return val1 == val2
+}
+
+// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
+// the consul API accepts only a []byte.  Timeout defines how long the function will wait for a response.  TTL
+// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
+// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
+// then the value assigned to that key will be returned.
+func (c *ConsulClient) Reserve(key string, value interface{}, ttl int64) (interface{}, error) {
+
+	// Validate that we can create a byte array from the value as consul API expects a byte array
+	var val []byte
+	var er error
+	if val, er = ToByte(value); er != nil {
+		logger.Error(er)
+		return nil, er
+	}
+
+	// Cleanup any existing session and recreate new ones.  A key is reserved against a session
+	if c.sessionID != "" {
+		c.deleteSession()
+	}
+
+	// Clear session if reservation is not successful
+	reservationSuccessful := false
+	defer func() {
+		if !reservationSuccessful {
+			logger.Debug("deleting-session")
+			c.deleteSession()
+		}
+	}()
+
+	session, sessionID, err := c.createSession(ttl, -1)
+	if err != nil {
+		logger.Errorw("no-session-created", log.Fields{"error": err})
+		return "", errors.New("no-session-created")
+	}
+	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
+	c.sessionID = sessionID
+	c.session = session
+
+	// Try to grab the key using the session
+	kv := c.consul.KV()
+	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
+	result, _, err := kv.Acquire(&kvp, nil)
+	if err != nil {
+		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
+		return nil, err
+	}
+
+	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+
+	// Irrespective of whether we were successful in acquiring the key, read it back and see if it is us.
+	m, err := c.Get(key, defaultKVGetTimeout)
+	if err != nil {
+		return nil, err
+	}
+	if m != nil {
+		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		if m.Key == key && isEqual(m.Value, value) {
+			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
+			// per session.
+			reservationSuccessful = true
+			c.writeLock.Lock()
+			c.keyReservations[key] = m.Value
+			c.writeLock.Unlock()
+			return m.Value, nil
+		}
+		// My reservation has failed.  Return the owner of that key
+		return m.Value, nil
+	}
+	return nil, nil
+}
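+
+// Usage sketch (illustrative): Reserve behaves like a simple ownership claim;
+// compare the returned value against what was written, e.g.
+//
+//	got, err := client.Reserve("leader", []byte(myID), 10)
+//	if err == nil && isEqual(got, []byte(myID)) {
+//		// this client owns the key until the session TTL lapses or ReleaseReservation is called
+//	}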
+
+// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
+func (c *ConsulClient) ReleaseAllReservations() error {
+	kv := c.consul.KV()
+	var kvp consulapi.KVPair
+	var result bool
+	var err error
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	for key, value := range c.keyReservations {
+		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
+		result, _, err = kv.Release(&kvp, nil)
+		if err != nil {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+		if !result {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// ReleaseReservation releases reservation for a specific key.
+func (c *ConsulClient) ReleaseReservation(key string) error {
+	var ok bool
+	var reservedValue interface{}
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if reservedValue, ok = c.keyReservations[key]; !ok {
+		return errors.New("key-not-reserved:" + key)
+	}
+	// Release the reservation
+	kv := c.consul.KV()
+	kvp := consulapi.KVPair{Key: key, Value: reservedValue.([]byte), Session: c.sessionID}
+
+	result, _, er := kv.Release(&kvp, nil)
+	if er != nil {
+		return er
+	}
+	// Remove that key entry on success
+	if result {
+		delete(c.keyReservations, key)
+		return nil
+	}
+	return errors.New("key-cannot-be-unreserved")
+}
+
+// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
+// period specified when reserving the key
+func (c *ConsulClient) RenewReservation(key string) error {
+	// In the case of Consul, renewing the reservation of a reserved key only requires renewing the client session.
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	// Verify the key was reserved
+	if _, ok := c.keyReservations[key]; !ok {
+		return errors.New("key-not-reserved")
+	}
+
+	if c.session == nil {
+		return errors.New("no-session-exist")
+	}
+
+	var writeOptions consulapi.WriteOptions
+	if _, _, err := c.session.Renew(c.sessionID, &writeOptions); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel on which the caller needs to
+// listen to receive Events.
+func (c *ConsulClient) Watch(key string) chan *Event {
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Create a context to track this request
+	watchContext, cFunc := context.WithCancel(context.Background())
+
+	// Save the channel and context reference for later
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	ccm := channelContextMap{channel: ch, ctx: watchContext, cancel: cFunc}
+	c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key], &ccm)
+
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(watchContext, key, ch)
+
+	return ch
+}
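+
+// Usage sketch (illustrative):
+//
+//	events := client.Watch("config/device1")
+//	for ev := range events {
+//		if ev.EventType == PUT {
+//			applyConfig(ev.Value) // applyConfig is a hypothetical handler
+//		}
+//	}
+//	// call client.CloseWatch("config/device1", events) to stop watching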
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *ConsulClient) CloseWatch(key string, ch chan *Event) {
+	// First close the context
+	var ok bool
+	var watchedChannelsContexts []*channelContextMap
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
+		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chCtxMap := range watchedChannelsContexts {
+		if chCtxMap.channel == ch {
+			logger.Debug("channel-found")
+			chCtxMap.cancel()
+			//close the channel
+			close(ch)
+			pos = i
+			break
+		}
+	}
+	// Remove that entry if present
+	if pos >= 0 {
+		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
+	}
+	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+}
+
+func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
+	if (kv1 == nil) && (kv2 == nil) {
+		return true
+	} else if (kv1 == nil) || (kv2 == nil) {
+		return false
+	}
+	// Both the KV should be non-null here
+	if kv1.Key != kv2.Key ||
+		!bytes.Equal(kv1.Value, kv2.Value) ||
+		kv1.Session != kv2.Session ||
+		kv1.LockIndex != kv2.LockIndex ||
+		kv1.ModifyIndex != kv2.ModifyIndex {
+		return false
+	}
+	return true
+}
+
+func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
+	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+
+	defer c.CloseWatch(key, ch)
+	duration := GetDuration(defaultKVGetTimeout)
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = duration
+
+	// Get the existing value, if any
+	previousKVPair, meta, err := kv.Get(key, &queryOptions)
+	if err != nil {
+		logger.Debug(err)
+	}
+	// Guard against a nil meta in case the initial Get failed
+	var lastIndex uint64
+	if meta != nil {
+		lastIndex = meta.LastIndex
+	}
+
+	// Wait for change.  Push any change onto the channel and keep waiting for new update
+	var pair *consulapi.KVPair
+	waitOptions := queryOptions.WithContext(watchContext)
+	for {
+		waitOptions.WaitIndex = lastIndex
+		pair, meta, err = kv.Get(key, waitOptions)
+		select {
+		case <-watchContext.Done():
+			logger.Debug("done-event-received-exiting")
+			return
+		default:
+			if err != nil {
+				logger.Warnw("error-from-watch", log.Fields{"error": err})
+				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
+			} else {
+				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+			}
+		}
+		if err != nil {
+			logger.Debug(err)
+			// On error, block for 10 milliseconds to prevent endless loop
+			time.Sleep(10 * time.Millisecond)
+		} else if meta.LastIndex <= lastIndex {
+			logger.Info("no-index-change-or-negative")
+		} else {
+			logger.Debugw("update-received", log.Fields{"pair": pair})
+			if pair == nil {
+				ch <- NewEvent(DELETE, key, []byte(""), -1)
+			} else if !c.isKVEqual(pair, previousKVPair) {
+				// Push the change onto the channel if the data has changed
+				// For now just assume it's a PUT change
+				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
+			}
+			previousKVPair = pair
+			lastIndex = meta.LastIndex
+		}
+	}
+}
+
+// Close closes the KV store client
+func (c *ConsulClient) Close() {
+	var writeOptions consulapi.WriteOptions
+	// Inform any goroutine it's time to say goodbye.
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if c.doneCh != nil {
+		close(*c.doneCh)
+	}
+
+	// Clear the sessionID
+	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
+	}
+}
+
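+// AcquireLock and ReleaseLock are no-ops for the consul client; locking is
+// currently implemented only for the etcd client.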
+func (c *ConsulClient) AcquireLock(lockName string, timeout int) error {
+	return nil
+}
+
+func (c *ConsulClient) ReleaseLock(lockName string) error {
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..7096748
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	ectdAPI          *v3Client.Client
+	leaderRev        v3Client.Client
+	keyReservations  map[string]*v3Client.LeaseID
+	watchedChannels  sync.Map
+	writeLock        sync.Mutex
+	lockToMutexMap   map[string]*v3Concurrency.Mutex
+	lockToSessionMap map[string]*v3Concurrency.Session
+	lockToMutexLock  sync.Mutex
+}
+
+// Connection Timeout in Seconds
+var connTimeout = 2
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(addr string, timeout int) (*EtcdClient, error) {
+	duration := GetDuration(timeout)
+
+	c, err := v3Client.New(v3Client.Config{
+		Endpoints:   []string{addr},
+		DialTimeout: duration,
+	})
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+
+	reservations := make(map[string]*v3Client.LeaseID)
+	lockMutexMap := make(map[string]*v3Concurrency.Mutex)
+	lockSessionMap := make(map[string]*v3Concurrency.Session)
+
+	return &EtcdClient{ectdAPI: c, keyReservations: reservations, lockToMutexMap: lockMutexMap,
+		lockToSessionMap: lockSessionMap}, nil
+}
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(timeout int) bool {
+	// Let's try to get a non existent key.  If the connection is up then there will be no error returned.
+	if _, err := c.Get("non-existent-key", timeout); err != nil {
+		return false
+	}
+	return true
+}
+
+// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) List(key string, timeout int) (map[string]*KVPair, error) {
+	duration := GetDuration(timeout)
+
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+
+	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
+	cancel()
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(key string, timeout int) (*KVPair, error) {
+	duration := GetDuration(timeout)
+
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+
+	resp, err := c.ectdAPI.Get(ctx, key)
+	cancel()
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	for _, ev := range resp.Kvs {
+		// Only one value is returned
+		return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+	}
+	return nil, nil
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(key string, value interface{}, timeout int) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	duration := GetDuration(timeout)
+
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	var err error
+	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
+	// that KV key permanent instead of automatically removing it after a lease expiration
+	if leaseID, ok := c.keyReservations[key]; ok {
+		_, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
+	} else {
+		_, err = c.ectdAPI.Put(ctx, key, val)
+	}
+	cancel()
+	if err != nil {
+		switch err {
+		case context.Canceled:
+			logger.Warnw("context-cancelled", log.Fields{"error": err})
+		case context.DeadlineExceeded:
+			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+		case v3rpcTypes.ErrEmptyKey:
+			logger.Warnw("etcd-client-error", log.Fields{"error": err})
+		default:
+			logger.Warnw("bad-endpoints", log.Fields{"error": err})
+		}
+		return err
+	}
+	return nil
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(key string, timeout int) error {
+
+	duration := GetDuration(timeout)
+
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+
+	defer cancel()
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	// delete the key
+	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
+		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+		return err
+	}
+	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
+	return nil
+}
+
+// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
+// the etcd API accepts only a string.  Timeout defines how long the function will wait for a response.  TTL
+// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
+// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
+// then the value assigned to that key will be returned.
+func (c *EtcdClient) Reserve(key string, value interface{}, ttl int64) (interface{}, error) {
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return nil, fmt.Errorf("unexpected-type%T", value)
+	}
+
+	duration := GetDuration(connTimeout)
+
+	// Create a lease
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+
+	resp, err := c.ectdAPI.Grant(ctx, ttl)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	// Register the lease id
+	c.writeLock.Lock()
+	c.keyReservations[key] = &resp.ID
+	c.writeLock.Unlock()
+
+	// Revoke lease if reservation is not successful
+	reservationSuccessful := false
+	defer func() {
+		if !reservationSuccessful {
+			if err = c.ReleaseReservation(key); err != nil {
+				logger.Error("cannot-release-lease")
+			}
+		}
+	}()
+
+	// Try to grab the key with the above lease
+	txn := c.ectdAPI.Txn(context.Background())
+	txn = txn.If(v3Client.Compare(v3Client.Version(key), "=", 0))
+	txn = txn.Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID)))
+	txn = txn.Else(v3Client.OpGet(key))
+	result, er := txn.Commit()
+	if er != nil {
+		return nil, er
+	}
+
+	if !result.Succeeded {
+		// Verify whether we are already the owner of that Key
+		if len(result.Responses) > 0 &&
+			len(result.Responses[0].GetResponseRange().Kvs) > 0 {
+			kv := result.Responses[0].GetResponseRange().Kvs[0]
+			if string(kv.Value) == val {
+				reservationSuccessful = true
+				return value, nil
+			}
+			return kv.Value, nil
+		}
+	} else {
+		// Read the Key to ensure this is our Key
+		m, err := c.Get(key, defaultKVGetTimeout)
+		if err != nil {
+			return nil, err
+		}
+		if m != nil {
+			if m.Key == key && isEqual(m.Value, value) {
+				// My reservation is successful - register it.  For now, support is only for 1 reservation per key
+				// per session.
+				reservationSuccessful = true
+				return value, nil
+			}
+			// My reservation has failed.  Return the owner of that key
+			return m.Value, nil
+		}
+	}
+	return nil, nil
+}
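+
+// The transaction above is a compare-and-set: Version(key) == 0 holds only
+// when the key does not yet exist, so Then(OpPut ...) claims it under the
+// lease while Else(OpGet) returns the current owner. Equivalently:
+//
+//	txn.If(v3Client.Compare(v3Client.Version(key), "=", 0)).
+//		Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID))).
+//		Else(v3Client.OpGet(key))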
+
+// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
+func (c *EtcdClient) ReleaseAllReservations() error {
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	duration := GetDuration(connTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+
+	for key, leaseID := range c.keyReservations {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// ReleaseReservation releases reservation for a specific key.
+func (c *EtcdClient) ReleaseReservation(key string) error {
+	// Get the lease ID using the key
+	logger.Debugw("Release-reservation", log.Fields{"key": key})
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if leaseID, ok = c.keyReservations[key]; !ok {
+		return nil
+	}
+	duration := GetDuration(connTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Error(err)
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
+// period specified when reserving the key
+func (c *EtcdClient) RenewReservation(key string) error {
+	// Get the lease ID using the key
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if leaseID, ok = c.keyReservations[key]; !ok {
+		return errors.New("key-not-reserved")
+	}
+	duration := GetDuration(connTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
+			return err
+		}
+	} else {
+		return errors.New("lease-expired")
+	}
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel on which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(key string) chan *Event {
+	w := v3Client.NewWatcher(c.ectdAPI)
+	ctx, cancel := context.WithCancel(context.Background())
+	channel := w.Watch(ctx, key)
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Log only the number of channel maps, as the underlying logger cannot format a map of
+	// channels into JSON.
+	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(channel, ch, cancel)
+
+	return ch
+
+}
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug("channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug("start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug("stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes the KV store client
+func (c *EtcdClient) Close() {
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if err := c.ectdAPI.Close(); err != nil {
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
+	}
+}
+
+func (c *EtcdClient) addLockName(lockName string, lock *v3Concurrency.Mutex, session *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	c.lockToMutexMap[lockName] = lock
+	c.lockToSessionMap[lockName] = session
+}
+
+func (c *EtcdClient) deleteLockName(lockName string) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	delete(c.lockToMutexMap, lockName)
+	delete(c.lockToSessionMap, lockName)
+}
+
+func (c *EtcdClient) getLock(lockName string) (*v3Concurrency.Mutex, *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	var lock *v3Concurrency.Mutex
+	var session *v3Concurrency.Session
+	if l, exist := c.lockToMutexMap[lockName]; exist {
+		lock = l
+	}
+	if s, exist := c.lockToSessionMap[lockName]; exist {
+		session = s
+	}
+	return lock, session
+}
+
+// AcquireLock acquires a distributed lock named lockName, creating a new etcd session bounded by timeout (in seconds)
+func (c *EtcdClient) AcquireLock(lockName string, timeout int) error {
+	duration := GetDuration(timeout)
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+	session, err := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
+	if err != nil {
+		return err
+	}
+	mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
+	if err := mu.Lock(context.Background()); err != nil {
+		_ = session.Close()
+		return err
+	}
+	c.addLockName(lockName, mu, session)
+	return nil
+}
+
+// ReleaseLock releases the distributed lock named lockName and closes its etcd session
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	lock, session := c.getLock(lockName)
+	var err error
+	if lock != nil {
+		if e := lock.Unlock(context.Background()); e != nil {
+			err = e
+		}
+	}
+	if session != nil {
+		if e := session.Close(); e != nil {
+			err = e
+		}
+	}
+	c.deleteLockName(lockName)
+
+	return err
+}
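Taken together, the two lock helpers above are meant to be used in an acquire/defer-release pattern. A sketch under the assumption of a connected *EtcdClient and a hypothetical lock name; the timeout is in seconds, as interpreted by GetDuration in kvutils.go below:

```go
if err := client.AcquireLock("device-1234", 5); err != nil {
	return err
}
defer func() {
	if err := client.ReleaseLock("device-1234"); err != nil {
		logger.Errorw("release-lock-failed", log.Fields{"error": err})
	}
}()
// ... critical section operating on device-1234 ...
```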
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
new file mode 100644
index 0000000..cf9a95c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"fmt"
+	"time"
+)
+
+// GetDuration converts a timeout value from int to time.Duration.  If the timeout value is
+// either not set or negative, the default KV timeout (configurable) is used.
+func GetDuration(timeout int) time.Duration {
+	if timeout <= 0 {
+		return defaultKVGetTimeout * time.Second
+	}
+	return time.Duration(timeout) * time.Second
+}
+
+// ToString converts an interface value to a string.  The value must be either a
+// string or a []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(t), nil
+	case string:
+		return t, nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToByte converts an interface value to a []byte.  The value must be either a
+// string or a []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return t, nil
+	case string:
+		return []byte(t), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
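Both converters exist because KV-store values travel as interface{} and may arrive as either strings or byte slices depending on the backend. A small illustration with arbitrary values:

```go
s, _ := ToString([]byte(`{"state":"up"}`)) // `{"state":"up"}`, nil
b, _ := ToByte("enabled")                  // []byte("enabled"), nil
_, err := ToString(42)                     // "", error: unexpected-type-int
fmt.Println(s, b, err)
```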
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
new file mode 100644
index 0000000..b4fe7ec
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "flowsUtils"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
new file mode 100644
index 0000000..b9981e6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
@@ -0,0 +1,1364 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"bytes"
+	"crypto/md5"
+	"fmt"
+	"github.com/cevaris/ordered_map"
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"math/big"
+	"strings"
+)
+
+var (
+	// Instructions shortcut
+	APPLY_ACTIONS  = ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS
+	WRITE_METADATA = ofp.OfpInstructionType_OFPIT_WRITE_METADATA
+	METER_ACTION   = ofp.OfpInstructionType_OFPIT_METER
+
+	//OFPAT_* shortcuts
+	OUTPUT       = ofp.OfpActionType_OFPAT_OUTPUT
+	COPY_TTL_OUT = ofp.OfpActionType_OFPAT_COPY_TTL_OUT
+	COPY_TTL_IN  = ofp.OfpActionType_OFPAT_COPY_TTL_IN
+	SET_MPLS_TTL = ofp.OfpActionType_OFPAT_SET_MPLS_TTL
+	DEC_MPLS_TTL = ofp.OfpActionType_OFPAT_DEC_MPLS_TTL
+	PUSH_VLAN    = ofp.OfpActionType_OFPAT_PUSH_VLAN
+	POP_VLAN     = ofp.OfpActionType_OFPAT_POP_VLAN
+	PUSH_MPLS    = ofp.OfpActionType_OFPAT_PUSH_MPLS
+	POP_MPLS     = ofp.OfpActionType_OFPAT_POP_MPLS
+	SET_QUEUE    = ofp.OfpActionType_OFPAT_SET_QUEUE
+	GROUP        = ofp.OfpActionType_OFPAT_GROUP
+	SET_NW_TTL   = ofp.OfpActionType_OFPAT_SET_NW_TTL
+	NW_TTL       = ofp.OfpActionType_OFPAT_DEC_NW_TTL
+	SET_FIELD    = ofp.OfpActionType_OFPAT_SET_FIELD
+	PUSH_PBB     = ofp.OfpActionType_OFPAT_PUSH_PBB
+	POP_PBB      = ofp.OfpActionType_OFPAT_POP_PBB
+	EXPERIMENTER = ofp.OfpActionType_OFPAT_EXPERIMENTER
+
+	//OFPXMT_OFB_* shortcuts (incomplete)
+	IN_PORT         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+	IN_PHY_PORT     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT
+	METADATA        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_METADATA
+	ETH_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST
+	ETH_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC
+	ETH_TYPE        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE
+	VLAN_VID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID
+	VLAN_PCP        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP
+	IP_DSCP         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP
+	IP_ECN          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN
+	IP_PROTO        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO
+	IPV4_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC
+	IPV4_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST
+	TCP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC
+	TCP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST
+	UDP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC
+	UDP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST
+	SCTP_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC
+	SCTP_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST
+	ICMPV4_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE
+	ICMPV4_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE
+	ARP_OP          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP
+	ARP_SPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA
+	ARP_TPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA
+	ARP_SHA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA
+	ARP_THA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA
+	IPV6_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC
+	IPV6_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST
+	IPV6_FLABEL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL
+	ICMPV6_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE
+	ICMPV6_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE
+	IPV6_ND_TARGET  = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET
+	OFB_IPV6_ND_SLL = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL
+	IPV6_ND_TLL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL
+	MPLS_LABEL      = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL
+	MPLS_TC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC
+	MPLS_BOS        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS
+	PBB_ISID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID
+	TUNNEL_ID       = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID
+	IPV6_EXTHDR     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR
+)
+
+//ofp_action_* shortcuts
+
+func Output(port uint32, maxLen ...ofp.OfpControllerMaxLen) *ofp.OfpAction {
+	maxLength := ofp.OfpControllerMaxLen_OFPCML_MAX
+	if len(maxLen) > 0 {
+		maxLength = maxLen[0]
+	}
+	return &ofp.OfpAction{Type: OUTPUT, Action: &ofp.OfpAction_Output{Output: &ofp.OfpActionOutput{Port: port, MaxLen: uint32(maxLength)}}}
+}
+
+func MplsTtl(ttl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: SET_MPLS_TTL, Action: &ofp.OfpAction_MplsTtl{MplsTtl: &ofp.OfpActionMplsTtl{MplsTtl: ttl}}}
+}
+
+func PushVlan(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: PUSH_VLAN, Action: &ofp.OfpAction_Push{Push: &ofp.OfpActionPush{Ethertype: ethType}}}
+}
+
+func PopVlan() *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_VLAN}
+}
+
+func PopMpls(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_MPLS, Action: &ofp.OfpAction_PopMpls{PopMpls: &ofp.OfpActionPopMpls{Ethertype: ethType}}}
+}
+
+func Group(groupId uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: GROUP, Action: &ofp.OfpAction_Group{Group: &ofp.OfpActionGroup{GroupId: groupId}}}
+}
+
+func NwTtl(nwTtl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: NW_TTL, Action: &ofp.OfpAction_NwTtl{NwTtl: &ofp.OfpActionNwTtl{NwTtl: nwTtl}}}
+}
+
+func SetField(field *ofp.OfpOxmOfbField) *ofp.OfpAction {
+	actionSetField := &ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: field}}
+	return &ofp.OfpAction{Type: SET_FIELD, Action: &ofp.OfpAction_SetField{SetField: &ofp.OfpActionSetField{Field: actionSetField}}}
+}
+
+func Experimenter(experimenter uint32, data []byte) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: EXPERIMENTER, Action: &ofp.OfpAction_Experimenter{Experimenter: &ofp.OfpActionExperimenter{Experimenter: experimenter, Data: data}}}
+}
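These constructors are normally chained into an action list that is later wrapped into a single APPLY_ACTIONS instruction by MkInstructionsFromActions further down in this file. As a hedged illustration, a hypothetical upstream flow that tags traffic and forwards it to a port might assemble:

```go
actions := []*ofp.OfpAction{
	PushVlan(0x8100),                // push an 802.1Q header
	SetField(VlanVid(0x1000 | 100)), // VID 100 with the OFPVID_PRESENT bit (0x1000) set
	Output(65536),                   // illustrative egress port number
}
```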
+
+//ofb_field generators (incomplete set)
+
+func InPort(inPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPort}}
+}
+
+func InPhyPort(inPhyPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PHY_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPhyPort}}
+}
+
+func Metadata_ofp(tableMetadata uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: METADATA, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: tableMetadata}}
+}
+
+// TODO: should Metadata_ofp be used here instead?
+func EthDst(ethDst uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_DST, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethDst}}
+}
+
+// TODO: should Metadata_ofp be used here instead?
+func EthSrc(ethSrc uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_SRC, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethSrc}}
+}
+
+func EthType(ethType uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_TYPE, Value: &ofp.OfpOxmOfbField_EthType{EthType: ethType}}
+}
+
+func VlanVid(vlanVid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_VID, Value: &ofp.OfpOxmOfbField_VlanVid{VlanVid: vlanVid}}
+}
+
+func VlanPcp(vlanPcp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_PCP, Value: &ofp.OfpOxmOfbField_VlanPcp{VlanPcp: vlanPcp}}
+}
+
+func IpDscp(ipDscp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_DSCP, Value: &ofp.OfpOxmOfbField_IpDscp{IpDscp: ipDscp}}
+}
+
+func IpEcn(ipEcn uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_ECN, Value: &ofp.OfpOxmOfbField_IpEcn{IpEcn: ipEcn}}
+}
+
+func IpProto(ipProto uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_PROTO, Value: &ofp.OfpOxmOfbField_IpProto{IpProto: ipProto}}
+}
+
+func Ipv4Src(ipv4Src uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_SRC, Value: &ofp.OfpOxmOfbField_Ipv4Src{Ipv4Src: ipv4Src}}
+}
+
+func Ipv4Dst(ipv4Dst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_DST, Value: &ofp.OfpOxmOfbField_Ipv4Dst{Ipv4Dst: ipv4Dst}}
+}
+
+func TcpSrc(tcpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_SRC, Value: &ofp.OfpOxmOfbField_TcpSrc{TcpSrc: tcpSrc}}
+}
+
+func TcpDst(tcpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_DST, Value: &ofp.OfpOxmOfbField_TcpDst{TcpDst: tcpDst}}
+}
+
+func UdpSrc(udpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_SRC, Value: &ofp.OfpOxmOfbField_UdpSrc{UdpSrc: udpSrc}}
+}
+
+func UdpDst(udpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_DST, Value: &ofp.OfpOxmOfbField_UdpDst{UdpDst: udpDst}}
+}
+
+func SctpSrc(sctpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_SRC, Value: &ofp.OfpOxmOfbField_SctpSrc{SctpSrc: sctpSrc}}
+}
+
+func SctpDst(sctpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_DST, Value: &ofp.OfpOxmOfbField_SctpDst{SctpDst: sctpDst}}
+}
+
+func Icmpv4Type(icmpv4Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv4Type{Icmpv4Type: icmpv4Type}}
+}
+
+func Icmpv4Code(icmpv4Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_CODE, Value: &ofp.OfpOxmOfbField_Icmpv4Code{Icmpv4Code: icmpv4Code}}
+}
+
+func ArpOp(arpOp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_OP, Value: &ofp.OfpOxmOfbField_ArpOp{ArpOp: arpOp}}
+}
+
+func ArpSpa(arpSpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SPA, Value: &ofp.OfpOxmOfbField_ArpSpa{ArpSpa: arpSpa}}
+}
+
+func ArpTpa(arpTpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_TPA, Value: &ofp.OfpOxmOfbField_ArpTpa{ArpTpa: arpTpa}}
+}
+
+func ArpSha(arpSha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SHA, Value: &ofp.OfpOxmOfbField_ArpSha{ArpSha: arpSha}}
+}
+
+func ArpTha(arpTha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_THA, Value: &ofp.OfpOxmOfbField_ArpTha{ArpTha: arpTha}}
+}
+
+func Ipv6Src(ipv6Src []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_SRC, Value: &ofp.OfpOxmOfbField_Ipv6Src{Ipv6Src: ipv6Src}}
+}
+
+func Ipv6Dst(ipv6Dst []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_DST, Value: &ofp.OfpOxmOfbField_Ipv6Dst{Ipv6Dst: ipv6Dst}}
+}
+
+func Ipv6Flabel(ipv6Flabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_FLABEL, Value: &ofp.OfpOxmOfbField_Ipv6Flabel{Ipv6Flabel: ipv6Flabel}}
+}
+
+func Icmpv6Type(icmpv6Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv6Type{Icmpv6Type: icmpv6Type}}
+}
+
+func Icmpv6Code(icmpv6Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_CODE, Value: &ofp.OfpOxmOfbField_Icmpv6Code{Icmpv6Code: icmpv6Code}}
+}
+
+func Ipv6NdTarget(ipv6NdTarget []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TARGET, Value: &ofp.OfpOxmOfbField_Ipv6NdTarget{Ipv6NdTarget: ipv6NdTarget}}
+}
+
+func OfbIpv6NdSll(ofbIpv6NdSll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: OFB_IPV6_ND_SLL, Value: &ofp.OfpOxmOfbField_Ipv6NdSsl{Ipv6NdSsl: ofbIpv6NdSll}}
+}
+
+func Ipv6NdTll(ipv6NdTll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TLL, Value: &ofp.OfpOxmOfbField_Ipv6NdTll{Ipv6NdTll: ipv6NdTll}}
+}
+
+func MplsLabel(mplsLabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_LABEL, Value: &ofp.OfpOxmOfbField_MplsLabel{MplsLabel: mplsLabel}}
+}
+
+func MplsTc(mplsTc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_TC, Value: &ofp.OfpOxmOfbField_MplsTc{MplsTc: mplsTc}}
+}
+
+func MplsBos(mplsBos uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_BOS, Value: &ofp.OfpOxmOfbField_MplsBos{MplsBos: mplsBos}}
+}
+
+func PbbIsid(pbbIsid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: PBB_ISID, Value: &ofp.OfpOxmOfbField_PbbIsid{PbbIsid: pbbIsid}}
+}
+
+func TunnelId(tunnelId uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TUNNEL_ID, Value: &ofp.OfpOxmOfbField_TunnelId{TunnelId: tunnelId}}
+}
+
+func Ipv6Exthdr(ipv6Exthdr uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_EXTHDR, Value: &ofp.OfpOxmOfbField_Ipv6Exthdr{Ipv6Exthdr: ipv6Exthdr}}
+}
+
+//frequently used extractors
+
+func excludeAction(action *ofp.OfpAction, exclude ...ofp.OfpActionType) bool {
+	for _, actionToExclude := range exclude {
+		if action.Type == actionToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetActions(flow *ofp.OfpFlowStats, exclude ...ofp.OfpActionType) []*ofp.OfpAction {
+	if flow == nil {
+		return nil
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			if len(exclude) == 0 {
+				return instActions.Actions
+			} else {
+				filteredAction := make([]*ofp.OfpAction, 0)
+				for _, action := range instActions.Actions {
+					if !excludeAction(action, exclude...) {
+						filteredAction = append(filteredAction, action)
+					}
+				}
+				return filteredAction
+			}
+		}
+	}
+	return nil
+}
+
+func UpdateOutputPortByActionType(flow *ofp.OfpFlowStats, actionType uint32, toPort uint32) *ofp.OfpFlowStats {
+	if flow == nil {
+		return nil
+	}
+	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	nFlow.Instructions = nil
+	nInsts := make([]*ofp.OfpInstruction, 0)
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == actionType {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			nActions := make([]*ofp.OfpAction, 0)
+			for _, action := range instActions.Actions {
+				if action.GetOutput() != nil {
+					nActions = append(nActions, Output(toPort))
+				} else {
+					nActions = append(nActions, action)
+				}
+			}
+			instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: nActions}}
+			nInsts = append(nInsts, &ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction})
+		} else {
+			nInsts = append(nInsts, instruction)
+		}
+	}
+	nFlow.Instructions = nInsts
+	return nFlow
+}
+
+func excludeOxmOfbField(field *ofp.OfpOxmOfbField, exclude ...ofp.OxmOfbFieldTypes) bool {
+	for _, fieldToExclude := range exclude {
+		if field.Type == fieldToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetOfbFields(flow *ofp.OfpFlowStats, exclude ...ofp.OxmOfbFieldTypes) []*ofp.OfpOxmOfbField {
+	if flow == nil || flow.Match == nil || flow.Match.Type != ofp.OfpMatchType_OFPMT_OXM {
+		return nil
+	}
+	ofbFields := make([]*ofp.OfpOxmOfbField, 0)
+	for _, field := range flow.Match.OxmFields {
+		if field.OxmClass == ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+			ofbFields = append(ofbFields, field.GetOfbField())
+		}
+	}
+	if len(exclude) == 0 {
+		return ofbFields
+	} else {
+		filteredFields := make([]*ofp.OfpOxmOfbField, 0)
+		for _, ofbField := range ofbFields {
+			if !excludeOxmOfbField(ofbField, exclude...) {
+				filteredFields = append(filteredFields, ofbField)
+			}
+		}
+		return filteredFields
+	}
+}
+
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
+func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == OUTPUT {
+			out := action.GetOutput()
+			if out == nil {
+				return 0
+			}
+			return out.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetInPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == IN_PORT {
+			return field.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetGotoTableId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE) {
+			gotoTable := instruction.GetGotoTable()
+			if gotoTable == nil {
+				return 0
+			}
+			return gotoTable.GetTableId()
+		}
+	}
+	return 0
+}
+
+func GetMeterId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_METER) {
+			MeterInstruction := instruction.GetMeter()
+			if MeterInstruction == nil {
+				return 0
+			}
+			return MeterInstruction.GetMeterId()
+		}
+	}
+	return 0
+}
+
+func GetVlanVid(flow *ofp.OfpFlowStats) *uint32 {
+	if flow == nil {
+		return nil
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == VLAN_VID {
+			ret := field.GetVlanVid()
+			return &ret
+		}
+	}
+	// Don't return 0 if the field is missing, as vlan id 0 has meaning and cannot be overloaded as "not found"
+	return nil
+}
+
+func GetTunnelId(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == TUNNEL_ID {
+			return field.GetTunnelId()
+		}
+	}
+	return 0
+}
+
+//GetMetaData - legacy get method (returns only the lower 32 bits)
+func GetMetaData(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
+		}
+	}
+	logger.Debug("No-metadata-present")
+	return 0
+}
+
+func GetMetaData64Bit(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return field.GetTableMetadata()
+		}
+	}
+	logger.Debug("No-metadata-present")
+	return 0
+}
+
+// GetMetadataFromWriteMetadataAction returns the write-metadata value from the flow's WRITE_METADATA instruction, if present
+func GetMetadataFromWriteMetadataAction(flow *ofp.OfpFlowStats) uint64 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(WRITE_METADATA) {
+				if writeMetadata := instruction.GetWriteMetadata(); writeMetadata != nil {
+					return writeMetadata.GetMetadata()
+				}
+			}
+		}
+	}
+	logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
+	return 0
+}
+
+func GetTechProfileIDFromWriteMetaData(metadata uint64) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var tpId uint16 = 0
+	logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	if metadata != 0 {
+		tpId = uint16((metadata >> 32) & 0xFFFF)
+		logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+	}
+	return tpId
+}
+
+func GetEgressPortNumberFromWriteMetadata(flow *ofp.OfpFlowStats) uint32 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var uniPort uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(flow)
+	logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	if md != 0 {
+		uniPort = uint32(md & 0xFFFFFFFF)
+		logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+	}
+	return uniPort
+
+}
+
+func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var innerTag uint16 = 0
+	md := GetMetadataFromWriteMetadataAction(flow)
+	if md != 0 {
+		innerTag = uint16((md >> 48) & 0xFFFF)
+		logger.Debugw("Found CVLAN from write metadata action", log.Fields{"c_vlan": innerTag})
+	}
+	return innerTag
+}
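A worked example of the 8-byte layout described above, with illustrative values (C-Tag 1111, technology profile 64, egress port 256); the three extractor functions above perform exactly these shifts and masks:

```go
md := uint64(1111)<<48 | uint64(64)<<32 | uint64(256)
fmt.Println(uint16(md >> 48))            // 1111 (inner/C tag)
fmt.Println(uint16((md >> 32) & 0xFFFF)) // 64   (technology profile id)
fmt.Println(uint32(md & 0xFFFFFFFF))     // 256  (uni/nni port)
```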
+
+// The legacy GetInnerTagFromMetaData below retrieved the inner tag from the Metadata_ofp. The port number (UNI on ONU) is in the
+// lower 32-bits of Metadata_ofp and the inner_tag is in the upper 32-bits. This is set in the ONOS OltPipeline as
+// a Metadata_ofp field
+/*func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		logger.Debugw("onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return (md >> 32) & 0xffffffff
+}*/
+
+// Extract the child device port from a flow that contains the parent device peer port.  Typically the UNI port of an
+// ONU child device.  Per TST agreement this will be the lower 32 bits of tunnel id reserving upper 32 bits for later
+// use
+func GetChildPortFromTunnelId(flow *ofp.OfpFlowStats) uint32 {
+	tid := GetTunnelId(flow)
+	if tid == 0 {
+		return 0
+	}
+	// Per TST agreement we are keeping any child port id (uni port id) in the lower 32 bits
+	return uint32(tid & 0xffffffff)
+}
+
+func HasNextTable(flow *ofp.OfpFlowStats) bool {
+	if flow == nil {
+		return false
+	}
+	return GetGotoTableId(flow) != 0
+}
+
+func GetGroup(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == GROUP {
+			grp := action.GetGroup()
+			if grp == nil {
+				return 0
+			}
+			return grp.GetGroupId()
+		}
+	}
+	return 0
+}
+
+func HasGroup(flow *ofp.OfpFlowStats) bool {
+	return GetGroup(flow) != 0
+}
+
+// GetNextTableId returns the next table ID if "table_id" is present in the map; otherwise it returns nil
+func GetNextTableId(kw OfpFlowModArgs) *uint32 {
+	if val, exist := kw["table_id"]; exist {
+		ret := uint32(val)
+		return &ret
+	}
+	return nil
+}
+
+// GetMeterIdFlowModArgs returns the meterId if "meter_id" is present in the map; otherwise it returns 0
+func GetMeterIdFlowModArgs(kw OfpFlowModArgs) uint32 {
+	if val, exist := kw["meter_id"]; exist {
+		return uint32(val)
+	}
+	return 0
+}
+
+// GetMetadataFlowModArgs returns the metadata if "write_metadata" is present in the map; otherwise it returns 0
+func GetMetadataFlowModArgs(kw OfpFlowModArgs) uint64 {
+	if val, exist := kw["write_metadata"]; exist {
+		ret := uint64(val)
+		return ret
+	}
+	return 0
+}
+
+// HashFlowStats returns a unique 64-bit integer hash of the flow, covering the following attributes:
+// 'table_id', 'priority', 'flags', 'cookie', 'match' and the stringified instructions
+func HashFlowStats(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil { // Should never happen
+		return 0
+	}
+	// Create string with the instructions field first
+	var instructionString bytes.Buffer
+	for _, instruction := range flow.Instructions {
+		instructionString.WriteString(instruction.String())
+	}
+	var flowString = fmt.Sprintf("%d%d%d%d%s%s", flow.TableId, flow.Priority, flow.Flags, flow.Cookie, flow.Match.String(), instructionString.String())
+	h := md5.New()
+	h.Write([]byte(flowString))
+	hash := big.NewInt(0)
+	hash.SetBytes(h.Sum(nil))
+	return hash.Uint64()
+}
+
+// FlowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
+func FlowStatsEntryFromFlowModMessage(mod *ofp.OfpFlowMod) *ofp.OfpFlowStats {
+	flow := &ofp.OfpFlowStats{}
+	if mod == nil {
+		return flow
+	}
+	flow.TableId = mod.TableId
+	flow.Priority = mod.Priority
+	flow.IdleTimeout = mod.IdleTimeout
+	flow.HardTimeout = mod.HardTimeout
+	flow.Flags = mod.Flags
+	flow.Cookie = mod.Cookie
+	flow.Match = mod.Match
+	flow.Instructions = mod.Instructions
+	flow.Id = HashFlowStats(flow)
+	return flow
+}
+
+func GroupEntryFromGroupMod(mod *ofp.OfpGroupMod) *ofp.OfpGroupEntry {
+	group := &ofp.OfpGroupEntry{}
+	if mod == nil {
+		return group
+	}
+	group.Desc = &ofp.OfpGroupDesc{Type: mod.Type, GroupId: mod.GroupId, Buckets: mod.Buckets}
+	group.Stats = &ofp.OfpGroupStats{GroupId: mod.GroupId}
+	//TODO do we need to instantiate bucket bins?
+	return group
+}
+
+// MeterEntryFromMeterMod maps an ofp_meter_mod message to an ofp_meter_entry message
+func MeterEntryFromMeterMod(meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+	bandStats := make([]*ofp.OfpMeterBandStats, 0)
+	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
+		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
+	if meterMod == nil {
+		logger.Error("Invalid meter mod command")
+		return meter
+	}
+	// config init
+	meter.Config.MeterId = meterMod.MeterId
+	meter.Config.Flags = meterMod.Flags
+	meter.Config.Bands = meterMod.Bands
+	// meter stats init
+	meter.Stats.MeterId = meterMod.MeterId
+	meter.Stats.FlowCount = 0
+	meter.Stats.PacketInCount = 0
+	meter.Stats.ByteInCount = 0
+	meter.Stats.DurationSec = 0
+	meter.Stats.DurationNsec = 0
+	// band stats init
+	for range meterMod.Bands {
+		band := &ofp.OfpMeterBandStats{}
+		band.PacketBandCount = 0
+		band.ByteBandCount = 0
+		bandStats = append(bandStats, band)
+	}
+	meter.Stats.BandStats = bandStats
+	logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
+	return meter
+
+}
+
+func GetMeterIdFromFlow(flow *ofp.OfpFlowStats) uint32 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					return meterInst.GetMeterId()
+				}
+			}
+		}
+	}
+
+	return uint32(0)
+}
+
+func MkOxmFields(matchFields []ofp.OfpOxmField) []*ofp.OfpOxmField {
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	return oxmFields
+}
+
+func MkInstructionsFromActions(actions []*ofp.OfpAction) []*ofp.OfpInstruction {
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+	return instructions
+}
+
+// MkSimpleFlowMod is a convenience function to generate an ofp_flow_mod message with an OXM BASIC match composed from the match_fields, and a
+// single APPLY_ACTIONS instruction with a list of ofp_action objects.
+func MkSimpleFlowMod(matchFields []*ofp.OfpOxmField, actions []*ofp.OfpAction, command *ofp.OfpFlowModCommand, kw OfpFlowModArgs) *ofp.OfpFlowMod {
+
+	// Process actions instructions
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+
+	// Process next table
+	if tableId := GetNextTableId(kw); tableId != nil {
+		var instGotoTable ofp.OfpInstruction_GotoTable
+		instGotoTable.GotoTable = &ofp.OfpInstructionGotoTable{TableId: *tableId}
+		inst := ofp.OfpInstruction{Type: uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE), Data: &instGotoTable}
+		instructions = append(instructions, &inst)
+	}
+	// Process meter action
+	if meterId := GetMeterIdFlowModArgs(kw); meterId != 0 {
+		var instMeter ofp.OfpInstruction_Meter
+		instMeter.Meter = &ofp.OfpInstructionMeter{MeterId: meterId}
+		inst := ofp.OfpInstruction{Type: uint32(METER_ACTION), Data: &instMeter}
+		instructions = append(instructions, &inst)
+	}
+	//process write_metadata action
+	if metadata := GetMetadataFlowModArgs(kw); metadata != 0 {
+		var instWriteMetadata ofp.OfpInstruction_WriteMetadata
+		instWriteMetadata.WriteMetadata = &ofp.OfpInstructionWriteMetadata{Metadata: metadata}
+		inst := ofp.OfpInstruction{Type: uint32(WRITE_METADATA), Data: &instWriteMetadata}
+		instructions = append(instructions, &inst)
+	}
+
+	// Process match fields
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	var match ofp.OfpMatch
+	match.Type = ofp.OfpMatchType_OFPMT_OXM
+	match.OxmFields = oxmFields
+
+	// Create ofp_flow_message
+	msg := &ofp.OfpFlowMod{}
+	if command == nil {
+		msg.Command = ofp.OfpFlowModCommand_OFPFC_ADD
+	} else {
+		msg.Command = *command
+	}
+	msg.Instructions = instructions
+	msg.Match = &match
+
+	// Set the variadic argument values
+	msg = setVariadicModAttributes(msg, kw)
+
+	return msg
+}
+
+func MkMulticastGroupMod(groupId uint32, buckets []*ofp.OfpBucket, command *ofp.OfpGroupModCommand) *ofp.OfpGroupMod {
+	group := &ofp.OfpGroupMod{}
+	if command == nil {
+		group.Command = ofp.OfpGroupModCommand_OFPGC_ADD
+	} else {
+		group.Command = *command
+	}
+	group.Type = ofp.OfpGroupType_OFPGT_ALL
+	group.GroupId = groupId
+	group.Buckets = buckets
+	return group
+}
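For the multicast support this change introduces, each bucket of an OFPGT_ALL group typically carries one output action per egress port. A sketch with illustrative group and port ids (passing a nil command defaults to OFPGC_ADD):

```go
buckets := []*ofp.OfpBucket{
	{Actions: []*ofp.OfpAction{Output(16)}},
	{Actions: []*ofp.OfpAction{Output(32)}},
}
groupMod := MkMulticastGroupMod(1, buckets, nil)
groupEntry := GroupEntryFromGroupMod(groupMod)
_ = groupEntry
```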
+
+// setVariadicModAttributes sets only the uint64 or uint32 fields of the ofp_flow_mod message
+func setVariadicModAttributes(mod *ofp.OfpFlowMod, args OfpFlowModArgs) *ofp.OfpFlowMod {
+	if args == nil {
+		return mod
+	}
+	for key, val := range args {
+		switch key {
+		case "cookie":
+			mod.Cookie = val
+		case "cookie_mask":
+			mod.CookieMask = val
+		case "table_id":
+			mod.TableId = uint32(val)
+		case "idle_timeout":
+			mod.IdleTimeout = uint32(val)
+		case "hard_timeout":
+			mod.HardTimeout = uint32(val)
+		case "priority":
+			mod.Priority = uint32(val)
+		case "buffer_id":
+			mod.BufferId = uint32(val)
+		case "out_port":
+			mod.OutPort = uint32(val)
+		case "out_group":
+			mod.OutGroup = uint32(val)
+		case "flags":
+			mod.Flags = uint32(val)
+		}
+	}
+	return mod
+}
+
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
+// MkFlowStat is a helper method to build flows
+func MkFlowStat(fa *FlowArgs) *ofp.OfpFlowStats {
+	//Build the match-fields
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range fa.MatchFields {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return FlowStatsEntryFromFlowModMessage(MkSimpleFlowMod(matchFields, fa.Actions, fa.Command, fa.KV))
+}
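A sketch of building a flow with FlowArgs, using illustrative values (an EAPOL trap-to-controller flow on port 16); note that priority and table id travel through the KV map and are applied by setVariadicModAttributes:

```go
fa := &FlowArgs{
	KV: OfpFlowModArgs{"priority": 10000, "table_id": 0},
	MatchFields: []*ofp.OfpOxmOfbField{
		InPort(16),
		EthType(0x888e), // EAPOL
	},
	Actions: []*ofp.OfpAction{
		Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
	},
}
flow := MkFlowStat(fa)
_ = flow.Id // 64-bit id assigned via HashFlowStats
```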
+
+func MkGroupStat(ga *GroupArgs) *ofp.OfpGroupEntry {
+	return GroupEntryFromGroupMod(MkMulticastGroupMod(ga.GroupId, ga.Buckets, ga.Command))
+}
+
+type OfpFlowModArgs map[string]uint64
+
+type FlowArgs struct {
+	MatchFields []*ofp.OfpOxmOfbField
+	Actions     []*ofp.OfpAction
+	Command     *ofp.OfpFlowModCommand
+	Priority    uint32
+	KV          OfpFlowModArgs
+}
+
+type GroupArgs struct {
+	GroupId uint32
+	Buckets []*ofp.OfpBucket
+	Command *ofp.OfpGroupModCommand
+}
+
+type FlowsAndGroups struct {
+	Flows  *ordered_map.OrderedMap
+	Groups *ordered_map.OrderedMap
+}
+
+func NewFlowsAndGroups() *FlowsAndGroups {
+	var fg FlowsAndGroups
+	fg.Flows = ordered_map.NewOrderedMap()
+	fg.Groups = ordered_map.NewOrderedMap()
+	return &fg
+}
+
+func (fg *FlowsAndGroups) Copy() *FlowsAndGroups {
+	copyFG := NewFlowsAndGroups()
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			copyFG.Flows.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			copyFG.Groups.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	return copyFG
+}
+
+func (fg *FlowsAndGroups) GetFlow(index int) *ofp.OfpFlowStats {
+	iter := fg.Flows.IterFunc()
+	pos := 0
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if pos == index {
+			if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+				return protoMsg
+			}
+			return nil
+		}
+		pos++
+	}
+	return nil
+}
+
+func (fg *FlowsAndGroups) ListFlows() []*ofp.OfpFlowStats {
+	flows := make([]*ofp.OfpFlowStats, 0)
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			flows = append(flows, protoMsg)
+		}
+	}
+	return flows
+}
+
+func (fg *FlowsAndGroups) ListGroups() []*ofp.OfpGroupEntry {
+	groups := make([]*ofp.OfpGroupEntry, 0)
+	iter := fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			groups = append(groups, protoMsg)
+		}
+	}
+	return groups
+}
+
+func (fg *FlowsAndGroups) String() string {
+	var buffer bytes.Buffer
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			buffer.WriteString("\nFlow:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			buffer.WriteString("\nGroup:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	return buffer.String()
+}
+
+func (fg *FlowsAndGroups) AddFlow(flow *ofp.OfpFlowStats) {
+	if flow == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add flow only if absent
+	if _, exist := fg.Flows.Get(flow.Id); !exist {
+		fg.Flows.Set(flow.Id, flow)
+	}
+}
+
+func (fg *FlowsAndGroups) AddGroup(group *ofp.OfpGroupEntry) {
+	if group == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add group only if absent
+	if _, exist := fg.Groups.Get(group.Desc.GroupId); !exist {
+		fg.Groups.Set(group.Desc.GroupId, group)
+	}
+}
+
+//AddFrom adds flows and groups from the argument into this structure, only if they do not already exist
+func (fg *FlowsAndGroups) AddFrom(from *FlowsAndGroups) {
+	iter := from.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			if _, exist := fg.Flows.Get(protoMsg.Id); !exist {
+				fg.Flows.Set(protoMsg.Id, protoMsg)
+			}
+		}
+	}
+	iter = from.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			if _, exist := fg.Groups.Get(protoMsg.Stats.GroupId); !exist {
+				fg.Groups.Set(protoMsg.Stats.GroupId, protoMsg)
+			}
+		}
+	}
+}
+
+type DeviceRules struct {
+	Rules map[string]*FlowsAndGroups
+}
+
+func NewDeviceRules() *DeviceRules {
+	var dr DeviceRules
+	dr.Rules = make(map[string]*FlowsAndGroups)
+	return &dr
+}
+
+func (dr *DeviceRules) Copy() *DeviceRules {
+	copyDR := NewDeviceRules()
+	if dr != nil {
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
+		}
+	}
+	return copyDR
+}
+
+func (dr *DeviceRules) ClearFlows(deviceId string) {
+	if _, exist := dr.Rules[deviceId]; exist {
+		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
+	}
+}
+
+func (dr *DeviceRules) FilterRules(deviceIds map[string]string) *DeviceRules {
+	filteredDR := NewDeviceRules()
+	for key, val := range dr.Rules {
+		if _, exist := deviceIds[key]; exist {
+			filteredDR.Rules[key] = val.Copy()
+		}
+	}
+	return filteredDR
+}
+
+func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	if _, exist := dr.Rules[deviceId]; !exist {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId].AddFlow(flow)
+}
+
+func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	return dr.Rules
+}
+
+func (dr *DeviceRules) String() string {
+	var buffer bytes.Buffer
+	for key, value := range dr.Rules {
+		buffer.WriteString("DeviceId:")
+		buffer.WriteString(key)
+		buffer.WriteString(value.String())
+		buffer.WriteString("\n\n")
+	}
+	return buffer.String()
+}
+
+func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId] = fg
+}
+
+// CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
+// empty FlowsAndGroups to it.  Otherwise, it does nothing.
+func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+}
+
+/*
+ *  Common flow routines
+ */
+
+//FindOverlappingFlows returns a list of overlapping flow(s) where mod is the flow request
+func FindOverlappingFlows(flows []*ofp.OfpFlowStats, mod *ofp.OfpFlowMod) []*ofp.OfpFlowStats {
+	return nil //TODO - complete implementation
+}
+
+// FindFlowById returns the index of the flow in the flows array if present. Otherwise, it returns -1
+func FindFlowById(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if flow.Id == f.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+// FindFlows returns the index of flow within flows if present.  Otherwise, it returns -1
+func FindFlows(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if FlowMatch(f, flow) {
+			return idx
+		}
+	}
+	return -1
+}
+
+//FlowMatch returns true if two flows match on the following flow attributes:
+//TableId, Priority, Flags, Cookie, Match
+func FlowMatch(f1 *ofp.OfpFlowStats, f2 *ofp.OfpFlowStats) bool {
+	if f1 == nil || f2 == nil {
+		return false
+	}
+	keysMatter := []string{"TableId", "Priority", "Flags", "Cookie", "Match"}
+	for _, key := range keysMatter {
+		switch key {
+		case "TableId":
+			if f1.TableId != f2.TableId {
+				return false
+			}
+		case "Priority":
+			if f1.Priority != f2.Priority {
+				return false
+			}
+		case "Flags":
+			if f1.Flags != f2.Flags {
+				return false
+			}
+		case "Cookie":
+			if f1.Cookie != f2.Cookie {
+				return false
+			}
+		case "Match":
+			if strings.Compare(f1.Match.String(), f2.Match.String()) != 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+//FlowMatchesMod returns true if the given flow is "covered" by the wildcard flow_mod, taking into account
+//both exact matches and mask-based match fields, if any. Otherwise it returns false
+func FlowMatchesMod(flow *ofp.OfpFlowStats, mod *ofp.OfpFlowMod) bool {
+	if flow == nil || mod == nil {
+		return false
+	}
+	//Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
+	if (flow.Cookie & mod.CookieMask) != (mod.Cookie & mod.CookieMask) {
+		return false
+	}
+
+	//Check if flow.table_id is covered by flow_mod.table_id
+	if mod.TableId != uint32(ofp.OfpTable_OFPTT_ALL) && flow.TableId != mod.TableId {
+		return false
+	}
+
+	//Check out_port
+	if (mod.OutPort&0x7fffffff) != uint32(ofp.OfpPortNo_OFPP_ANY) && !FlowHasOutPort(flow, mod.OutPort) {
+		return false
+	}
+
+	//	Check out_group
+	if (mod.OutGroup&0x7fffffff) != uint32(ofp.OfpGroup_OFPG_ANY) && !FlowHasOutGroup(flow, mod.OutGroup) {
+		return false
+	}
+
+	//Priority is ignored
+
+	//Check match condition
+	//If the flow_mod match field is empty, that is a special case and indicates the flow entry matches
+	if (mod.Match == nil) || (mod.Match.OxmFields == nil) || (len(mod.Match.OxmFields) == 0) {
+		//If we got this far and the match is empty in the flow spec, then the flow matches
+		return true
+	} // TODO : implement the flow match analysis
+	return false
+
+}
+
+//FlowHasOutPort returns true if the flow has an output action with the given out_port
+func FlowHasOutPort(flow *ofp.OfpFlowStats, outPort uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+					if (action.GetOutput() != nil) && (action.GetOutput().Port == outPort) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FlowHasOutGroup returns true if the flow has a group action with the given out_group
+func FlowHasOutGroup(flow *ofp.OfpFlowStats, groupID uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_GROUP {
+					if (action.GetGroup() != nil) && (action.GetGroup().GroupId == groupID) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FindGroup returns index of group if found, else returns -1
+func FindGroup(groups []*ofp.OfpGroupEntry, groupId uint32) int {
+	for idx, group := range groups {
+		if group.Desc.GroupId == groupId {
+			return idx
+		}
+	}
+	return -1
+}
+
+func FlowsDeleteByGroupId(flows []*ofp.OfpFlowStats, groupId uint32) (bool, []*ofp.OfpFlowStats) {
+	toKeep := make([]*ofp.OfpFlowStats, 0)
+
+	for _, f := range flows {
+		if !FlowHasOutGroup(f, groupId) {
+			toKeep = append(toKeep, f)
+		}
+	}
+	return len(toKeep) < len(flows), toKeep
+}
+
+func ToOfpOxmField(from []*ofp.OfpOxmOfbField) []*ofp.OfpOxmField {
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range from {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return matchFields
+}
+
+//IsMulticastIp returns true if the ip address starts with the bit sequence 1110
+//(i.e. it lies in 224.0.0.0/4); false otherwise.
+func IsMulticastIp(ip uint32) bool {
+	return ip>>28 == 14
+}
+
+//ConvertToMulticastMacInt returns the multicast MAC address (as a uint64) equivalent of the given multicast ip address
+func ConvertToMulticastMacInt(ip uint32) uint64 {
+	//keep only the last 23 bits of the ip address (mask 0x7FFFFF)
+	theLast23BitsOfIp := ip & 8388607
+	// perform OR with 0x01005E000000 (the 01:00:5E multicast OUI prefix) to build the mcast mac address
+	return 1101088686080 | uint64(theLast23BitsOfIp)
+}
+
+//ConvertToMulticastMacBytes returns the 6-byte multicast MAC address equivalent of the given multicast ip address
+func ConvertToMulticastMacBytes(ip uint32) []byte {
+	mac := ConvertToMulticastMacInt(ip)
+	var b bytes.Buffer
+	// mask selecting the most significant octet of the 48-bit MAC value (0xFF0000000000)
+	catalyze := uint64(280375465082880)
+	// extract each octet, most significant first
+	for i := 0; i < 6; i++ {
+		if i != 0 {
+			catalyze = catalyze >> 8
+		}
+		octet := mac & catalyze
+		octetDecimal := octet >> uint8(40-i*8)
+		b.WriteByte(byte(octetDecimal))
+	}
+	return b.Bytes()
+}
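A worked example of the IP-to-MAC mapping implemented above (the standard IPv4 multicast mapping: the 01:00:5E prefix plus the lower 23 bits of the address), using 224.0.0.22 as an illustrative group address:

```go
ip := uint32(0xE0000016) // 224.0.0.22
fmt.Println(IsMulticastIp(ip))                      // true (0xE0000016>>28 == 14)
fmt.Printf("%x\n", ConvertToMulticastMacInt(ip))    // 1005e000016
fmt.Printf("% x\n", ConvertToMulticastMacBytes(ip)) // 01 00 5e 00 00 16
```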
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
new file mode 100644
index 0000000..6289043
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"time"
+
+	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
+
+const (
+	PartitionConsumer = iota
+	GroupCustomer     = iota
+)
+
+const (
+	OffsetNewest = -1
+	OffsetOldest = -2
+)
+
+const (
+	GroupIdKey = "groupId"
+	Offset     = "offset"
+)
+
+const (
+	DefaultKafkaHost                = "127.0.0.1"
+	DefaultKafkaPort                = 9092
+	DefaultGroupName                = "voltha"
+	DefaultSleepOnError             = 1
+	DefaultProducerFlushFrequency   = 10
+	DefaultProducerFlushMessages    = 10
+	DefaultProducerFlushMaxmessages = 100
+	DefaultProducerReturnSuccess    = true
+	DefaultProducerReturnErrors     = true
+	DefaultProducerRetryMax         = 3
+	DefaultProducerRetryBackoff     = time.Millisecond * 100
+	DefaultConsumerMaxwait          = 100
+	DefaultMaxProcessingTime        = 100
+	DefaultConsumerType             = PartitionConsumer
+	DefaultNumberPartitions         = 3
+	DefaultNumberReplicas           = 1
+	DefaultAutoCreateTopic          = false
+	DefaultMetadataMaxRetry         = 3
+	DefaultLivenessChannelInterval  = time.Second * 30
+)
+
+// Client represents the set of APIs a Kafka client must implement
+type Client interface {
+	Start() error
+	Stop()
+	CreateTopic(topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(topic *Topic) error
+	Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	Send(msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness() error
+	EnableLivenessChannel(enable bool) chan bool
+	EnableHealthinessChannel(enable bool) chan bool
+}
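As a rough usage sketch of this interface, assuming the sarama-backed implementation and its Host/Port options provided elsewhere in this package, with illustrative host and topic values:

```go
client := NewSaramaClient(Host("kafka"), Port(9092))
if err := client.Start(); err != nil {
	panic(err)
}
defer client.Stop()

ch, err := client.Subscribe(&Topic{Name: "openolt"})
if err != nil {
	panic(err)
}
go func() {
	for msg := range ch {
		logger.Debugw("rx", log.Fields{"msg": msg})
	}
}()
```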
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
new file mode 100644
index 0000000..cb6acb2
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kafka"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
new file mode 100644
index 0000000..042e121
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
@@ -0,0 +1,841 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
+
+const (
+	DefaultMaxRetries     = 3
+	DefaultRequestTimeout = 10000 // 10000 milliseconds - to handle a wider latency range
+)
+
+const (
+	TransactionKey = "transactionID"
+	FromTopic      = "fromTopic"
+)
+
+var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
+var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
+
+// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
+// obtained from that channel, this interface is invoked.  This is used to handle
+// async requests into the Core via the kafka messaging bus
+type requestHandlerChannel struct {
+	requesthandlerInterface interface{}
+	ch                      <-chan *ic.InterContainerMessage
+}
+
+// transactionChannel represents a combination of a topic and a channel onto which a response received
+// on the kafka bus will be sent
+type transactionChannel struct {
+	topic *Topic
+	ch    chan *ic.InterContainerMessage
+}
+
+// InterContainerProxy represents the messaging proxy
+type InterContainerProxy struct {
+	kafkaHost                      string
+	kafkaPort                      int
+	DefaultTopic                   *Topic
+	defaultRequestHandlerInterface interface{}
+	deviceDiscoveryTopic           *Topic
+	kafkaClient                    Client
+	doneCh                         chan int
+
+	// This map is used to map a topic to an interface and channel.   When a request is received
+	// on that channel (registered to the topic) then that interface is invoked.
+	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
+	lockTopicRequestHandlerChannelMap sync.RWMutex
+
+	// This map is used to map a channel to a response topic.   This channel handles all responses on that
+	// channel for that topic and forwards them to the appropriate consumer's channel, using the
+	// transactionIdToChannelMap.
+	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
+	lockTopicResponseChannelMap sync.RWMutex
+
+	// This map is used to map a transaction to a consumer's channel.  This is used whenever a request has been
+	// sent out and we are waiting for a response.
+	transactionIdToChannelMap     map[string]*transactionChannel
+	lockTransactionIdToChannelMap sync.RWMutex
+}
+
+type InterContainerProxyOption func(*InterContainerProxy)
+
+func InterContainerHost(host string) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.kafkaHost = host
+	}
+}
+
+func InterContainerPort(port int) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.kafkaPort = port
+	}
+}
+
+func DefaultTopic(topic *Topic) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.DefaultTopic = topic
+	}
+}
+
+func DeviceDiscoveryTopic(topic *Topic) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.deviceDiscoveryTopic = topic
+	}
+}
+
+func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.defaultRequestHandlerInterface = handler
+	}
+}
+
+func MsgClient(client Client) InterContainerProxyOption {
+	return func(args *InterContainerProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func NewInterContainerProxy(opts ...InterContainerProxyOption) (*InterContainerProxy, error) {
+	proxy := &InterContainerProxy{
+		kafkaHost: DefaultKafkaHost,
+		kafkaPort: DefaultKafkaPort,
+	}
+
+	for _, option := range opts {
+		option(proxy)
+	}
+
+	// Create the locks for all the maps
+	proxy.lockTopicRequestHandlerChannelMap = sync.RWMutex{}
+	proxy.lockTransactionIdToChannelMap = sync.RWMutex{}
+	proxy.lockTopicResponseChannelMap = sync.RWMutex{}
+
+	return proxy, nil
+}
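For illustration only (not part of this patch): a minimal sketch of wiring the proxy through the functional options above. The broker address and topic name are assumptions.

	package main

	import "github.com/opencord/voltha-lib-go/v3/pkg/kafka"

	func main() {
		// The kafka client is built first and handed to the proxy via MsgClient;
		// Start() logs a fatal error if no kafka client was supplied.
		kafkaClient := kafka.NewSaramaClient(
			kafka.Host("voltha-kafka"),
			kafka.Port(9092))

		proxy, err := kafka.NewInterContainerProxy(
			kafka.InterContainerHost("voltha-kafka"),
			kafka.InterContainerPort(9092),
			kafka.MsgClient(kafkaClient),
			kafka.DefaultTopic(&kafka.Topic{Name: "openolt"}))
		if err != nil {
			panic(err)
		}
		if err := proxy.Start(); err != nil {
			panic(err)
		}
		defer proxy.Stop()
	}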
+
+func (kp *InterContainerProxy) Start() error {
+	logger.Info("Starting-Proxy")
+
+	// Kafka MsgClient should already have been created.  If not, output fatal error
+	if kp.kafkaClient == nil {
+		logger.Fatal("kafka-client-not-set")
+	}
+
+	// Create the Done channel
+	kp.doneCh = make(chan int, 1)
+
+	// Start the kafka client
+	if err := kp.kafkaClient.Start(); err != nil {
+		logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the topic to response channel map
+	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
+	//
+	// Create the transactionId to Channel Map
+	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
+
+	// Create the topic to request channel map
+	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
+
+	return nil
+}
+
+func (kp *InterContainerProxy) Stop() {
+	logger.Info("stopping-intercontainer-proxy")
+	kp.doneCh <- 1
+	// TODO : Perform cleanup
+	kp.kafkaClient.Stop()
+	//kp.deleteAllTopicRequestHandlerChannelMap()
+	//kp.deleteAllTopicResponseChannelMap()
+	//kp.deleteAllTransactionIdToChannelMap()
+}
+
+// DeviceDiscovered publishes the discovered device onto the kafka messaging bus
+func (kp *InterContainerProxy) DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error {
+	logger.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
+	//	Simple validation
+	if deviceId == "" || deviceType == "" {
+		logger.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
+		return errors.New("invalid-parameters")
+	}
+	//	Create the device discovery message
+	header := &ic.Header{
+		Id:        uuid.New().String(),
+		Type:      ic.MessageType_DEVICE_DISCOVERED,
+		FromTopic: kp.DefaultTopic.Name,
+		ToTopic:   kp.deviceDiscoveryTopic.Name,
+		Timestamp: time.Now().UnixNano(),
+	}
+	body := &ic.DeviceDiscovered{
+		Id:         deviceId,
+		DeviceType: deviceType,
+		ParentId:   parentId,
+		Publisher:  publisher,
+	}
+
+	var marshalledData *any.Any
+	var err error
+	if marshalledData, err = ptypes.MarshalAny(body); err != nil {
+		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+		return err
+	}
+	msg := &ic.InterContainerMessage{
+		Header: header,
+		Body:   marshalledData,
+	}
+
+	// Send the message
+	if err := kp.kafkaClient.Send(msg, kp.deviceDiscoveryTopic); err != nil {
+		logger.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
+		return err
+	}
+	return nil
+}
+
+// InvokeRPC is used to send a request to a given topic
+func (kp *InterContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
+
+	//	If a replyToTopic is provided then we use it, otherwise just use the default toTopic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.DefaultTopic
+	}
+
+	// Encode the request
+	protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
+	if err != nil {
+		logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		return false, nil
+	}
+
+	// Subscribe for response, if needed, before sending request
+	var ch <-chan *ic.InterContainerMessage
+	if waitForResponse {
+		var err error
+		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		}
+	}
+
+	// Send request - if the topic is formatted with a device Id then we will send the request using a
+	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+	//key := GetDeviceIdFromTopic(*toTopic)
+	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	go kp.kafkaClient.Send(protoRequest, toTopic, key)
+
+	if waitForResponse {
+		// Create a child context based on the parent context, if any
+		var cancel context.CancelFunc
+		childCtx := context.Background()
+		if ctx == nil {
+			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
+		} else {
+			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
+		}
+		defer cancel()
+
+		// Wait for response as well as timeout or cancellation
+		// Remove the subscription for a response on return
+		defer kp.unSubscribeForResponse(protoRequest.Header.Id)
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				protoError := &ic.Error{Reason: "channel-closed"}
+				var marshalledArg *any.Any
+				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+					return false, nil // Should never happen
+				}
+				return false, marshalledArg
+			}
+			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			var responseBody *ic.InterContainerResponseBody
+			var err error
+			if responseBody, err = decodeResponse(msg); err != nil {
+				logger.Errorw("decode-response-error", log.Fields{"error": err})
+				// Avoid dereferencing a nil responseBody when decoding fails
+				return false, nil
+			}
+			return responseBody.Success, responseBody.Result
+		case <-ctx.Done():
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: ctx.Err().Error()}
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-childCtx.Done():
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: childCtx.Err().Error()}
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-kp.doneCh:
+			logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			return true, nil
+		}
+	}
+	return true, nil
+}
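For illustration only (not part of this patch): a sketch of a caller driving InvokeRPC. The rpc name, topics, and argument key are assumptions; imports are those already used in this file.

	func getDevice(proxy *kafka.InterContainerProxy, deviceID string) (*any.Any, error) {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		// The key pins every message for this device to one partition, preserving order.
		success, result := proxy.InvokeRPC(ctx, "GetDevice",
			&kafka.Topic{Name: "rwcore"},  // toTopic
			&kafka.Topic{Name: "openolt"}, // replyToTopic
			true,                          // waitForResponse
			deviceID,                      // partition key
			&kafka.KVArg{Key: "device_id", Value: &ic.StrType{Val: deviceID}})
		if !success {
			// On failure the result carries an ic.Error with the reason.
			unpacked := &ic.Error{}
			_ = ptypes.UnmarshalAny(result, unpacked)
			return nil, errors.New(unpacked.Reason)
		}
		return result, nil
	}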
+
+// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
+// when a message is received on a given topic
+func (kp *InterContainerProxy) SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error {
+
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
+		//if ch, err = kp.Subscribe(topic); err != nil {
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+
+	kp.defaultRequestHandlerInterface = handler
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ch, topic, handler)
+
+	return nil
+}
+
+// SubscribeWithDefaultRequestHandler allows a caller to add a topic to the default target object, which is invoked
+// automatically when a message is received on that topic.  So far only one target is registered per microservice
+func (kp *InterContainerProxy) SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error {
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
+
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ch, topic, kp.defaultRequestHandlerInterface)
+
+	return nil
+}
+
+func (kp *InterContainerProxy) UnSubscribeFromRequestHandler(topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(topic.Name)
+}
+
+// setupTopicResponseChannelMap sets up a single consumer channel that will act as a broadcast channel for all
+// responses from that topic.
+func (kp *InterContainerProxy) setupTopicResponseChannelMap(topic string, arg <-chan *ic.InterContainerMessage) {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	if _, exist := kp.topicToResponseChannelMap[topic]; !exist {
+		kp.topicToResponseChannelMap[topic] = arg
+	}
+}
+
+func (kp *InterContainerProxy) isTopicSubscribedForResponse(topic string) bool {
+	kp.lockTopicResponseChannelMap.RLock()
+	defer kp.lockTopicResponseChannelMap.RUnlock()
+	_, exist := kp.topicToResponseChannelMap[topic]
+	return exist
+}
+
+func (kp *InterContainerProxy) deleteFromTopicResponseChannelMap(topic string) error {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		var err error
+		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
+		}
+		delete(kp.topicToResponseChannelMap, topic)
+		return err
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+func (kp *InterContainerProxy) deleteAllTopicResponseChannelMap() error {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	var err error
+	for topic := range kp.topicToResponseChannelMap {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+		}
+		delete(kp.topicToResponseChannelMap, topic)
+	}
+	return err
+}
+
+func (kp *InterContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
+		kp.topicToRequestHandlerChannelMap[topic] = arg
+	}
+}
+
+func (kp *InterContainerProxy) deleteFromTopicRequestHandlerChannelMap(topic string) error {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
+		// Close the kafka client first by unsubscribing from this topic
+		kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch)
+		delete(kp.topicToRequestHandlerChannelMap, topic)
+		return nil
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+func (kp *InterContainerProxy) deleteAllTopicRequestHandlerChannelMap() error {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	var err error
+	for topic := range kp.topicToRequestHandlerChannelMap {
+		// Close the kafka client first by unsubscribing from this topic
+		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+		}
+		delete(kp.topicToRequestHandlerChannelMap, topic)
+	}
+	return err
+}
+
+func (kp *InterContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
+		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
+	}
+}
+
+func (kp *InterContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
+		// Close the channel first
+		close(transChannel.ch)
+		delete(kp.transactionIdToChannelMap, id)
+	}
+}
+
+func (kp *InterContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		if value.topic.Name == id {
+			close(value.ch)
+			delete(kp.transactionIdToChannelMap, key)
+		}
+	}
+}
+
+func (kp *InterContainerProxy) deleteAllTransactionIdToChannelMap() {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		close(value.ch)
+		delete(kp.transactionIdToChannelMap, key)
+	}
+}
+
+func (kp *InterContainerProxy) DeleteTopic(topic Topic) error {
+	// If we have any consumers on that topic we need to close them
+	if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
+		logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	}
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
+		logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	}
+	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
+
+	return kp.kafkaClient.DeleteTopic(&topic)
+}
+
+func encodeReturnedValue(returnedVal interface{}) (*any.Any, error) {
+	// Encode the response argument - needs to be a proto message
+	if returnedVal == nil {
+		return nil, nil
+	}
+	protoValue, ok := returnedVal.(proto.Message)
+	if !ok {
+		logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		err := errors.New("response-value-not-proto-message")
+		return nil, err
+	}
+
+	// Marshal the returned value, if any
+	var marshalledReturnedVal *any.Any
+	var err error
+	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
+		logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
+		return nil, err
+	}
+	return marshalledReturnedVal, nil
+}
+
+func encodeDefaultFailedResponse(request *ic.InterContainerMessage) *ic.InterContainerMessage {
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		Timestamp: time.Now().Unix(),
+	}
+	responseBody := &ic.InterContainerResponseBody{
+		Success: false,
+		Result:  nil,
+	}
+	var marshalledResponseBody *any.Any
+	var err error
+	// Error should never happen here
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}
+
+}
+
+//encodeResponse encodes a response to send over kafka and returns an InterContainerMessage on success
+//or an error on failure
+func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		KeyTopic:  request.Header.KeyTopic,
+		Timestamp: time.Now().UnixNano(),
+	}
+
+	// Go over all returned values
+	var marshalledReturnedVal *any.Any
+	var err error
+	for _, returnVal := range returnedValues {
+		if marshalledReturnedVal, err = encodeReturnedValue(returnVal); err != nil {
+			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+		}
+		break // for now we support only 1 returned value - (excluding the error)
+	}
+
+	responseBody := &ic.InterContainerResponseBody{
+		Success: success,
+		Result:  marshalledReturnedVal,
+	}
+
+	// Marshal the response body
+	var marshalledResponseBody *any.Any
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+		return nil, err
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}, nil
+}
+
+func CallFuncByName(myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+	myClassValue := reflect.ValueOf(myClass)
+	// Capitalize the first letter of funcName, since only exported (capitalized) methods
+	// can be invoked on a value from a different package
+	funcName = strings.Title(funcName)
+	m := myClassValue.MethodByName(funcName)
+	if !m.IsValid() {
+		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
+	}
+	in := make([]reflect.Value, len(params))
+	for i, param := range params {
+		in[i] = reflect.ValueOf(param)
+	}
+	out = m.Call(in)
+	return
+}
+
+func (kp *InterContainerProxy) addTransactionId(transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+	arg := &KVArg{
+		Key:   TransactionKey,
+		Value: &ic.StrType{Val: transactionId},
+	}
+
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   arg.Key,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+func (kp *InterContainerProxy) addFromTopic(fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   FromTopic,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+func (kp *InterContainerProxy) handleMessage(msg *ic.InterContainerMessage, targetInterface interface{}) {
+
+	// First extract the header to know whether this is a request - responses are handled by a different handler
+	if msg.Header.Type == ic.MessageType_REQUEST {
+		var out []reflect.Value
+		var err error
+
+		// Get the request body
+		requestBody := &ic.InterContainerRequestBody{}
+		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
+			logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
+		} else {
+			logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			// let the callee unpack the arguments as it's the only one that knows the real proto type
+			// Augment the requestBody with the message Id as it will be used in scenarios where cores
+			// are set in pairs and competing
+			requestBody.Args = kp.addTransactionId(msg.Header.Id, requestBody.Args)
+
+			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
+			// needs to send an unsolicited message to the currently requested container
+			requestBody.Args = kp.addFromTopic(msg.Header.FromTopic, requestBody.Args)
+
+			out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
+			if err != nil {
+				logger.Warn(err)
+			}
+		}
+		// Response required?
+		if requestBody.ResponseRequired {
+			// If we already have an error before then just return that
+			var returnError *ic.Error
+			var returnedValues []interface{}
+			var success bool
+			if err != nil {
+				returnError = &ic.Error{Reason: err.Error()}
+				returnedValues = make([]interface{}, 1)
+				returnedValues[0] = returnError
+			} else {
+				returnedValues = make([]interface{}, 0)
+				// Check for errors first
+				lastIndex := len(out) - 1
+				if out[lastIndex].Interface() != nil { // Error
+					if retError, ok := out[lastIndex].Interface().(error); ok {
+						if retError.Error() == ErrorTransactionNotAcquired.Error() {
+							logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							return // Ignore - process is in competing mode and ignored transaction
+						}
+						returnError = &ic.Error{Reason: retError.Error()}
+						returnedValues = append(returnedValues, returnError)
+					} else { // Should never happen
+						returnError = &ic.Error{Reason: "incorrect-error-returns"}
+						returnedValues = append(returnedValues, returnError)
+					}
+				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
+					logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					return // Ignore - should not happen
+				} else { // Non-error case
+					success = true
+					for idx, val := range out {
+						//logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						if idx != lastIndex {
+							returnedValues = append(returnedValues, val.Interface())
+						}
+					}
+				}
+			}
+
+			var icm *ic.InterContainerMessage
+			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
+				logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(msg)
+			}
+			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
+			// by providing a message key.   The key is encoded in the topic name.  If the deviceId is not
+			// present then the key will be empty, hence all messages for a given topic will be sent to all
+			// partitions.
+			replyTopic := &Topic{Name: msg.Header.FromTopic}
+			key := msg.Header.KeyTopic
+			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			// TODO: handle error response.
+			go kp.kafkaClient.Send(icm, replyTopic, key)
+		}
+	} else if msg.Header.Type == ic.MessageType_RESPONSE {
+		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(msg)
+	} else {
+		logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
+	}
+}
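For illustration only (not part of this patch): the handler contract implied by handleMessage and CallFuncByName above - an exported method on the registered interface that takes the request arguments and returns a proto message plus an error. The handler type and rpc name here are assumptions.

	type requestHandler struct{}

	// Invoked by reflection when a request with Rpc == "GetHealth" arrives. The
	// transactionID and fromTopic arguments appended above show up in args too.
	func (h *requestHandler) GetHealth(args []*ic.Argument) (*voltha.HealthStatus, error) {
		return &voltha.HealthStatus{State: voltha.HealthStatus_HEALTHY}, nil
	}

	// Registration, e.g.:
	//   proxy.SubscribeWithRequestHandlerInterface(kafka.Topic{Name: "openolt"}, &requestHandler{})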
+
+func (kp *InterContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+	//	Wait for messages
+	for msg := range ch {
+		//logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(msg, targetInterface)
+	}
+}
+
+func (kp *InterContainerProxy) dispatchResponse(msg *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.RLock()
+	defer kp.lockTransactionIdToChannelMap.RUnlock()
+	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
+		logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		return
+	}
+	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
+}
+
+// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
+// This method is built to prevent all subscribers from receiving all messages, as is the case with the Subscribe
+// API. There is one response channel waiting for kafka messages before dispatching the message to the
+// corresponding waiting channel
+func (kp *InterContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+
+	// Create a specific channel for this consumer.  We cannot use the channel from the kafkaclient as it will
+	// broadcast any message for this topic to all channels waiting on it.
+	ch := make(chan *ic.InterContainerMessage)
+	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
+
+	return ch, nil
+}
+
+func (kp *InterContainerProxy) unSubscribeForResponse(trnsId string) error {
+	logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+	kp.deleteFromTransactionIdToChannelMap(trnsId)
+	return nil
+}
+
+func (kp *InterContainerProxy) EnableLivenessChannel(enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(enable)
+}
+
+func (kp *InterContainerProxy) EnableHealthinessChannel(enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(enable)
+}
+
+func (kp *InterContainerProxy) SendLiveness() error {
+	return kp.kafkaClient.SendLiveness()
+}
+
+//encodeRequest formats a request to send over kafka and returns an InterContainerMessage on success
+//or an error on failure
+func encodeRequest(rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+	requestHeader := &ic.Header{
+		Id:        uuid.New().String(),
+		Type:      ic.MessageType_REQUEST,
+		FromTopic: replyTopic.Name,
+		ToTopic:   toTopic.Name,
+		KeyTopic:  key,
+		Timestamp: time.Now().UnixNano(),
+	}
+	requestBody := &ic.InterContainerRequestBody{
+		Rpc:              rpc,
+		ResponseRequired: true,
+		ReplyToTopic:     replyTopic.Name,
+	}
+
+	for _, arg := range kvArgs {
+		if arg == nil {
+			// In case the caller sends an array with empty args
+			continue
+		}
+		var marshalledArg *any.Any
+		var err error
+		// ascertain the value interface type is a proto.Message
+		protoValue, ok := arg.Value.(proto.Message)
+		if !ok {
+			logger.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			err := errors.New("argument-value-not-proto-message")
+			return nil, err
+		}
+		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
+			logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
+			return nil, err
+		}
+		protoArg := &ic.Argument{
+			Key:   arg.Key,
+			Value: marshalledArg,
+		}
+		requestBody.Args = append(requestBody.Args, protoArg)
+	}
+
+	var marshalledData *any.Any
+	var err error
+	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
+		logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
+		return nil, err
+	}
+	request := &ic.InterContainerMessage{
+		Header: requestHeader,
+		Body:   marshalledData,
+	}
+	return request, nil
+}
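For illustration only (not part of this patch): how a callee might unpack one of the proto Any arguments that encodeRequest packs above. The argument key is an assumption.

	func deviceIDFromArgs(args []*ic.Argument) (string, error) {
		for _, arg := range args {
			if arg.Key != "device_id" {
				continue
			}
			// Reverse of the ptypes.MarshalAny call in encodeRequest
			val := &ic.StrType{}
			if err := ptypes.UnmarshalAny(arg.Value, val); err != nil {
				return "", err
			}
			return val.Val, nil
		}
		return "", errors.New("device_id-not-found")
	}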
+
+func decodeResponse(response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+	//	Extract the message body
+	responseBody := ic.InterContainerResponseBody{}
+	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
+		logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		return nil, err
+	}
+	//logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+
+	return &responseBody, nil
+
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
new file mode 100644
index 0000000..9d4ab52
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
@@ -0,0 +1,1159 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Shopify/sarama"
+	scc "github.com/bsm/sarama-cluster"
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/golang/protobuf/proto"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
+
+type returnErrorFunction func() error
+
+// consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
+// topic, the consumer(s) broadcast the message to all the listening channels.  The consumer can be a partition
+// consumer or a group consumer
+type consumerChannels struct {
+	consumers []interface{}
+	channels  []chan *ic.InterContainerMessage
+}
+
+// SaramaClient represents the messaging proxy
+type SaramaClient struct {
+	cAdmin                        sarama.ClusterAdmin
+	client                        sarama.Client
+	KafkaHost                     string
+	KafkaPort                     int
+	producer                      sarama.AsyncProducer
+	consumer                      sarama.Consumer
+	groupConsumers                map[string]*scc.Consumer
+	lockOfGroupConsumers          sync.RWMutex
+	consumerGroupPrefix           string
+	consumerType                  int
+	consumerGroupName             string
+	producerFlushFrequency        int
+	producerFlushMessages         int
+	producerFlushMaxmessages      int
+	producerRetryMax              int
+	producerRetryBackOff          time.Duration
+	producerReturnSuccess         bool
+	producerReturnErrors          bool
+	consumerMaxwait               int
+	maxProcessingTime             int
+	numPartitions                 int
+	numReplicas                   int
+	autoCreateTopic               bool
+	doneCh                        chan int
+	topicToConsumerChannelMap     map[string]*consumerChannels
+	lockTopicToConsumerChannelMap sync.RWMutex
+	topicLockMap                  map[string]*sync.RWMutex
+	lockOfTopicLockMap            sync.RWMutex
+	metadataMaxRetry              int
+	alive                         bool
+	liveness                      chan bool
+	livenessChannelInterval       time.Duration
+	lastLivenessTime              time.Time
+	started                       bool
+	healthy                       bool
+	healthiness                   chan bool
+}
+
+type SaramaClientOption func(*SaramaClient)
+
+func Host(host string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.KafkaHost = host
+	}
+}
+
+func Port(port int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.KafkaPort = port
+	}
+}
+
+func ConsumerGroupPrefix(prefix string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupPrefix = prefix
+	}
+}
+
+func ConsumerGroupName(name string) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerGroupName = name
+	}
+}
+
+func ConsumerType(consumer int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerType = consumer
+	}
+}
+
+func ProducerFlushFrequency(frequency int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushFrequency = frequency
+	}
+}
+
+func ProducerFlushMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMessages = num
+	}
+}
+
+func ProducerFlushMaxMessages(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerFlushMaxmessages = num
+	}
+}
+
+func ProducerMaxRetries(num int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryMax = num
+	}
+}
+
+func ProducerRetryBackoff(duration time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerRetryBackOff = duration
+	}
+}
+
+func ProducerReturnOnErrors(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnErrors = opt
+	}
+}
+
+func ProducerReturnOnSuccess(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.producerReturnSuccess = opt
+	}
+}
+
+func ConsumerMaxWait(wait int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.consumerMaxwait = wait
+	}
+}
+
+func MaxProcessingTime(pTime int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.maxProcessingTime = pTime
+	}
+}
+
+func NumPartitions(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numPartitions = number
+	}
+}
+
+func NumReplicas(number int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.numReplicas = number
+	}
+}
+
+func AutoCreateTopic(opt bool) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.autoCreateTopic = opt
+	}
+}
+
+func MetadatMaxRetries(retry int) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.metadataMaxRetry = retry
+	}
+}
+
+func LivenessChannelInterval(opt time.Duration) SaramaClientOption {
+	return func(args *SaramaClient) {
+		args.livenessChannelInterval = opt
+	}
+}
+
+func NewSaramaClient(opts ...SaramaClientOption) *SaramaClient {
+	client := &SaramaClient{
+		KafkaHost: DefaultKafkaHost,
+		KafkaPort: DefaultKafkaPort,
+	}
+	client.consumerType = DefaultConsumerType
+	client.producerFlushFrequency = DefaultProducerFlushFrequency
+	client.producerFlushMessages = DefaultProducerFlushMessages
+	client.producerFlushMaxmessages = DefaultProducerFlushMaxmessages
+	client.producerReturnErrors = DefaultProducerReturnErrors
+	client.producerReturnSuccess = DefaultProducerReturnSuccess
+	client.producerRetryMax = DefaultProducerRetryMax
+	client.producerRetryBackOff = DefaultProducerRetryBackoff
+	client.consumerMaxwait = DefaultConsumerMaxwait
+	client.maxProcessingTime = DefaultMaxProcessingTime
+	client.numPartitions = DefaultNumberPartitions
+	client.numReplicas = DefaultNumberReplicas
+	client.autoCreateTopic = DefaultAutoCreateTopic
+	client.metadataMaxRetry = DefaultMetadataMaxRetry
+	client.livenessChannelInterval = DefaultLivenessChannelInterval
+
+	for _, option := range opts {
+		option(client)
+	}
+
+	client.groupConsumers = make(map[string]*scc.Consumer)
+
+	client.lockTopicToConsumerChannelMap = sync.RWMutex{}
+	client.topicLockMap = make(map[string]*sync.RWMutex)
+	client.lockOfTopicLockMap = sync.RWMutex{}
+	client.lockOfGroupConsumers = sync.RWMutex{}
+
+	// healthy and alive until proven otherwise
+	client.alive = true
+	client.healthy = true
+
+	return client
+}
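For illustration only (not part of this patch): overriding a few of the defaults set above; every value shown is an assumption.

	client := kafka.NewSaramaClient(
		kafka.Host("voltha-kafka"),
		kafka.Port(9092),
		kafka.ConsumerType(kafka.GroupCustomer),
		kafka.ConsumerGroupPrefix("openolt-"),
		kafka.AutoCreateTopic(true),
		kafka.NumPartitions(3),
		kafka.ProducerRetryBackoff(time.Millisecond*30))
	if err := client.Start(); err != nil {
		panic(err)
	}
	defer client.Stop()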
+
+func (sc *SaramaClient) Start() error {
+	logger.Info("Starting-kafka-sarama-client")
+
+	// Create the Done channel
+	sc.doneCh = make(chan int, 1)
+
+	var err error
+
+	// Add a cleanup in case of failure to startup
+	defer func() {
+		if err != nil {
+			sc.Stop()
+		}
+	}()
+
+	// Create the Cluster Admin
+	if err = sc.createClusterAdmin(); err != nil {
+		logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the Publisher
+	// Assign to the outer err so the deferred cleanup above runs on failure
+	if err = sc.createPublisher(); err != nil {
+		logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
+		return err
+	}
+
+	if sc.consumerType == DefaultConsumerType {
+		// Create the master consumers
+		if err = sc.createConsumer(); err != nil {
+			logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
+			return err
+		}
+	}
+
+	// Create the topic to consumers/channel map
+	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
+
+	logger.Info("kafka-sarama-client-started")
+
+	sc.started = true
+
+	return nil
+}
+
+func (sc *SaramaClient) Stop() {
+	logger.Info("stopping-sarama-client")
+
+	sc.started = false
+
+	//Send a message over the done channel to close all long running routines
+	sc.doneCh <- 1
+
+	if sc.producer != nil {
+		if err := sc.producer.Close(); err != nil {
+			logger.Errorw("closing-producer-failed", log.Fields{"error": err})
+		}
+	}
+
+	if sc.consumer != nil {
+		if err := sc.consumer.Close(); err != nil {
+			logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
+		}
+	}
+
+	for key, val := range sc.groupConsumers {
+		logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
+		if err := val.Close(); err != nil {
+			logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+		}
+	}
+
+	if sc.cAdmin != nil {
+		if err := sc.cAdmin.Close(); err != nil {
+			logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
+		}
+	}
+
+	//TODO: Clear the consumers map
+	//sc.clearConsumerChannelMap()
+
+	logger.Info("sarama-client-stopped")
+}
+
+//createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
+// the invoking function must hold the lock
+func (sc *SaramaClient) createTopic(topic *Topic, numPartition int, repFactor int) error {
+	// Set the topic details
+	topicDetail := &sarama.TopicDetail{}
+	topicDetail.NumPartitions = int32(numPartition)
+	topicDetail.ReplicationFactor = int16(repFactor)
+	topicDetail.ConfigEntries = make(map[string]*string)
+	topicDetails := make(map[string]*sarama.TopicDetail)
+	topicDetails[topic.Name] = topicDetail
+
+	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
+		if err == sarama.ErrTopicAlreadyExists {
+			//	Not an error
+			logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw("create-topic-failure", log.Fields{"error": err})
+		return err
+	}
+	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
+	// do so.
+	logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	return nil
+}
+
+//CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
+// ensure no two go routines are performing operations on the same topic
+func (sc *SaramaClient) CreateTopic(topic *Topic, numPartition int, repFactor int) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	return sc.createTopic(topic, numPartition, repFactor)
+}
+
+//DeleteTopic removes a topic from the kafka Broker
+func (sc *SaramaClient) DeleteTopic(topic *Topic) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	// Remove the topic from the broker
+	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
+		if err == sarama.ErrUnknownTopicOrPartition {
+			//	Not an error as the topic does not exist
+			logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
+			return nil
+		}
+		logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+
+	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
+	if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
+		logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+		return err
+	}
+	return nil
+}
+
+// Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
+// messages from that topic
+func (sc *SaramaClient) Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
+
+	// If a consumer already exists for that topic then reuse it
+	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
+		logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
+		// Create a channel specific to that consumer and add it to the consumer channel map
+		ch := make(chan *ic.InterContainerMessage)
+		sc.addChannelToConsumerChannelMap(topic, ch)
+		return ch, nil
+	}
+
+	// Register for the topic and set it up
+	var consumerListeningChannel chan *ic.InterContainerMessage
+	var err error
+
+	// Use the consumerType option to figure out the type of consumer to launch
+	if sc.consumerType == PartitionConsumer {
+		if sc.autoCreateTopic {
+			if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+				return nil, err
+			}
+		}
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+	} else if sc.consumerType == GroupCustomer {
+		// TODO: create topic if auto create is on.  There is an issue with the sarama cluster library that
+		// does not consume from a precreated topic in some scenarios
+		//if sc.autoCreateTopic {
+		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
+		//		logger.Errorw("create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		return nil, err
+		//	}
+		//}
+		//groupId := sc.consumerGroupName
+		groupId := getGroupId(kvArgs...)
+		// Include the group prefix
+		if groupId != "" {
+			groupId = sc.consumerGroupPrefix + groupId
+		} else {
+			// Need to use a unique group Id per topic
+			groupId = sc.consumerGroupPrefix + topic.Name
+		}
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+			return nil, err
+		}
+
+	} else {
+		logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		return nil, errors.New("unknown-consumer-type")
+	}
+
+	return consumerListeningChannel, nil
+}
+
+//UnSubscribe unsubscribes a consumer from a given topic
+func (sc *SaramaClient) UnSubscribe(topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopic(topic)
+	defer sc.unLockTopic(topic)
+
+	logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	var err error
+	if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
+		logger.Errorw("failed-removing-channel", log.Fields{"error": err})
+	}
+	if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
+		logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
+	}
+	return err
+}
+
+func (sc *SaramaClient) updateLiveness(alive bool) {
+	// Post a consistent stream of liveness data to the channel,
+	// so that in a live state, the core does not timeout and
+	// send a forced liveness message. Production of liveness
+	// events to the channel is rate-limited by livenessChannelInterval.
+	if sc.liveness != nil {
+		if sc.alive != alive {
+			logger.Info("update-liveness-channel-because-change")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
+			logger.Info("update-liveness-channel-because-interval")
+			sc.liveness <- alive
+			sc.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Only emit a log message when the state changes
+	if sc.alive != alive {
+		logger.Info("set-client-alive", log.Fields{"alive": alive})
+		sc.alive = alive
+	}
+}
+
+// Once unhealthy, we never go back
+func (sc *SaramaClient) setUnhealthy() {
+	sc.healthy = false
+	if sc.healthiness != nil {
+		logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		sc.healthiness <- sc.healthy
+	}
+}
+
+func (sc *SaramaClient) isLivenessError(err error) bool {
+	// Sarama producers and consumers encapsulate the error inside
+	// a ProducerError or ConsumerError struct.
+	if prodError, ok := err.(*sarama.ProducerError); ok {
+		err = prodError.Err
+	} else if consumerError, ok := err.(*sarama.ConsumerError); ok {
+		err = consumerError.Err
+	}
+
+	// Sarama-Cluster composes the error into a ClusterError struct, which we
+	// can't compare by reference. To handle that, the best we can do is
+	// compare the error strings.
+
+	switch err.Error() {
+	case context.DeadlineExceeded.Error():
+		logger.Info("is-liveness-error-timeout")
+		return true
+	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
+		logger.Info("is-liveness-error-no-brokers")
+		return true
+	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
+		logger.Info("is-liveness-error-shutting-down")
+		return true
+	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
+		logger.Info("is-liveness-error-not-available")
+		return true
+	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
+		logger.Info("is-liveness-error-circuit-breaker-open")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
+		logger.Info("is-liveness-error-connection-refused")
+		return true
+	}
+
+	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
+		logger.Info("is-liveness-error-io-timeout")
+		return true
+	}
+
+	// Other errors shouldn't trigger a loss of liveness
+
+	logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
+
+	return false
+}
+
+// Send formats and sends the message onto the kafka messaging bus.
+func (sc *SaramaClient) Send(msg interface{}, topic *Topic, keys ...string) error {
+
+	// Assert message is a proto message
+	var protoMsg proto.Message
+	var ok bool
+	// ascertain the value interface type is a proto.Message
+	if protoMsg, ok = msg.(proto.Message); !ok {
+		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
+		return fmt.Errorf("not-a-proto-msg-%s", msg)
+	}
+
+	var marshalled []byte
+	var err error
+	//	Create the Sarama producer message
+	if marshalled, err = proto.Marshal(protoMsg); err != nil {
+		logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		return err
+	}
+	key := ""
+	if len(keys) > 0 {
+		key = keys[0] // Only the first key is relevant
+	}
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: topic.Name,
+		Key:   sarama.StringEncoder(key),
+		Value: sarama.ByteEncoder(marshalled),
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw("error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(notOk) {
+			sc.updateLiveness(false)
+		}
+		return notOk
+	}
+	return nil
+}
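For illustration only (not part of this patch): Send accepts only proto messages, and the optional key pins the partition so per-device ordering holds. The header fields and names are assumptions.

	msg := &ic.InterContainerMessage{
		Header: &ic.Header{
			Id:        uuid.New().String(),
			Type:      ic.MessageType_REQUEST,
			Timestamp: time.Now().UnixNano(),
		},
	}
	// A send error recognized by isLivenessError also flips the liveness channel to false.
	if err := client.Send(msg, &kafka.Topic{Name: "openolt"}, "device-1234"); err != nil {
		// handle or retry
	}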
+
+// Enable the liveness monitor channel. This channel will report
+// a "true" or "false" on every publish, which indicates whether
+// or not the channel is still live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
+	logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
+	if enable {
+		if sc.liveness == nil {
+			logger.Info("kafka-create-liveness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.liveness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.liveness <- sc.alive
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// liveness monitoring
+		panic("Turning off liveness reporting is not supported")
+	}
+	return sc.liveness
+}
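For illustration only (not part of this patch): a sketch of a service draining the liveness channel; the reaction shown is an assumption.

	liveness := client.EnableLivenessChannel(true)
	go func() {
		for alive := range liveness {
			if !alive {
				// e.g. mark the service not-ready and probe with client.SendLiveness()
			}
		}
	}()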
+
+// Enable the Healthiness monitor channel. This channel will report "false"
+// if the kafka consumers die, or some other catastrophic problem occurs
+// that would require re-creating the client.
+func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
+	logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+	if enable {
+		if sc.healthiness == nil {
+			logger.Info("kafka-create-healthiness-channel")
+			// At least 1, so we can immediately post to it without blocking
+			// Setting a bigger number (10) allows the monitor to fall behind
+			// without blocking others. The monitor shouldn't really fall
+			// behind...
+			sc.healthiness = make(chan bool, 10)
+			// post initial state to the channel
+			sc.healthiness <- sc.healthy
+		}
+	} else {
+		// TODO: Think about whether we need the ability to turn off
+		// healthiness monitoring
+		panic("Turning off healthiness reporting is not supported")
+	}
+	return sc.healthiness
+}
+
+// SendLiveness publishes a timestamped message on the _liveness_test topic to check whether
+// connectivity to the broker has been restored.
+func (sc *SaramaClient) SendLiveness() error {
+	if !sc.started {
+		return fmt.Errorf("SendLiveness() called while not started")
+	}
+
+	kafkaMsg := &sarama.ProducerMessage{
+		Topic: "_liveness_test",
+		Value: sarama.StringEncoder(time.Now().Format(time.RFC3339)), // for debugging / informative use
+	}
+
+	// Send message to kafka
+	sc.producer.Input() <- kafkaMsg
+	// Wait for result
+	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
+	select {
+	case ok := <-sc.producer.Successes():
+		logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(true)
+	case notOk := <-sc.producer.Errors():
+		logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(notOk) {
+			sc.updateLiveness(false)
+		}
+		return notOk
+	}
+	return nil
+}
+
+// getGroupId returns the group id from the key-value args.
+func getGroupId(kvArgs ...*KVArg) string {
+	for _, arg := range kvArgs {
+		if arg.Key == GroupIdKey {
+			return arg.Value.(string)
+		}
+	}
+	return ""
+}
+
+// getOffset returns the offset from the key-value args.
+func getOffset(kvArgs ...*KVArg) int64 {
+	for _, arg := range kvArgs {
+		if arg.Key == Offset {
+			return arg.Value.(int64)
+		}
+	}
+	return sarama.OffsetNewest
+}
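For illustration only (not part of this patch): how the GroupIdKey and Offset key-value args read by the helpers above are supplied to Subscribe. Topic and group names are assumptions.

	ch, err := client.Subscribe(&kafka.Topic{Name: "openolt"},
		&kafka.KVArg{Key: kafka.GroupIdKey, Value: "openolt-group"},
		&kafka.KVArg{Key: kafka.Offset, Value: sarama.OffsetOldest})
	if err != nil {
		panic(err)
	}
	for msg := range ch {
		_ = msg // each msg is an *ic.InterContainerMessage
	}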
+
+func (sc *SaramaClient) createClusterAdmin() error {
+	kafkaFullAddr := fmt.Sprintf("%s:%d", sc.KafkaHost, sc.KafkaPort)
+	config := sarama.NewConfig()
+	config.Version = sarama.V1_0_0_0
+
+	// Create a cluster Admin
+	var cAdmin sarama.ClusterAdmin
+	var err error
+	if cAdmin, err = sarama.NewClusterAdmin([]string{kafkaFullAddr}, config); err != nil {
+		logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": kafkaFullAddr})
+		return err
+	}
+	sc.cAdmin = cAdmin
+	return nil
+}
+
+func (sc *SaramaClient) lockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	if _, exist := sc.topicLockMap[topic.Name]; exist {
+		sc.lockOfTopicLockMap.Unlock()
+		sc.topicLockMap[topic.Name].Lock()
+	} else {
+		sc.topicLockMap[topic.Name] = &sync.RWMutex{}
+		sc.lockOfTopicLockMap.Unlock()
+		sc.topicLockMap[topic.Name].Lock()
+	}
+}
+
+func (sc *SaramaClient) unLockTopic(topic *Topic) {
+	sc.lockOfTopicLockMap.Lock()
+	defer sc.lockOfTopicLockMap.Unlock()
+	if _, exist := sc.topicLockMap[topic.Name]; exist {
+		sc.topicLockMap[topic.Name].Unlock()
+	}
+}
+
+func (sc *SaramaClient) addTopicToConsumerChannelMap(id string, arg *consumerChannels) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if _, exist := sc.topicToConsumerChannelMap[id]; !exist {
+		sc.topicToConsumerChannelMap[id] = arg
+	}
+}
+
+func (sc *SaramaClient) deleteFromTopicToConsumerChannelMap(id string) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if _, exist := sc.topicToConsumerChannelMap[id]; exist {
+		delete(sc.topicToConsumerChannelMap, id)
+	}
+}
+
+func (sc *SaramaClient) getConsumerChannel(topic *Topic) *consumerChannels {
+	sc.lockTopicToConsumerChannelMap.RLock()
+	defer sc.lockTopicToConsumerChannelMap.RUnlock()
+
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		return consumerCh
+	}
+	return nil
+}
+
+func (sc *SaramaClient) addChannelToConsumerChannelMap(topic *Topic, ch chan *ic.InterContainerMessage) {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		consumerCh.channels = append(consumerCh.channels, ch)
+		return
+	}
+	logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+}
+
+//closeConsumers closes a list of sarama consumers.  The consumers can be either partition consumers or group consumers
+func closeConsumers(consumers []interface{}) error {
+	var err error
+	for _, consumer := range consumers {
+		//	Is it a partition consumer?
+		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			if errTemp := partionConsumer.Close(); errTemp != nil {
+				logger.Debugw("partition!!!", log.Fields{"err": errTemp})
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur due to a race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		} else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+			if errTemp := groupConsumer.Close(); errTemp != nil {
+				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
+					// This can occur due to a race condition
+					err = nil
+				} else {
+					err = errTemp
+				}
+			}
+		}
+	}
+	return err
+}
+
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(topic Topic, ch <-chan *ic.InterContainerMessage) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		// Channel will be closed in the removeChannel method
+		consumerCh.channels = removeChannel(consumerCh.channels, ch)
+		// If there are no more channels then we can close the consumers itself
+		if len(consumerCh.channels) == 0 {
+			logger.Debugw("closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(consumerCh.consumers)
+			//err := consumerCh.consumers.Close()
+			delete(sc.topicToConsumerChannelMap, topic.Name)
+			return err
+		}
+		return nil
+	}
+	logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return errors.New("topic-does-not-exist")
+}
+
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(topic Topic) error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
+		for _, ch := range consumerCh.channels {
+			// Channels are closed inside removeChannel; the returned slice is
+			// discarded because the whole map entry is deleted below.
+			removeChannel(consumerCh.channels, ch)
+		}
+		err := closeConsumers(consumerCh.consumers)
+		//if err == sarama.ErrUnknownTopicOrPartition {
+		//	// Not an error
+		//	err = nil
+		//}
+		//err := consumerCh.consumers.Close()
+		delete(sc.topicToConsumerChannelMap, topic.Name)
+		return err
+	}
+	logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	return nil
+}
+
+func (sc *SaramaClient) clearConsumerChannelMap() error {
+	sc.lockTopicToConsumerChannelMap.Lock()
+	defer sc.lockTopicToConsumerChannelMap.Unlock()
+	var err error
+	for topic, consumerCh := range sc.topicToConsumerChannelMap {
+		for _, ch := range consumerCh.channels {
+			// Channels are closed inside removeChannel; the returned slice is
+			// discarded because the map entry is deleted below.
+			removeChannel(consumerCh.channels, ch)
+		}
+		if errTemp := closeConsumers(consumerCh.consumers); errTemp != nil {
+			err = errTemp
+		}
+		//err = consumerCh.consumers.Close()
+		delete(sc.topicToConsumerChannelMap, topic)
+	}
+	return err
+}
+
+//createPublisher creates the publisher used to send messages onto Kafka
+func (sc *SaramaClient) createPublisher() error {
+	// Build the producer configuration
+	config := sarama.NewConfig()
+	config.Producer.Partitioner = sarama.NewRandomPartitioner
+	config.Producer.Flush.Frequency = time.Duration(sc.producerFlushFrequency)
+	config.Producer.Flush.Messages = sc.producerFlushMessages
+	config.Producer.Flush.MaxMessages = sc.producerFlushMaxmessages
+	config.Producer.Return.Errors = sc.producerReturnErrors
+	config.Producer.Return.Successes = sc.producerReturnSuccess
+	//config.Producer.RequiredAcks = sarama.WaitForAll
+	config.Producer.RequiredAcks = sarama.WaitForLocal
+
+	kafkaFullAddr := fmt.Sprintf("%s:%d", sc.KafkaHost, sc.KafkaPort)
+	brokers := []string{kafkaFullAddr}
+
+	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
+		logger.Errorw("error-starting-publisher", log.Fields{"error": err})
+		return err
+	} else {
+		sc.producer = producer
+	}
+	logger.Info("Kafka-publisher-created")
+	return nil
+}
+
+func (sc *SaramaClient) createConsumer() error {
+	config := sarama.NewConfig()
+	config.Consumer.Return.Errors = true
+	config.Consumer.Fetch.Min = 1
+	config.Consumer.MaxWaitTime = time.Duration(sc.consumerMaxwait) * time.Millisecond
+	config.Consumer.MaxProcessingTime = time.Duration(sc.maxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = sarama.OffsetNewest
+	config.Metadata.Retry.Max = sc.metadataMaxRetry
+	kafkaFullAddr := fmt.Sprintf("%s:%d", sc.KafkaHost, sc.KafkaPort)
+	brokers := []string{kafkaFullAddr}
+
+	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
+		logger.Errorw("error-starting-consumers", log.Fields{"error": err})
+		return err
+	} else {
+		sc.consumer = consumer
+	}
+	logger.Info("Kafka-consumers-created")
+	return nil
+}
+
+// createGroupConsumer creates a consumer group for the given topic
+func (sc *SaramaClient) createGroupConsumer(topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+	config := scc.NewConfig()
+	config.ClientID = uuid.New().String()
+	config.Group.Mode = scc.ConsumerModeMultiplex
+	config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
+	config.Consumer.Return.Errors = true
+	//config.Group.Return.Notifications = false
+	//config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
+	//config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
+	config.Consumer.Offsets.Initial = initialOffset
+	//config.Consumer.Offsets.Initial = sarama.OffsetOldest
+	kafkaFullAddr := fmt.Sprintf("%s:%d", sc.KafkaHost, sc.KafkaPort)
+	brokers := []string{kafkaFullAddr}
+
+	topics := []string{topic.Name}
+	var consumer *scc.Consumer
+	var err error
+
+	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
+		logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		return nil, err
+	}
+	logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+
+	//sc.groupConsumers[topic.Name] = consumer
+	sc.addToGroupConsumers(topic.Name, consumer)
+	return consumer, nil
+}
+
+// dispatchToConsumers sends the InterContainerMessage received on a given topic to all subscribers for that
+// topic via the unique channel each subscriber received during subscription
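+// Each message is forwarded on its own goroutine because the subscriber channels are
+// created unbuffered (see setupPartitionConsumerChannel); a slow subscriber therefore
+// blocks only its own delivery goroutine, not the whole dispatch loop.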
+func (sc *SaramaClient) dispatchToConsumers(consumerCh *consumerChannels, protoMessage *ic.InterContainerMessage) {
+	// Need to go over all channels and publish messages to them - do we need to copy msg?
+	sc.lockTopicToConsumerChannelMap.RLock()
+	defer sc.lockTopicToConsumerChannelMap.RUnlock()
+	for _, ch := range consumerCh.channels {
+		go func(c chan *ic.InterContainerMessage) {
+			c <- protoMessage
+		}(ch)
+	}
+}
+
+func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(err) {
+					sc.updateLiveness(false)
+					logger.Warnw("partition-consumers-error", log.Fields{"error": err})
+				}
+			} else {
+				// Channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			//logger.Debugw("message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			if !ok {
+				// channel is closed
+				break startloop
+			}
+			msgBody := msg.Value
+			sc.updateLiveness(true)
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw("partition-invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+		case <-sc.doneCh:
+			logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy()
+}
+
+func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+
+startloop:
+	for {
+		select {
+		case err, ok := <-consumer.Errors():
+			if ok {
+				if sc.isLivenessError(err) {
+					sc.updateLiveness(false)
+				}
+				logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+			} else {
+				logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				// channel is closed
+				break startloop
+			}
+		case msg, ok := <-consumer.Messages():
+			if !ok {
+				logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				// Channel closed
+				break startloop
+			}
+			sc.updateLiveness(true)
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			msgBody := msg.Value
+			icm := &ic.InterContainerMessage{}
+			if err := proto.Unmarshal(msgBody, icm); err != nil {
+				logger.Warnw("invalid-message", log.Fields{"error": err})
+				continue
+			}
+			go sc.dispatchToConsumers(consumerChnls, icm)
+			consumer.MarkOffset(msg, "")
+		case ntf := <-consumer.Notifications():
+			logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
+		case <-sc.doneCh:
+			logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
+			break startloop
+		}
+	}
+	logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy()
+}
+
+func (sc *SaramaClient) startConsumers(topic *Topic) error {
+	logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
+	var consumerCh *consumerChannels
+	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
+		logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
+		return errors.New("consumers-not-exist")
+	}
+	// For each consumer listening for that topic, start a consumption loop
+	for _, consumer := range consumerCh.consumers {
+		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
+			go sc.consumeFromAPartition(topic, pConsumer, consumerCh)
+		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+			go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
+		} else {
+			logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
+			return errors.New("invalid-consumer")
+		}
+	}
+	return nil
+}
+
+// setupPartitionConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
+// for that topic.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupPartitionConsumerChannel(topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	var pConsumers []sarama.PartitionConsumer
+	var err error
+
+	if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
+		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	consumersIf := make([]interface{}, 0)
+	for _, pConsumer := range pConsumers {
+		consumersIf = append(consumersIf, pConsumer)
+	}
+
+	// Create the consumers/channel structure for that topic.  The channel is
+	// unbuffered for now, to help expose race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: consumersIf,
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start consumers listening on that specific topic
+	go sc.startConsumers(topic)
+
+	return consumerListeningChannel, nil
+}
+
+// setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
+// for that topic.  It also starts the routine that listens for messages on that topic.
+func (sc *SaramaClient) setupGroupConsumerChannel(topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+	var gConsumer *scc.Consumer
+	var err error
+	if gConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw("creating-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+	// Create the consumers/channel structure for that topic.  The channel is
+	// unbuffered for now, to help expose race conditions.
+	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	cc := &consumerChannels{
+		consumers: []interface{}{gConsumer},
+		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+	}
+
+	// Add the consumers channel to the map
+	sc.addTopicToConsumerChannelMap(topic.Name, cc)
+
+	//Start consumers listening on that specific topic
+	go sc.startConsumers(topic)
+
+	return consumerListeningChannel, nil
+}
+
+func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
+	partitionList, err := sc.consumer.Partitions(topic.Name)
+	if err != nil {
+		logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		return nil, err
+	}
+
+	pConsumers := make([]sarama.PartitionConsumer, 0)
+	for _, partition := range partitionList {
+		var pConsumer sarama.PartitionConsumer
+		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
+			logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			return nil, err
+		}
+		pConsumers = append(pConsumers, pConsumer)
+	}
+	return pConsumers, nil
+}
+
+func removeChannel(channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+	var i int
+	var channel chan *ic.InterContainerMessage
+	for i, channel = range channels {
+		if channel == ch {
+			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
+			close(channel)
+			logger.Debug("channel-closed")
+			return channels[:len(channels)-1]
+		}
+	}
+	return channels
+}
+
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; !exist {
+		sc.groupConsumers[topic] = consumer
+	}
+}
+
+func (sc *SaramaClient) deleteFromGroupConsumers(topic string) error {
+	sc.lockOfGroupConsumers.Lock()
+	defer sc.lockOfGroupConsumers.Unlock()
+	if _, exist := sc.groupConsumers[topic]; exist {
+		consumer := sc.groupConsumers[topic]
+		delete(sc.groupConsumers, topic)
+		if err := consumer.Close(); err != nil {
+			logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
new file mode 100644
index 0000000..0cb9535
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import "strings"
+
+const (
+	TopicSeparator = "_"
+	DeviceIdLength = 24
+)
+
+// A Topic definition - may be augmented with additional attributes eventually
+type Topic struct {
+	// The name of the topic. It must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`).
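+	// An illustrative valid name: "openolt_0123456789abcdef01234567".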
+	Name string
+}
+
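+// KVArg is a key-value argument passed along with inter-container requests.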
+type KVArg struct {
+	Key   string
+	Value interface{}
+}
+
+// TODO:  Remove and provide a better way to get the device id
+// GetDeviceIdFromTopic extracts the deviceId from the topic name.  The topic name is formatted either as:
+//			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
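+// For example (illustrative values only): a topic named
+// "openolt_0123456789abcdef01234567" yields "0123456789abcdef01234567", while
+// "rpc-topic" (no separator) or a name whose suffix is not 24 characters yields "".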
+func GetDeviceIdFromTopic(topic Topic) string {
+	pos := strings.LastIndex(topic.Name, TopicSeparator)
+	if pos == -1 {
+		return ""
+	}
+	adjustedPos := pos + len(TopicSeparator)
+	if adjustedPos >= len(topic.Name) {
+		return ""
+	}
+	deviceId := topic.Name[adjustedPos:]
+	if len(deviceId) != DeviceIdLength {
+		return ""
+	}
+	return deviceId
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
new file mode 100644
index 0000000..43567e3
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
@@ -0,0 +1,786 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package log provides a structured Logger interface implemented using the zap logger. It provides the following capabilities:
+//1. Package-level logging - a Go package can register itself (AddPackage) and have a logger created for that package.
+//2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+//3. Dynamic log level change - for a given package (SetPackageLogLevel)
+//4. Provides a default logger for unregistered packages
+//5. Allows key-value pairs to be added to a logger (UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+//6. Adds to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
+// 1. In the appropriate package, add the following in the init section of the package.  The log level can be changed
+// and any number of default fields can be added as well.  The log level specifies the lowest log level that will be
+// in the output while the fields will be automatically added to all log printouts.
+//
+//	log.AddPackage(log.JSON, log.WarnLevel, log.Fields{"anyFieldName": "any value"})
+//
+// 2. In the calling package, just invoke any of the publicly available functions of the logger.  Here is an example
+// of writing an Info log with additional fields:
+//
+//	log.Infow("An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use 1) SetLogLevel from inside your package, 2) SetPackageLogLevel
+// from anywhere, or 3) SetAllLogLevel from anywhere.
+//
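+// A minimal end-to-end sketch (illustrative only):
+//
+//	func init() {
+//		_, _ = log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "example"})
+//	}
+//
+//	func doWork() {
+//		log.Infow("work-started", log.Fields{"attempt": 1})
+//	}
+//
+// Before the process exits, call log.CleanUp() to flush any buffered entries.
+//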
+
+package log
+
+import (
+	"errors"
+	"fmt"
+	zp "go.uber.org/zap"
+	zc "go.uber.org/zap/zapcore"
+	"path"
+	"runtime"
+	"strings"
+)
+
+const (
+	// DebugLevel logs a message at debug level
+	DebugLevel = iota
+	// InfoLevel logs a message at info level
+	InfoLevel
+	// WarnLevel logs a message at warning level
+	WarnLevel
+	// ErrorLevel logs a message at error level
+	ErrorLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using json format, mostly used by an automated logging system consumption
+const JSON = "json"
+
+// Logger represents an abstract logging interface.  Any logging implementation used
+// will need to abide by this interface
+type Logger interface {
+	Debug(...interface{})
+	Debugln(...interface{})
+	Debugf(string, ...interface{})
+	Debugw(string, Fields)
+
+	Info(...interface{})
+	Infoln(...interface{})
+	Infof(string, ...interface{})
+	Infow(string, Fields)
+
+	Warn(...interface{})
+	Warnln(...interface{})
+	Warnf(string, ...interface{})
+	Warnw(string, Fields)
+
+	Error(...interface{})
+	Errorln(...interface{})
+	Errorf(string, ...interface{})
+	Errorw(string, Fields)
+
+	Fatal(...interface{})
+	Fatalln(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalw(string, Fields)
+
+	With(Fields) Logger
+
+	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+	//
+	Warning(...interface{})
+	Warningln(...interface{})
+	Warningf(string, ...interface{})
+
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+
+	//Returns the log level of this specific logger
+	GetLogLevel() int
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *logger
+var cfg zp.Config
+
+var loggers map[string]*logger
+var cfgs map[string]zp.Config
+
+type logger struct {
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
+}
+
+func intToAtomicLevel(l int) zp.AtomicLevel {
+	switch l {
+	case DebugLevel:
+		return zp.NewAtomicLevelAt(zc.DebugLevel)
+	case InfoLevel:
+		return zp.NewAtomicLevelAt(zc.InfoLevel)
+	case WarnLevel:
+		return zp.NewAtomicLevelAt(zc.WarnLevel)
+	case ErrorLevel:
+		return zp.NewAtomicLevelAt(zc.ErrorLevel)
+	case FatalLevel:
+		return zp.NewAtomicLevelAt(zc.FatalLevel)
+	}
+	return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func intToLevel(l int) zc.Level {
+	switch l {
+	case DebugLevel:
+		return zc.DebugLevel
+	case InfoLevel:
+		return zc.InfoLevel
+	case WarnLevel:
+		return zc.WarnLevel
+	case ErrorLevel:
+		return zc.ErrorLevel
+	case FatalLevel:
+		return zc.FatalLevel
+	}
+	return zc.ErrorLevel
+}
+
+func levelToInt(l zc.Level) int {
+	switch l {
+	case zc.DebugLevel:
+		return DebugLevel
+	case zc.InfoLevel:
+		return InfoLevel
+	case zc.WarnLevel:
+		return WarnLevel
+	case zc.ErrorLevel:
+		return ErrorLevel
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
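+// StringToInt converts a log level name ("DEBUG", "INFO", "WARN", "ERROR", "FATAL")
+// to its integer constant, defaulting to ErrorLevel for unknown names.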
+func StringToInt(l string) int {
+	switch l {
+	case "DEBUG":
+		return DebugLevel
+	case "INFO":
+		return InfoLevel
+	case "WARN":
+		return WarnLevel
+	case "ERROR":
+		return ErrorLevel
+	case "FATAL":
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func getDefaultConfig(outputType string, level int, defaultFields Fields) zp.Config {
+	return zp.Config{
+		Level:            intToAtomicLevel(level),
+		Encoding:         outputType,
+		Development:      true,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stderr"},
+		InitialFields:    defaultFields,
+		EncoderConfig: zc.EncoderConfig{
+			LevelKey:       "level",
+			MessageKey:     "msg",
+			TimeKey:        "ts",
+			StacktraceKey:  "stacktrace",
+			LineEnding:     zc.DefaultLineEnding,
+			EncodeLevel:    zc.LowercaseLevelEncoder,
+			EncodeTime:     zc.ISO8601TimeEncoder,
+			EncodeDuration: zc.SecondsDurationEncoder,
+			EncodeCaller:   zc.ShortCallerEncoder,
+		},
+	}
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be used.  This function
+// initializes the default logger (zap's SugaredLogger)
+func SetDefaultLogger(outputType string, level int, defaultFields Fields) (Logger, error) {
+	// Build a custom config using zap
+	cfg = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfg.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLogger = &logger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+
+	return defaultLogger, nil
+}
+
+// AddPackage registers a package to the log map.  Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: AddPackage also returns a reference to the actual logger.  If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// pkgNames parameter should be used for testing only as this function detects the caller's package.
+func AddPackage(outputType string, level int, defaultFields Fields, pkgNames ...string) (Logger, error) {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*logger)
+	}
+
+	var pkgName string
+	for _, name := range pkgNames {
+		pkgName = name
+		break
+	}
+	if pkgName == "" {
+		pkgName, _, _, _ = getCallerInfo()
+	}
+
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName], nil
+	}
+
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build()
+	if err != nil {
+		return nil, err
+	}
+
+	loggers[pkgName] = &logger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+//UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build()
+		if err != nil {
+			return err
+		}
+
+		loggers[pkgName] = &logger{
+			log:         l.Sugar(),
+			parent:      l,
+			packageName: pkgName,
+		}
+	}
+	return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+	i := 0
+	keys := make([]string, len(loggers))
+	for k := range loggers {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// UpdateLogger deletes the logger associated with a caller's package and creates a new logger with the
+// defaultFields.  If a calling package is holding on to a Logger reference obtained from AddPackage invocation, then
+// that package needs to invoke UpdateLogger if it needs to make changes to the default fields and obtain a new logger
+// reference
+func UpdateLogger(defaultFields Fields) (Logger, error) {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; !exist {
+		return nil, errors.New(fmt.Sprintf("package-%s-not-registered", pkgName))
+	}
+
+	// Build a new logger
+	if _, exist := cfgs[pkgName]; !exist {
+		return nil, errors.New(fmt.Sprintf("config-%s-not-registered", pkgName))
+	}
+
+	cfg := cfgs[pkgName]
+	for k, v := range defaultFields {
+		if cfg.InitialFields == nil {
+			cfg.InitialFields = make(map[string]interface{})
+		}
+		cfg.InitialFields[k] = v
+	}
+	l, err := cfg.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the logger
+	loggers[pkgName] = &logger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+func setLevel(cfg zp.Config, level int) {
+	switch level {
+	case DebugLevel:
+		cfg.Level.SetLevel(zc.DebugLevel)
+	case InfoLevel:
+		cfg.Level.SetLevel(zc.InfoLevel)
+	case WarnLevel:
+		cfg.Level.SetLevel(zc.WarnLevel)
+	case ErrorLevel:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	case FatalLevel:
+		cfg.Level.SetLevel(zc.FatalLevel)
+	default:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	}
+}
+
+//SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level int) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		setLevel(cfg, level)
+	}
+}
+
+//SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level int) {
+	// Get proper config
+	for _, cfg := range cfgs {
+		setLevel(cfg, level)
+	}
+}
+
+//GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (int, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
+		return levelToInt(cfg.Level.Level()), nil
+	}
+	return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+//GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() int {
+	return levelToInt(cfg.Level.Level())
+}
+
+//SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level int) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("unregistered-package-%s", pkgName)
+	}
+	cfg := cfgs[pkgName]
+	setLevel(cfg, level)
+	return nil
+}
+
+//SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level int) {
+	setLevel(cfg, level)
+}
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	if defaultLogger != nil {
+		if defaultLogger.parent != nil {
+			if err := defaultLogger.parent.Sync(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+	// The caller of a log function is one stack frame above the last log.go frame, so walk
+	// the frames until the first file that is not log.go and take the caller info from there.
+	maxLevel := 3
+	skiplevel := 3 // Skip count empirically found to land on the last log.go stack frame.
+	pc := make([]uintptr, maxLevel)
+	n := runtime.Callers(skiplevel, pc)
+	packageName := ""
+	funcName := ""
+	fileName := ""
+	var line int
+	if n == 0 {
+		return packageName, fileName, funcName, line
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	var foundFrame runtime.Frame
+	more := true
+	for more {
+		frame, more = frames.Next()
+		_, fileName = path.Split(frame.File)
+		if fileName != "log.go" {
+			foundFrame = frame // First frame after log.go in the frame stack
+			break
+		}
+	}
+	parts := strings.Split(foundFrame.Function, ".")
+	pl := len(parts)
+	if pl >= 2 {
+		funcName = parts[pl-1]
+		if parts[pl-2][0] == '(' {
+			packageName = strings.Join(parts[0:pl-2], ".")
+		} else {
+			packageName = strings.Join(parts[0:pl-1], ".")
+		}
+	}
+
+	if strings.HasSuffix(packageName, ".init") {
+		packageName = strings.TrimSuffix(packageName, ".init")
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+
+	return packageName, fileName, funcName, foundFrame.Line
+}
+
+func getPackageLevelSugaredLogger() *zp.SugaredLogger {
+	pkgName, fileName, funcName, line := getCallerInfo()
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+	}
+	return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+}
+
+func getPackageLevelLogger() Logger {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName]
+	}
+	return defaultLogger
+}
+
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l logger) With(keysAndValues Fields) Logger {
+	return logger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+	l.log.Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.  A line feed is appended by default.
+func (l logger) Debugln(args ...interface{}) {
+	l.log.Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+	l.log.Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Debugw(msg string, keysAndValues Fields) {
+	l.log.Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+	l.log.Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.  A line feed is appended by default.
+func (l logger) Infoln(args ...interface{}) {
+	l.log.Info(args...)
+	//msg := fmt.Sprintln(args...)
+	//l.sourced().Info(msg[:len(msg)-1])
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+	l.log.Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Infow(msg string, keysAndValues Fields) {
+	l.log.Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.  A line feed is appended by default.
+func (l logger) Warnln(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+	l.log.Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Warnw(msg string, keysAndValues Fields) {
+	l.log.Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+	l.log.Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.  A line feed is appended by default.
+func (l logger) Errorln(args ...interface{}) {
+	l.log.Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+	l.log.Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Errorw(msg string, keysAndValues Fields) {
+	l.log.Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+	l.log.Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.  A line feed is appended by default.
+func (l logger) Fatalln(args ...interface{}) {
+	l.log.Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+	l.log.Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Fatalw(msg string, keysAndValues Fields) {
+	l.log.Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l logger) Warning(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.  A line feed is appended by default.
+func (l logger) Warningln(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l logger) Warningf(format string, args ...interface{}) {
+	l.log.Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l logger) V(level int) bool {
+	return l.parent.Core().Enabled(intToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l logger) GetLogLevel() int {
+	return levelToInt(cfgs[l.packageName].Level.Level())
+}
+
+// With returns a logger initialized with the key-value pairs
+func With(keysAndValues Fields) Logger {
+	return logger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Debugw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Infof(format, args...)
+}
+
+//Infow logs a message with some additional context. The variadic key-value
+//pairs are treated as they are in With.
+func Infow(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Warnw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Errorw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Fatalw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(level int) bool {
+	return getPackageLevelLogger().V(level)
+}
+
+//GetLogLevel returns the log level of the invoking package
+func GetLogLevel() int {
+	return getPackageLevelLogger().GetLogLevel()
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go
new file mode 100644
index 0000000..b5d9369
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package pmmetrics
+
+import (
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+// PmMetrics structure holds metric and device info
+type PmMetrics struct {
+	deviceID          string
+	frequency         uint32
+	grouped           bool
+	frequencyOverride bool
+	metrics           map[string]*voltha.PmConfig
+}
+
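+// PmMetricsOption is a functional option used to configure a PmMetrics instance.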
+type PmMetricsOption func(*PmMetrics)
+
+// Frequency sets the interval at which stats are polled
+func Frequency(frequency uint32) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequency = frequency
+	}
+}
+
+// GetSubscriberMetrics will return the metrics subscribed for the device.
+func (pm *PmMetrics) GetSubscriberMetrics() map[string]*voltha.PmConfig {
+	if pm == nil {
+		return nil
+	}
+	return pm.metrics
+}
+
+func Grouped(grouped bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.grouped = grouped
+	}
+}
+
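+// FrequencyOverride sets the frequencyOverride flag on the PmMetrics.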
+func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.frequencyOverride = frequencyOverride
+	}
+}
+
+// Metrics initializes the set of PM metrics, enabling each named counter
+func Metrics(pmNames []string) PmMetricsOption {
+	return func(args *PmMetrics) {
+		args.metrics = make(map[string]*voltha.PmConfig)
+		for _, name := range pmNames {
+			args.metrics[name] = &voltha.PmConfig{
+				Name:    name,
+				Type:    voltha.PmConfig_COUNTER,
+				Enabled: true,
+			}
+		}
+	}
+}
+
+// NewPmMetrics returns a PmMetrics object for the given device, configured via the supplied options.
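+// An illustrative call (metric names and values are example data only):
+//
+//	pm := NewPmMetrics("device-1",
+//		Frequency(150),
+//		Grouped(false),
+//		FrequencyOverride(false),
+//		Metrics([]string{"RxBytes", "TxBytes"}))
+//	pmConfigs := pm.ToPmConfigs()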
+func NewPmMetrics(deviceID string, opts ...PmMetricsOption) *PmMetrics {
+	pm := &PmMetrics{}
+	pm.deviceID = deviceID
+	for _, option := range opts {
+		option(pm)
+	}
+	return pm
+}
+
+// ToPmConfigs converts the PmMetrics into a voltha.PmConfigs message carrying the enabled metrics
+func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
+	pmConfigs := &voltha.PmConfigs{
+		Id:           pm.deviceID,
+		DefaultFreq:  pm.frequency,
+		Grouped:      pm.grouped,
+		FreqOverride: pm.frequencyOverride,
+	}
+	for _, v := range pm.metrics {
+		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
+	}
+	return pmConfigs
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
new file mode 100644
index 0000000..4587675
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
@@ -0,0 +1,1250 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ponresourcemanager
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	bitmap "github.com/boljen/go-bitmap"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v3/pkg/techprofile"
+)
+
+const (
+	//Constants to identify resource pool
+	UNI_ID     = "UNI_ID"
+	ONU_ID     = "ONU_ID"
+	ALLOC_ID   = "ALLOC_ID"
+	GEMPORT_ID = "GEMPORT_ID"
+	FLOW_ID    = "FLOW_ID"
+
+	//Constants for passing command line arguments
+	OLT_MODEL_ARG = "--olt_model"
+	PATH_PREFIX   = "service/voltha/resource_manager/{%s}"
+	/*The resource ranges for a given device model should be placed
+	  at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+	  path on the KV store.
+	  If resource range parameters are to be read from an external KV store,
+	  they are expected to be stored in the following format.
+	  Note: all parameters are MANDATORY for now.
+	  The constants below are used as keys to reference the resource range
+	  parameters on an external KV store; an illustrative example of the
+	  stored document follows this const block.
+	*/
+	UNI_ID_START_IDX      = "uni_id_start"
+	UNI_ID_END_IDX        = "uni_id_end"
+	ONU_ID_START_IDX      = "onu_id_start"
+	ONU_ID_END_IDX        = "onu_id_end"
+	ONU_ID_SHARED_IDX     = "onu_id_shared"
+	ALLOC_ID_START_IDX    = "alloc_id_start"
+	ALLOC_ID_END_IDX      = "alloc_id_end"
+	ALLOC_ID_SHARED_IDX   = "alloc_id_shared"
+	GEMPORT_ID_START_IDX  = "gemport_id_start"
+	GEMPORT_ID_END_IDX    = "gemport_id_end"
+	GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+	FLOW_ID_START_IDX     = "flow_id_start"
+	FLOW_ID_END_IDX       = "flow_id_end"
+	FLOW_ID_SHARED_IDX    = "flow_id_shared"
+	NUM_OF_PON_PORT       = "pon_ports"
+
+	/*
+	   The KV store backend is initialized with a path prefix and we need to
+	   provide only the suffix.
+	*/
+	PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+
+	//resource path suffix
+	//Path on the KV store for storing alloc id ranges and resource pool for a given interface
+	//Format: <device_id>/alloc_id_pool/<pon_intf_id>
+	ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+	//Path on the KV store for storing gemport id ranges and resource pool for a given interface
+	//Format: <device_id>/gemport_id_pool/<pon_intf_id>
+	GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+	//Path on the KV store for storing onu id ranges and resource pool for a given interface
+	//Format: <device_id>/onu_id_pool/<pon_intf_id>
+	ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+	//Path on the KV store for storing flow id ranges and resource pool for a given interface
+	//Format: <device_id>/flow_id_pool/<pon_intf_id>
+	FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+
+	//Path on the KV store for storing list of alloc IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+	ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+
+	//Path on the KV store for storing list of gemport IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+	GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+
+	//Path on the KV store for storing list of Flow IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+	FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+
+	//Flow Id info: used to store more metadata associated with the flow_id
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+	FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
+
+	//path on the kvstore to store onugem info map
+	//format: <device-id>/onu_gem_info/<intfid>
+	ONU_GEM_INFO_PATH = "{%s}/onu_gem_info/{%d}" // onu_gem/<(intfid)>
+
+	//Constants for internal usage.
+	PON_INTF_ID     = "pon_intf_id"
+	START_IDX       = "start_idx"
+	END_IDX         = "end_idx"
+	POOL            = "pool"
+	NUM_OF_PON_INTF = 16
+
+	KVSTORE_RETRY_TIMEOUT = 5
+)
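+
+// An illustrative resource-range document as stored on the KV store.  All values are
+// example numbers only; the keys are the *_IDX constants defined above:
+//
+//	{
+//	    "onu_id_start": 1, "onu_id_end": 127, "onu_id_shared": 0,
+//	    "alloc_id_start": 1024, "alloc_id_end": 2816, "alloc_id_shared": 0,
+//	    "gemport_id_start": 1024, "gemport_id_end": 8960, "gemport_id_shared": 0,
+//	    "flow_id_start": 1, "flow_id_end": 16383, "flow_id_shared": 0,
+//	    "uni_id_start": 0, "uni_id_end": 3,
+//	    "pon_ports": 16
+//	}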
+
+//type ResourceTypeIndex string
+//type ResourceType string
+
+type PONResourceManager struct {
+	//Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
+	Technology     string
+	DeviceType     string
+	DeviceID       string
+	Backend        string // etcd or consul
+	Host           string // host ip of the KV store
+	Port           int    // port number for the KV store
+	OLTModel       string
+	KVStore        *db.Backend
+	TechProfileMgr tp.TechProfileIf // instance of *tp.TechProfileMgr
+
+	// Below attribute, pon_resource_ranges, should be initialized
+	// by reading from KV store.
+	PonResourceRanges  map[string]interface{}
+	SharedResourceMgrs map[string]*PONResourceManager
+	SharedIdxByType    map[string]string
+	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+	log.Infow("kv-store-type", log.Fields{"store": storeType})
+	switch storeType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func SetKVClient(Technology string, Backend string, Host string, Port int) *db.Backend {
+	addr := Host + ":" + strconv.Itoa(Port)
+	// TODO: Make sure a direct call to NewBackend works with the backend; currently there is an
+	// issue between the KV store and the backend, and the core is not calling NewBackend directly
+	kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
+	if err != nil {
+		log.Fatalw("Failed to init KV client", log.Fields{"err": err})
+		return nil
+	}
+	kvbackend := &db.Backend{
+		Client:     kvClient,
+		StoreType:  Backend,
+		Host:       Host,
+		Port:       Port,
+		Timeout:    KVSTORE_RETRY_TIMEOUT,
+		PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
+
+	return kvbackend
+}
+
+// NewPONResourceManager creates a new PON resource manager.
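+// Illustrative call (the technology, model, host and port values are example data only):
+//
+//	ponRMgr, err := NewPONResourceManager("xgspon", "default_olt", "device-1", "etcd", "127.0.0.1", 2379)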
+func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
+	var PONMgr PONResourceManager
+	PONMgr.Technology = Technology
+	PONMgr.DeviceType = DeviceType
+	PONMgr.DeviceID = DeviceID
+	PONMgr.Backend = Backend
+	PONMgr.Host = Host
+	PONMgr.Port = Port
+	PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
+	if PONMgr.KVStore == nil {
+		log.Error("KV Client initilization failed")
+		return nil, errors.New("Failed to init KV client")
+	}
+	// Initialize techprofile for this technology
+	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Host, Port); PONMgr.TechProfileMgr == nil {
+		log.Error("Techprofile initialization failed")
+		return nil, errors.New("Failed to init tech profile")
+	}
+	PONMgr.PonResourceRanges = make(map[string]interface{})
+	PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+	PONMgr.SharedIdxByType = make(map[string]string)
+	PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+	PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+	PONMgr.OLTModel = DeviceType
+	return &PONMgr, nil
+}
+
+/*
+  InitResourceRangesFromKVStore initializes the PON resource ranges with config fetched from the KV store.
+  It returns true if the PON resource ranges were initialized, and false otherwise.
+  The ranges are looked up on the KV store under the OLT model key, if available.
+*/
+
+func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore() bool {
+	if PONRMgr.OLTModel == "" {
+		log.Error("Failed to get OLT model")
+		return false
+	}
+	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+	//get resource from kv store
+	Result, err := PONRMgr.KVStore.Get(Path)
+	if err != nil {
+		log.Debugf("Error in fetching resource %s from KV strore", Path)
+		return false
+	}
+	if Result == nil {
+		log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+		return false
+	}
+	//update internal ranges from kv ranges. If there are missing
+	// values in the KV profile, continue to use the defaults
+	Value, err := ToByte(Result.Value)
+	if err != nil {
+		log.Error("Failed to convert kvpair to byte string")
+		return false
+	}
+	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+		log.Error("Failed to Unmarshal json byte")
+		return false
+	}
+	log.Debug("Init resource ranges from kvstore success")
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+	/*
+	   Update the ranges for all resource types in the internal maps
+	   param: resource type start index
+	   param: start ID
+	   param: resource type end index
+	   param: end ID
+	   param: resource type shared index
+	   param: shared pool id
+	   param: global resource manager
+	*/
+	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+
+	if StartID != 0 {
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
+	}
+	if EndID != 0 {
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
+	}
+	//if SharedPoolID != 0 {
+	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+	//}
+	if RMgr != nil {
+		PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+	}
+}
+
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
+	ONUIDEnd uint32,
+	ONUIDSharedPoolID uint32,
+	AllocIDStart uint32,
+	AllocIDEnd uint32,
+	AllocIDSharedPoolID uint32,
+	GEMPortIDStart uint32,
+	GEMPortIDEnd uint32,
+	GEMPortIDSharedPoolID uint32,
+	FlowIDStart uint32,
+	FlowIDEnd uint32,
+	FlowIDSharedPoolID uint32,
+	UNIIDStart uint32,
+	UNIIDEnd uint32,
+	NoOfPONPorts uint32,
+	IntfIDs []uint32) bool {
+
+	/*Initialize default PON resource ranges
+
+	  :param onu_id_start_idx: onu id start index
+	  :param onu_id_end_idx: onu id end index
+	  :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+	  :param alloc_id_start_idx: alloc id start index
+	  :param alloc_id_end_idx: alloc id end index
+	  :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+	  :param gemport_id_start_idx: gemport id start index
+	  :param gemport_id_end_idx: gemport id end index
+	  :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+	  :param flow_id_start_idx: flow id start index
+	  :param flow_id_end_idx: flow id end index
+	  :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+	  :param num_of_pon_ports: number of PON ports
+	  :param intf_ids: interfaces serviced by this manager
+	*/
+	PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+	log.Debug("Initialize default range values")
+	var i uint32
+	if IntfIDs == nil {
+		for i = 0; i < NoOfPONPorts; i++ {
+			PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+		}
+	} else {
+		PONRMgr.IntfIDs = IntfIDs
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePool() error {
+
+	//Initialize resource pool for all PON ports.
+
+	log.Debug("Init resource ranges")
+
+	var err error
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
+			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init ONU ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
+			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init ALLOC ID resource pool ")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
+			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init GEMPORT ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
+			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init FLOW ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return err
+}
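+
+// Illustrative bring-up sequence (not part of the library): seed the ranges
+// and then build the per-interface pools. The concrete range values below
+// are hypothetical; real deployments derive them from the OLT's device info.
+//
+//	mgr.InitDefaultPONResourceRanges(
+//		1, 32, 0, // ONU IDs, no shared pool
+//		1024, 2816, 0, // alloc IDs
+//		1024, 8960, 0, // gemport IDs
+//		1, 64, 0, // flow IDs
+//		0, 4, // UNI IDs
+//		16, nil) // 16 PON ports; IntfIDs derived
+//	if err := mgr.InitDeviceResourcePool(); err != nil {
+//		log.Errorw("resource-pool-init-failed", log.Fields{"error": err})
+//	}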
+
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePool() error {
+
+	//Clear resource pool for all PON ports.
+
+	log.Debug("Clear resource ranges")
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(Intf, ONU_ID); !status {
+			log.Error("Failed to clear ONU ID resource pool")
+			return errors.New("Failed to clear ONU ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(Intf, ALLOC_ID); !status {
+			log.Error("Failed to clear ALLOC ID resource pool")
+			return errors.New("Failed to clear ALLOC ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(Intf, GEMPORT_ID); !status {
+			log.Error("Failed to clear GEMPORT ID resource pool")
+			return errors.New("Failed to clear GEMPORT ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(Intf, FLOW_ID); !status {
+			log.Error("Failed to clear FLOW ID resource pool")
+			return errors.New("Failed to clear FLOW ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) InitResourceIDPool(Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
+
+	/*Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+	  :param pon_intf_id: OLT PON interface id
+	  :param resource_type: String to identify type of resource
+	  :param start_idx: start index for onu id pool
+	  :param end_idx: end index for onu id pool
+	  :return boolean: True if resource id pool initialized else false
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
+	}
+
+	Path := PONRMgr.GetPath(Intf, ResourceType)
+	if Path == "" {
+		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+	}
+
+	//In case of adapter reboot and reconciliation resource in kv store
+	//checked for its presence if not kv store update happens
+	Res, err := PONRMgr.GetResource(Path)
+	if (err == nil) && (Res != nil) {
+		log.Debugf("Resource %s already present in store ", Path)
+		return nil
+	} else {
+		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
+		if err != nil {
+			log.Errorf("Failed to format resource")
+			return err
+		}
+		// Add resource as json in kv store.
+		err = PONRMgr.KVStore.Put(Path, FormatResult)
+		if err == nil {
+			log.Debug("Successfuly posted to kv store")
+			return err
+		}
+	}
+
+	log.Debug("Error initializing pool")
+
+	return err
+}
+
+func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32) ([]byte, error) {
+	/*
+	   Format resource as json.
+	   :param pon_intf_id: OLT PON interface id
+	   :param start_idx: start index for id pool
+	   :param end_idx: end index for id pool
+	   :return dictionary: resource formatted as map
+	*/
+	// Format resource as json to be stored in backend store
+	Resource := make(map[string]interface{})
+	Resource[PON_INTF_ID] = IntfID
+	Resource[START_IDX] = StartIDx
+	Resource[END_IDX] = EndIDx
+	/*
+	   Resource pool stored in backend store as binary string.
+	   Tracking the resource allocation will be done by setting the bits \
+	   in the byte array. The index set will be the resource number allocated.
+	*/
+	var TSData *bitmap.Threadsafe
+	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+		log.Error("Failed to create a bitmap")
+		return nil, errors.New("Failed to create bitmap")
+	}
+	Resource[POOL] = TSData.Data(false) //we pass false so as the TSData lib api does not do a copy of the data and return
+
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		log.Errorf("Failed to marshall resource")
+		return nil, err
+	}
+	return Value, err
+}
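+
+// For reference (illustrative, assuming the key constants above resolve to
+// the lowercase names shown): a freshly formatted resource for interface 0
+// with IDs 1..32 marshals to JSON of roughly this shape, where "pool" is the
+// base64 encoding of the all-zero bitmap:
+//
+//	{"pon_intf_id": 0, "start_idx": 1, "end_idx": 32, "pool": "AAAAAA=="}
+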
+func (PONRMgr *PONResourceManager) GetResource(Path string) (map[string]interface{}, error) {
+	/*
+	   Get resource from kv store.
+
+	   :param path: path to get resource
+	   :return: resource if resource present in kv store else None
+	*/
+	//get resource from kv store
+
+	var Value []byte
+	Result := make(map[string]interface{})
+	var Str string
+
+	Resource, err := PONRMgr.KVStore.Get(Path)
+	if (err != nil) || (Resource == nil) {
+		log.Debugf("Resource  unavailable at %s", Path)
+		return nil, err
+	}
+
+	Value, err = ToByte(Resource.Value)
+	if err != nil {
+		log.Error("Failed to convert resource value to byte array")
+		return nil, err
+	}
+
+	// decode resource fetched from backend store to dictionary
+	err = json.Unmarshal(Value, &Result)
+	if err != nil {
+		log.Error("Failed to decode resource")
+		return Result, err
+	}
+	/*
+	   resource pool in backend store stored as binary string whereas to
+	   access the pool to generate/release IDs it need to be converted
+	   as BitArray
+	*/
+	Str, err = ToString(Result[POOL])
+	if err != nil {
+		log.Error("Failed to conver to kv pair to string")
+		return Result, err
+	}
+	Decode64, err := base64.StdEncoding.DecodeString(Str)
+	if err != nil {
+		log.Error("Failed to decode resource pool from base64")
+		return Result, err
+	}
+	Result[POOL], err = ToByte(Decode64)
+	if err != nil {
+		log.Error("Failed to convert resource pool to byte")
+		return Result, err
+	}
+
+	return Result, err
+}
+
+func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
+	/*
+	   Get path for given resource type.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :return: path for given resource type
+	*/
+
+	/*
+	   Get the shared pool for the given resource type.
+	   all the resource ranges and the shared resource maps are initialized during the init.
+	*/
+	SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+	if SharedPoolID != 0 {
+		IntfID = SharedPoolID
+	}
+	var Path string
+	if ResourceType == ONU_ID {
+		Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == ALLOC_ID {
+		Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == GEMPORT_ID {
+		Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == FLOW_ID {
+		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else {
+		log.Error("Invalid resource pool identifier")
+	}
+	return Path
+}
+
+func (PONRMgr *PONResourceManager) GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
+	/*
+	   Create alloc/gemport/onu/flow id for given OLT PON interface.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :param num_of_id: required number of ids
+	   :return list/uint32/None: list, uint32 or None if resource type is
+	    alloc_id/gemport_id, onu_id or invalid type respectively
+	*/
+	if NumIDs < 1 {
+		log.Error("Invalid number of resources requested")
+		return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
+	}
+	// delegate to the master instance if sharing enabled across instances
+
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
+	}
+	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	if Path == "" {
+		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+	}
+	log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+	var Result []uint32
+	var NextID uint32
+	Resource, err := PONRMgr.GetResource(Path)
+	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
+		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+			log.Error("Failed to Generate ID")
+			return Result, err
+		}
+		Result = append(Result, NextID)
+	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+		if NumIDs == 1 {
+			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+				log.Error("Failed to Generate ID")
+				return Result, err
+			}
+			Result = append(Result, NextID)
+		} else {
+			for NumIDs > 0 {
+				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+					log.Error("Failed to Generate ID")
+					return Result, err
+				}
+				Result = append(Result, NextID)
+				NumIDs--
+			}
+		}
+	} else {
+		log.Error("get resource failed")
+		return Result, err
+	}
+
+	//Update resource in kv store
+	if PONRMgr.UpdateResource(Path, Resource) != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
+	}
+	return Result, nil
+}
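+
+// Illustrative usage (not part of the library): allocate two gemport IDs on
+// PON interface 0 and release them when done. FreeResourceID returns a bool
+// rather than an error, so its result is discarded here.
+//
+//	ids, err := mgr.GetResourceID(0, GEMPORT_ID, 2)
+//	if err != nil {
+//		return err
+//	}
+//	defer mgr.FreeResourceID(0, GEMPORT_ID, ids)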
+
+func checkValidResourceType(ResourceType string) bool {
+	KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+
+	for _, v := range KnownResourceTypes {
+		if v == ResourceType {
+			return true
+		}
+	}
+	return false
+}
+
+func (PONRMgr *PONResourceManager) FreeResourceID(IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
+	/*
+	   Release alloc/gemport/onu/flow id for given OLT PON interface.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :param release_content: required number of ids
+	   :return boolean: True if all IDs in given release_content release else False
+	*/
+	if !checkValidResourceType(ResourceType) {
+		log.Error("Invalid resource type")
+		return false
+	}
+	if ReleaseContent == nil {
+		log.Debug("Nothing to release")
+		return true
+	}
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
+	}
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	if Path == "" {
+		log.Error("Failed to get path")
+		return false
+	}
+	Resource, err := PONRMgr.GetResource(Path)
+	if err != nil {
+		log.Error("Failed to get resource")
+		return false
+	}
+	for _, Val := range ReleaseContent {
+		PONRMgr.ReleaseID(Resource, Val)
+	}
+	if PONRMgr.UpdateResource(Path, Resource) != nil {
+		log.Errorf("Free resource for %s failed", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateResource(Path string, Resource map[string]interface{}) error {
+	/*
+	   Update resource in resource kv store.
+	   :param path: path to update resource
+	   :param resource: resource need to be updated
+	   :return boolean: True if resource updated in kv store else False
+	*/
+	// TODO resource[POOL] = resource[POOL].bin
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+	err = PONRMgr.KVStore.Put(Path, Value)
+	if err != nil {
+		log.Error("failed to put data to kv store %s", Path)
+		return err
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearResourceIDPool(IntfID uint32, ResourceType string) bool {
+	/*
+	   Clear Resource Pool for a given Resource Type on a given PON Port.
+	   :return boolean: True if removed else False
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
+	}
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	if Path == "" {
+		log.Error("Failed to get path")
+		return false
+	}
+
+	if err := PONRMgr.KVStore.Delete(Path); err != nil {
+		log.Errorf("Failed to delete resource %s", Path)
+		return false
+	}
+	log.Debugf("Cleared resource %s", Path)
+	return true
+}
+
+func (PONRMgr *PONResourceManager) InitResourceMap(PONIntfONUID string) {
+	/*
+	   Initialize resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// initialize pon_intf_onu_id tuple to alloc_ids map
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var AllocIDs []byte
+	Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
+	if Result != nil {
+		log.Error("Failed to update the KV store")
+		return
+	}
+	// initialize pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var GEMPortIDs []byte
+	Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
+	if Result != nil {
+		log.Error("Failed to update the KV store")
+		return
+	}
+}
+
+func (PONRMgr *PONResourceManager) RemoveResourceMap(PONIntfONUID string) bool {
+	/*
+	   Remove resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// remove pon_intf_onu_id tuple to alloc_ids map
+	var err error
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
+		log.Errorf("Failed to remove resource %s", AllocIDPath)
+		return false
+	}
+	// remove pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	err = PONRMgr.KVStore.Delete(GEMPortIDPath)
+	if err != nil {
+		log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+		return false
+	}
+
+	FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err == nil {
+		for _, Flow := range FlowIDs {
+			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
+			if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
+				log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+				return false
+			}
+		}
+	}
+
+	if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
+		log.Errorf("Failed to remove resource %s", FlowIDPath)
+		return false
+	}
+
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(IntfONUID string) []uint32 {
+	/*
+	   Get currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of alloc_ids if available, else None
+	*/
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				return Data
+			}
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Error("Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(IntfONUID string) []uint32 {
+	/*
+	   Get currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of gemport IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	log.Debugf("Getting current gemports for %s", Path)
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				return Data
+			}
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	} else {
+		log.Errorf("Failed to get data from kvstore for %s", Path)
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(IntfONUID string) []uint32 {
+	/*
+	   Get currently configured flow ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of Flow IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				return Data
+			}
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{}) error {
+	/*
+	   Get flow details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	   :param Data: Result
+	   :return error: nil if no error in getting from KV store
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	Value, err := PONRMgr.KVStore.Get(Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				return err
+			}
+			if err = json.Unmarshal(Val, Data); err != nil {
+				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	return err
+}
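+
+// Illustrative usage (not part of the library): Data must be a pointer so
+// that json.Unmarshal can populate it. FlowInfo is a hypothetical caller-side
+// type; the resource manager is agnostic to the concrete structure stored.
+//
+//	var info FlowInfo
+//	if err := mgr.GetFlowIDInfo(intfOnuID, flowID, &info); err != nil {
+//		log.Errorw("flow-info-fetch-failed", log.Fields{"error": err})
+//	}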
+
+func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(IntfONUID string, FlowID uint32) bool {
+	/*
+	   Get flow_id details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	*/
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	if err := PONRMgr.KVStore.Delete(Path); err != nil {
+		log.Errorf("Falied to remove resource %s", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(IntfONUID string, AllocIDs []uint32) error {
+	/*
+	   Update currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param alloc_ids: list of alloc ids
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	Value, err = json.Marshal(AllocIDs)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(IntfONUID string, GEMPortIDs []uint32) error {
+	/*
+	   Update currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param gemport_ids: list of gem port ids
+	*/
+
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	log.Debugf("Updating gemport ids for %s", Path)
+	Value, err = json.Marshal(GEMPortIDs)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
+	/*
+	   Check for a flow id in a given list of flow IDs.
+	   :param FlowIDList: List of Flow IDs
+	   :param FlowID: Flow ID to check in the list
+	   :return: true and the index if present, false otherwise.
+	*/
+
+	for idx := range FlowIDList {
+		if FlowID == FlowIDList[idx] {
+			return true, uint32(idx)
+		}
+	}
+	return false, 0
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(IntfONUID string, FlowID uint32, Add bool) error {
+	/*
+	   Update the flow_id list of the ONU (add or remove flow_id from the list)
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: flow ID
+	   :param add: Boolean flag to indicate whether the flow_id should be
+	               added or removed from the list. Defaults to adding the flow.
+	*/
+	var Value []byte
+	var err error
+	var RetVal bool
+	var IDx uint32
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
+
+	if Add {
+		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal {
+			return err
+		}
+		FlowIDs = append(FlowIDs, FlowID)
+	} else {
+		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); !RetVal {
+			return err
+		}
+		// delete the index and shift
+		FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+	}
+	Value, err = json.Marshal(FlowIDs)
+	if err != nil {
+		log.Error("Failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface{}) error {
+	/*
+	   Update any metadata associated with the flow_id. The flow_data could be JSON
+	   or any other data structure; the resource manager doesn't care.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow ID
+	   :param flow_data: Flow data blob
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+	Value, err = json.Marshal(FlowData)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
+	/*
+	   Generate unique id having OFFSET as start
+	   :param resource: resource used to generate ID
+	   :return uint32: generated id
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		log.Error("Failed to convert resource to byte array")
+		return 0, err
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		log.Error("Failed to get data from byte array")
+		return 0, errors.New("Failed to get data from byte array")
+	}
+
+	Len := Data.Len()
+	var Idx int
+	for Idx = 0; Idx < Len; Idx++ {
+		Val := Data.Get(Idx)
+		if !Val {
+			break
+		}
+	}
+	Data.Set(Idx, true)
+	res := uint32(Resource[START_IDX].(float64))
+	Resource[POOL] = Data.Data(false)
+	log.Debugf("Generated ID for %d", (uint32(Idx) + res))
+	return (uint32(Idx) + res), err
+}
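+
+// Worked example of the offset arithmetic above (illustrative): with
+// START_IDX stored as 1024 and bits 0..2 already set, the first free bit is
+// index 3, so the generated ID is 1024 + 3 = 1027. ReleaseID below inverts
+// this: releasing 1027 clears bit 1027 - 1024 = 3.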
+
+func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
+	/*
+	   Release unique id having OFFSET as start index.
+	   :param resource: resource used to release ID
+	   :param unique_id: id need to be released
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		log.Error("Failed to convert resource to byte array")
+		return false
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		log.Error("Failed to get resource pool")
+		return false
+	}
+	var Idx uint32
+	Idx = Id - uint32(Resource[START_IDX].(float64))
+	Data.Set(int(Idx), false)
+	Resource[POOL] = Data.Data(false)
+
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+	return PONRMgr.Technology
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+	return ALLOC_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+	return GEMPORT_ID
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+func (PONRMgr *PONResourceManager) AddOnuGemInfo(intfID uint32, onuGemData interface{}) error {
+	/*
+	   Update onugem info map,
+	   :param pon_intf_id: reference of PON interface id
+	   :param onuGemData: onugem info map
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
+	Value, err = json.Marshal(onuGemData)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) GetOnuGemInfo(IntfId uint32, onuGemInfo interface{}) error {
+	/*
+	  Get onugem info map from the kv store
+	  :param intfid: reference PON interface id
+	  :param onuGemInfo: onugem info to return from the kv store.
+	*/
+	var Val []byte
+
+	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
+	value, err := PONRMgr.KVStore.Get(path)
+	if err != nil {
+		log.Errorw("Failed to get from kv store", log.Fields{"path": path})
+		return err
+	} else if value == nil {
+		log.Debug("No onuinfo for path", log.Fields{"path": path})
+		return nil // returning nil as this could happen if there are no onus for the interface yet
+	}
+	if Val, err = kvstore.ToByte(value.Value); err != nil {
+		log.Error("Failed to convert to byte array")
+		return err
+	}
+
+	if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
+		log.Error("Failed to unmarshall")
+		return err
+	}
+	log.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
+	return err
+}
+
+func (PONRMgr *PONResourceManager) DelOnuGemInfoForIntf(intfId uint32) error {
+	/*
+	   delete onugem info for an interface from the kv store
+	   :param intfid: reference PON interface id
+	*/
+
+	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
+	if err := PONRMgr.KVStore.Delete(path); err != nil {
+		log.Errorf("Falied to remove resource %s", path)
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
new file mode 100644
index 0000000..9f00953
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"context"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"net/http"
+	"sync"
+)
+
+// ProbeContextKeyType the type of the key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus typed values for service status
+type ServiceStatus int
+
+const (
+	// ServiceStatusUnknown initial state of services
+	ServiceStatusUnknown ServiceStatus = iota
+
+	// ServiceStatusPreparing to optionally be used for prep, such as connecting
+	ServiceStatusPreparing
+
+	// ServiceStatusPrepared to optionally be used when prep is complete, but before run
+	ServiceStatusPrepared
+
+	// ServiceStatusRunning service is functional
+	ServiceStatusRunning
+
+	// ServiceStatusStopped service has stopped, but not because of error
+	ServiceStatusStopped
+
+	// ServiceStatusFailed service has stopped because of an error
+	ServiceStatusFailed
+
+	// ServiceStatusNotReady service has started but is unable to accept requests
+	ServiceStatusNotReady
+)
+
+const (
+	// ProbeContextKey value of context key to fetch probe
+	ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String convert ServiceStatus values to strings
+func (s ServiceStatus) String() string {
+	switch s {
+	default:
+		fallthrough
+	case ServiceStatusUnknown:
+		return "Unknown"
+	case ServiceStatusPreparing:
+		return "Preparing"
+	case ServiceStatusPrepared:
+		return "Prepared"
+	case ServiceStatusRunning:
+		return "Running"
+	case ServiceStatusStopped:
+		return "Stopped"
+	case ServiceStatusFailed:
+		return "Failed"
+	case ServiceStatusNotReady:
+		return "NotReady"
+	}
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+	Name   string
+	Status ServiceStatus
+}
+
+// Probe receiver on which to implement probe capabilities
+type Probe struct {
+	readyFunc  func(map[string]ServiceStatus) bool
+	healthFunc func(map[string]ServiceStatus) bool
+
+	mutex     sync.RWMutex
+	status    map[string]ServiceStatus
+	isReady   bool
+	isHealthy bool
+}
+
+// WithReadyFunc override the default ready calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.readyFunc = readyFunc
+	return p
+}
+
+// WithHealthFunc override the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.healthFunc = healthFunc
+	return p
+}
+
+// RegisterService registers one or more service names with the probe; status will be tracked against each service name
+func (p *Probe) RegisterService(names ...string) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+	for _, name := range names {
+		if _, ok := p.status[name]; !ok {
+			p.status[name] = ServiceStatusUnknown
+			log.Debugw("probe-service-registered", log.Fields{"service-name": name})
+		}
+	}
+
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+}
+
+// UpdateStatus utility function to send a service update to the probe
+func (p *Probe) UpdateStatus(name string, status ServiceStatus) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	// if status hasn't changed, avoid doing useless work
+	existingStatus, ok := p.status[name]
+	if ok && (existingStatus == status) {
+		return
+	}
+
+	p.status[name] = status
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+	log.Debugw("probe-service-status-updated",
+		log.Fields{
+			"service-name": name,
+			"status":       status.String(),
+			"ready":        p.isReady,
+			"health":       p.isHealthy,
+		})
+}
+
+// GetStatus returns the current status of the named service, or
+// ServiceStatusUnknown if the service has not been registered.
+func (p *Probe) GetStatus(name string) ServiceStatus {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	currentStatus, ok := p.status[name]
+	if ok {
+		return currentStatus
+	}
+
+	return ServiceStatusUnknown
+}
+
+// GetProbeFromContext returns the Probe instance stored in the context under
+// ProbeContextKey, or nil if none is present.
+func GetProbeFromContext(ctx context.Context) *Probe {
+	if ctx != nil {
+		if value := ctx.Value(ProbeContextKey); value != nil {
+			if p, ok := value.(*Probe); ok {
+				return p
+			}
+		}
+	}
+	return nil
+}
+
+// UpdateStatusFromContext a convenience function to pull the Probe reference from the
+// Context, if it exists, and call UpdateStatus on that Probe reference. If the Context
+// is nil, or if no Probe reference is associated with the ProbeContextKey, nothing
+// happens.
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+	p := GetProbeFromContext(ctx)
+	if p != nil {
+		p.UpdateStatus(name, status)
+	}
+}
+
+// pulled out to a function to help better enable unit testing
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isReady {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isHealthy {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	w.Write([]byte("{"))
+	comma := ""
+	for c, s := range p.status {
+		w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String())))
+		comma = ", "
+	}
+	w.Write([]byte("}"))
+}
+
+// ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
+func (p *Probe) ListenAndServe(address string) {
+	mux := http.NewServeMux()
+
+	// Returns the result of the readyFunc calculation
+	mux.HandleFunc("/readz", p.readzFunc)
+
+	// Returns the result of the healthFunc calculation
+	mux.HandleFunc("/healthz", p.healthzFunc)
+
+	// Returns the details of the services and their status as JSON
+	mux.HandleFunc("/detailz", p.detailzFunc)
+	s := &http.Server{
+		Addr:    address,
+		Handler: mux,
+	}
+	log.Fatal(s.ListenAndServe())
+}
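+
+// Illustrative wiring (not part of this file): register a service, expose the
+// probe endpoints, and pass the probe to downstream code via context. The
+// address and service name are hypothetical.
+//
+//	p := &Probe{}
+//	p.RegisterService("kv-store")
+//	go p.ListenAndServe(":8080")
+//	ctx := context.WithValue(context.Background(), ProbeContextKey, p)
+//	UpdateStatusFromContext(ctx, "kv-store", ServiceStatusRunning)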
+
+// IsReady returns true if the probe's ready calculation currently evaluates to ready.
+func (p *Probe) IsReady() bool {
+	return p.isReady
+}
+
+// defaultReadyFunc if all services are running then ready, else not
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status != ServiceStatusRunning {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultHealthFunc if no service is stopped or failed, then healthy, else not.
+// services start as Unknown, so they are considered healthy
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status == ServiceStatusStopped || status == ServiceStatusFailed {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
new file mode 100644
index 0000000..d11f8e4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
@@ -0,0 +1,141 @@
+ {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
new file mode 100644
index 0000000..03a396d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
@@ -0,0 +1,336 @@
+Technology Profile Management
+Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
+
+NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
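+
+For example, with technology xgspon and profile ID 64, the commands later in this document store and retrieve the profile JSON at the key service/voltha/technology_profiles/xgspon/64.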
+
+Example JSON :
+
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Creating Technology Profiles
+Technology profiles are a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. jq can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in the VOLTHA key/value store using the standard etcd command line tool etcdctl, or using an HTTP PUT operation using curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
+
+etcd version 3 is used by the techprofile module. Export this variable before running the etcdctl commands below: export ETCDCTL_API=3
+
+Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json the following commands could be used to store or update the technical template into the proper location in the etcd key/value store:
+
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64
+
+jq -c . 4QueueHybridProfileMap1.json |  etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/xgspon/64
+
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to produce compact JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
+
+Listing Technical Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles etcdctl ls is used. It can be used in conjunction with the -r option to recursively list profiles.
+
+
+#List Tech profile 
+etcdctl --endpoints=<EtcdIPAddres>:2379 get  service/voltha/technology_profiles/xgspon/64
+
+
+# Example output
+A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required)
+
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get /xgspon/64 | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/xgspon/64
+# Example output
+service/voltha/technology_profiles/xgspon/64/uni-1
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm /xgspon/64
+
+# Remove all technology profiles associated with Technology xgspon and ID 64(including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /xgspon/64
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
new file mode 100644
index 0000000..2df7147
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+)
+
+// tech profile default constants
+const (
+	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
+	DEFAULT_TECH_PROFILE_TABLE_ID = 64
+	defaultVersion                = 1.0
+	defaultLogLevel               = 0
+	defaultGemportsCount          = 1
+	defaultNumTconts              = 1
+	defaultPbits                  = "0b11111111"
+
+	defaultKVStoreType    = "etcd"
+	defaultKVStoreTimeout = 5 //in seconds
+	defaultKVStoreHost    = "127.0.0.1"
+	defaultKVStorePort    = 2379 // Consul = 8500; Etcd = 2379
+
+	// Tech profile path prefix in kv store
+	defaultKVPathPrefix = "service/voltha/technology_profiles"
+
+	// Tech profile path in kv store
+	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
+
+	// Tech profile instance path in kv store
+	// Format: <technology>/<tech_profile_tableID>/<uni_port_name>
+	defaultTPInstanceKVPath = "%s/%d/%s"
+)
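+
+// Illustrative expansion of the templates above (values hypothetical): for
+// technology "xgspon", table ID 64 and UNI port "pon-{0}/onu-{1}/uni-{0}",
+// fmt.Sprintf(defaultTPInstanceKVPath, "xgspon", 64, "pon-{0}/onu-{1}/uni-{0}")
+// yields "xgspon/64/pon-{0}/onu-{1}/uni-{0}", which the consuming code is
+// expected to combine with the TPKVPathPrefix.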
+
+// Tech-Profile JSON String Keys
+// NOTE: Tech profile template JSON files should comply with the keys below
+const (
+	NAME                               = "name"
+	PROFILE_TYPE                       = "profile_type"
+	VERSION                            = "version"
+	NUM_GEM_PORTS                      = "num_gem_ports"
+	INSTANCE_CONTROL                   = "instance_control"
+	US_SCHEDULER                       = "us_scheduler"
+	DS_SCHEDULER                       = "ds_scheduler"
+	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
+	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
+	ONU                                = "onu"
+	UNI                                = "uni"
+	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
+	DIRECTION                          = "direction"
+	ADDITIONAL_BW                      = "additional_bw"
+	PRIORITY                           = "priority"
+	Q_SCHED_POLICY                     = "q_sched_policy"
+	WEIGHT                             = "weight"
+	PBIT_MAP                           = "pbit_map"
+	DISCARD_CONFIG                     = "discard_config"
+	MAX_THRESHOLD                      = "max_threshold"
+	MIN_THRESHOLD                      = "min_threshold"
+	MAX_PROBABILITY                    = "max_probability"
+	DISCARD_POLICY                     = "discard_policy"
+	PRIORITY_Q                         = "priority_q"
+	SCHEDULING_POLICY                  = "scheduling_policy"
+	MAX_Q_SIZE                         = "max_q_size"
+	AES_ENCRYPTION                     = "aes_encryption"
+)
+
+// TechProfileFlags represents the set of configurations used
+type TechProfileFlags struct {
+	KVStoreHost          string
+	KVStorePort          int
+	KVStoreType          string
+	KVStoreTimeout       int
+	KVBackend            *db.Backend
+	TPKVPathPrefix       string
+	TPFileKVPath         string
+	TPInstanceKVPath     string
+	DefaultTPName        string
+	TPVersion            int
+	NumGemPorts          uint32
+	DefaultPbits         []string
+	LogLevel             int
+	DefaultTechProfileID uint32
+	DefaultNumGemPorts   uint32
+}
+
+func NewTechProfileFlags(KVStoreType string, KVStoreHost string, KVStorePort int) *TechProfileFlags {
+	// initialize with default values
+	var techProfileFlags = TechProfileFlags{
+		KVBackend:            nil,
+		KVStoreHost:          KVStoreHost,
+		KVStorePort:          KVStorePort,
+		KVStoreType:          KVStoreType,
+		KVStoreTimeout:       defaultKVStoreTimeout,
+		DefaultTPName:        defaultTechProfileName,
+		TPKVPathPrefix:       defaultKVPathPrefix,
+		TPVersion:            defaultVersion,
+		TPFileKVPath:         defaultTechProfileKVPath,
+		TPInstanceKVPath:     defaultTPInstanceKVPath,
+		DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
+		DefaultNumGemPorts:   defaultGemportsCount,
+		DefaultPbits:         []string{defaultPbits},
+		LogLevel:             defaultLogLevel,
+	}
+
+	return &techProfileFlags
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
new file mode 100644
index 0000000..0358291
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
@@ -0,0 +1,921 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+)
+
+// iPonResourceMgr is the interface to the PON resource manager APIs.
+type iPonResourceMgr interface {
+	GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+	GetResourceTypeAllocID() string
+	GetResourceTypeGemPortID() string
+	GetTechnology() string
+}
+
+type Direction int32
+
+const (
+	Direction_UPSTREAM      Direction = 0
+	Direction_DOWNSTREAM    Direction = 1
+	Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[Direction]string{
+	0: "UPSTREAM",
+	1: "DOWNSTREAM",
+	2: "BIDIRECTIONAL",
+}
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[SchedulingPolicy]string{
+	0: "WRR",
+	1: "StrictPriority",
+	2: "Hybrid",
+}
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[AdditionalBW]string{
+	0: "AdditionalBW_None",
+	1: "AdditionalBW_NA",
+	2: "AdditionalBW_BestEffort",
+	3: "AdditionalBW_Auto",
+}
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[DiscardPolicy]string{
+	0: "TailDrop",
+	1: "WTailDrop",
+	2: "Red",
+	3: "WRed",
+}
+
+// Required uniPortName format
+var uniPortNameFormat = regexp.MustCompile(`^pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
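+// For example, "pon-{0}/onu-{1}/uni-{0}" conforms to this format, while
+// "pon-0/onu-1/uni-0" (without braces) does not.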
+
+/*
+type InferredAdditionBWIndication int32
+
+const (
+	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
+	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
+	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+	0: "InferredAdditionBWIndication_None",
+	1: "InferredAdditionBWIndication_Assured",
+	2: "InferredAdditionBWIndication_BestEffort",
+}
+*/
+// instance control defaults
+const (
+	defaultOnuInstance    = "multi-instance"
+	defaultUniInstance    = "single-instance"
+	defaultNumGemPorts    = 1
+	defaultGemPayloadSize = "auto"
+)
+
+const MAX_GEM_PAYLOAD = "max_gem_payload_size"
+
+type InstanceControl struct {
+	Onu               string `json:"ONU"`
+	Uni               string `json:"uni"`
+	MaxGemPayloadSize string `json:"max_gem_payload_size"`
+}
+
+// default discard config constants
+const (
+	defaultMinThreshold   = 0
+	defaultMaxThreshold   = 0
+	defaultMaxProbability = 0
+)
+
+type DiscardConfig struct {
+	MinThreshold   int `json:"min_threshold"`
+	MaxThreshold   int `json:"max_threshold"`
+	MaxProbability int `json:"max_probability"`
+}
+
+// default scheduler constants
+const (
+	defaultAdditionalBw     = AdditionalBW_AdditionalBW_BestEffort
+	defaultPriority         = 0
+	defaultWeight           = 0
+	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
+)
+
+type Scheduler struct {
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
+}
+
+// default GEM attribute constants
+const (
+	defaultAESEncryption     = "True"
+	defaultPriorityQueue     = 0
+	defaultQueueWeight       = 0
+	defaultMaxQueueSize      = "auto"
+	defaultDropPolicy        = DiscardPolicy_TailDrop
+	defaultSchedulePolicy    = SchedulingPolicy_WRR
+	defaultIsMulticast       = "False"
+	defaultAccessControlList = "224.0.0.0-239.255.255.255"
+	defaultMcastGemID        = 4069
+)
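+
+// Note: defaultAccessControlList covers the entire IPv4 multicast address
+// range (224.0.0.0-239.255.255.255).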
+
+type GemPortAttribute struct {
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    uint32        `json:"priority_q"`
+	Weight           uint32        `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
+}
+
+type iScheduler struct {
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
+}
+type iGemPortAttribute struct {
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    uint32        `json:"priority_q"`
+	Weight           uint32        `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
+}
+
+type TechProfileMgr struct {
+	config      *TechProfileFlags
+	resourceMgr iPonResourceMgr
+}
+type DefaultTechProfile struct {
+	Name                           string             `json:"name"`
+	ProfileType                    string             `json:"profile_type"`
+	Version                        int                `json:"version"`
+	NumGemPorts                    uint32             `json:"num_gem_ports"`
+	InstanceCtrl                   InstanceControl    `json:"instance_control"`
+	UsScheduler                    Scheduler          `json:"us_scheduler"`
+	DsScheduler                    Scheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+type TechProfile struct {
+	Name                           string              `json:"name"`
+	SubscriberIdentifier           string              `json:"subscriber_identifier"`
+	ProfileType                    string              `json:"profile_type"`
+	Version                        int                 `json:"version"`
+	NumGemPorts                    uint32              `json:"num_gem_ports"`
+	InstanceCtrl                   InstanceControl     `json:"instance_control"`
+	UsScheduler                    iScheduler          `json:"us_scheduler"`
+	DsScheduler                    iScheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+
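+// SetKVClient creates a KV client for the configured store type and wraps it
+// in a db.Backend rooted at the tech profile path prefix; it returns nil if
+// the client cannot be created.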
+func (t *TechProfileMgr) SetKVClient() *db.Backend {
+	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
+	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
+	if err != nil {
+		log.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
+				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
+				"error": err.Error(),
+			})
+		return nil
+	}
+	return &db.Backend{
+		Client:     kvClient,
+		StoreType:  t.config.KVStoreType,
+		Host:       t.config.KVStoreHost,
+		Port:       t.config.KVStorePort,
+		Timeout:    t.config.KVStoreTimeout,
+		PathPrefix: t.config.TPKVPathPrefix}
+
+	/* TODO: Ensure a direct call to NewBackend works with the backend. Currently there is an
+	   issue between the KV store and the backend, so the core does not call NewBackend directly:
+	   kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+	                          t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
+	*/
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+
+	log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+	switch storeType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) {
+	var techprofileObj TechProfileMgr
+	log.Debug("Initializing techprofile Manager")
+	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort)
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+	if techprofileObj.config.KVBackend == nil {
+		log.Error("Failed to initialize KV backend\n")
+		return nil, errors.New("KV backend init failed")
+	}
+	techprofileObj.resourceMgr = resourceMgr
+	log.Debug("Initializing techprofile object instance success")
+	return &techprofileObj, nil
+}
+
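+// GetTechProfileInstanceKVPath returns the KV store path of a TP instance,
+// e.g. "xgspon/64/pon-{0}/onu-{1}/uni-{0}" for technology "xgspon", table ID
+// 64 and that UNI port name (the backend prepends its configured PathPrefix).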
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+	return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
+}
+
+func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) {
+	var kvTpIns TechProfile
+
+	kvResult, _ := t.config.KVBackend.Get(path)
+	if kvResult == nil {
+		log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+		return nil, nil
+	}
+	value, err := kvstore.ToByte(kvResult.Value)
+	if err != nil {
+		log.Errorw("error-converting-kv-result-to-byte", log.Fields{"key": path, "error": err})
+		return nil, err
+	}
+	if err = json.Unmarshal(value, &kvTpIns); err != nil {
+		log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+		return nil, errors.New("error-unmarshal-kv-result")
+	}
+	return &kvTpIns, nil
+}
+
+func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	tpInstanceJson, err := json.Marshal(*tpInstance)
+	if err == nil {
+		// Backend will convert JSON byte array into string format
+		log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		err = t.config.KVBackend.Put(path, tpInstanceJson)
+	} else {
+		log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+	}
+	return err
+}
+func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile {
+	var kvtechprofile DefaultTechProfile
+	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
+	log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	kvresult, err := t.config.KVBackend.Get(key)
+	if err != nil {
+		log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		return nil
+	}
+	if kvresult != nil {
+		/* Backend returns Value as a string; convert it to []byte before unmarshaling. */
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
+				log.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				return nil
+			}
+
+			log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			return &kvtechprofile
+		}
+	}
+	return nil
+}
+
+func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error) {
+	var tpInstance *TechProfile
+	log.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+
+	// Make sure the uniPortName is in the format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+	if !uniPortNameFormat.MatchString(uniPortName) {
+		log.Errorw("uni-port-name-not-conforming-to-format", log.Fields{"uniPortName": uniPortName})
+		return nil, errors.New("uni-port-name-not-conforming-to-format")
+	}
+	}
+
+	tp := t.getTPFromKVStore(techProfiletblID)
+	if tp != nil {
+		if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
+			log.Error("invalid-instance-ctrl-attr--using-default-tp")
+			tp = t.getDefaultTechProfile()
+		} else {
+			log.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+		}
+	} else {
+		log.Info("tp-not-found-on-kv--creating-default-tp")
+		tp = t.getDefaultTechProfile()
+	}
+	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	if tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
+		log.Error("tp-intance-allocation-failed")
+		return nil, errors.New("tp-intance-allocation-failed")
+	}
+	if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
+		log.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+		return nil, errors.New("error-adding-tp-to-kv-store")
+	}
+	log.Infow("tp-added-to-kv-store-successfully",
+		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
+	return tpInstance, nil
+}
+
+func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	return t.config.KVBackend.Delete(path)
+}
+
+func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
+	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
+		log.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		return errors.New("invalid-onu-instance-ctl-attr")
+	}
+
+	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
+		log.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		return errors.New("invalid-uni-instance-ctl-attr")
+	}
+
+	if instCtl.Uni == "multi-instance" {
+		log.Error("uni-multi-instance-tp-not-supported")
+		return errors.New("uni-multi-instance-tp-not-supported")
+	}
+
+	return nil
+}
+
+func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
+
+	var usGemPortAttributeList []iGemPortAttribute
+	var dsGemPortAttributeList []iGemPortAttribute
+	var dsMulticastGemAttributeList []iGemPortAttribute
+	var dsUnicastGemAttributeList []iGemPortAttribute
+	var tcontIDs []uint32
+	var gemPorts []uint32
+	var err error
+
+	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+
+	if tp.InstanceCtrl.Onu == "multi-instance" {
+		if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			return nil
+		}
+	} else { // "single-instance"
+		tpInst, err := t.getSingleInstanceTp(tpInstPath)
+		if tpInst == nil {
+			// No "single-instance" tp found on one any uni port for the given TP ID
+			// Allocate a new TcontID or AllocID
+			if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+				log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				return nil
+			}
+		} else {
+			// Use the alloc-id from the existing TpInstance
+			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
+		}
+	}
+	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		return nil
+	}
+	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+	}
+
+	log.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	//put multicast and unicast downstream GEM port attributes in different lists first
+	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
+		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+				iGemPortAttribute{
+					McastGemID:       tp.DownstreamGemPortAttributeList[index].McastGemID,
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+					IsMulticast:      tp.DownstreamGemPortAttributeList[index].IsMulticast,
+					DControlList:     tp.DownstreamGemPortAttributeList[index].DControlList,
+					SControlList:     tp.DownstreamGemPortAttributeList[index].SControlList})
+		} else {
+			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+				iGemPortAttribute{
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+		}
+	}
+	// Add unicast downstream GEM ports to dsGemPortAttributeList.
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				MaxQueueSize:     dsUnicastGemAttributeList[index].MaxQueueSize,
+				PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
+				AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
+				SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    dsUnicastGemAttributeList[index].PriorityQueue,
+				Weight:           dsUnicastGemAttributeList[index].Weight,
+				DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
+				DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
+	}
+	// Append multicast GEM ports to dsGemPortAttributeList afterwards.
+	for k := range dsMulticastGemAttributeList {
+		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+	}
+
+	return &TechProfile{
+		SubscriberIdentifier: uniPortName,
+		Name:                 tp.Name,
+		ProfileType:          tp.ProfileType,
+		Version:              tp.Version,
+		NumGemPorts:          tp.NumGemPorts,
+		InstanceCtrl:         tp.InstanceCtrl,
+		UsScheduler: iScheduler{
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+		DsScheduler: iScheduler{
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+// getSingleInstanceTp returns another TpInstance for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceTp(tpPath string) (*TechProfile, error) {
+	var tpInst TechProfile
+
+	// For example:
+	// tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
+	// is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
+	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
+	kvPairs, _ := t.config.KVBackend.List(uniPathSlice[0])
+
+	// Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
+	for keyPath, kvPair := range kvPairs {
+		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+			if err = json.Unmarshal(value, &tpInst); err != nil {
+				log.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				return nil, errors.New("error-unmarshal-kv-pair")
+			} else {
+				log.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				return &tpInst, nil
+			}
+		}
+	}
+	return nil, nil
+}
+
+func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
+
+	var usGemPortAttributeList []GemPortAttribute
+	var dsGemPortAttributeList []GemPortAttribute
+
+	for _, pbit := range t.config.DefaultPbits {
+		log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultDropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability}})
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultDropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability},
+				IsMulticast:  defaultIsMulticast,
+				DControlList: defaultAccessControlList,
+				SControlList: defaultAccessControlList,
+				McastGemID:   defaultMcastGemID})
+	}
+	return &DefaultTechProfile{
+		Name:        t.config.DefaultTPName,
+		ProfileType: t.resourceMgr.GetTechnology(),
+		Version:     t.config.TPVersion,
+		NumGemPorts: uint32(len(usGemPortAttributeList)),
+		InstanceCtrl: InstanceControl{
+			Onu:               defaultOnuInstance,
+			Uni:               defaultUniInstance,
+			MaxGemPayloadSize: defaultGemPayloadSize},
+		UsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_UPSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		DsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_DOWNSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+	var result int32 = -1
+
+	switch paramType {
+	case "direction":
+		for key, val := range tp_pb.Direction_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	case "discard_policy":
+		for key, val := range tp_pb.DiscardPolicy_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	case "sched_policy":
+		for key, val := range tp_pb.SchedulingPolicy_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	case "additional_bw":
+		for key, val := range tp_pb.AdditionalBW_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	default:
+		log.Errorw("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		return -1
+	}
+	log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	return result
+}
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
+	if dir == -1 {
+		log.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+	}
+
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
+	if bw == -1 {
+		log.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+	}
+
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+	}
+
+	return &tp_pb.SchedulerConfig{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
+		SchedPolicy:  policy}, nil
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
+	if dir == -1 {
+		log.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+	}
+
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
+	if bw == -1 {
+		log.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+	}
+
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+	}
+
+	return &tp_pb.SchedulerConfig{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
+		SchedPolicy:  policy}, nil
+}
+
+func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+	ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+
+	tSched := &tp_pb.TrafficScheduler{
+		Direction:          SchedCfg.Direction,
+		AllocId:            tpInstance.UsScheduler.AllocID,
+		TrafficShapingInfo: ShapingCfg,
+		Scheduler:          SchedCfg}
+
+	return tSched
+}
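+
+// Usage sketch (illustrative; tpMgr, tpInstance and shapingInfo are
+// assumptions): the scheduler config is derived first, then combined with
+// shaping info:
+//
+//   schedCfg, err := tpMgr.GetUsScheduler(tpInstance)
+//   if err == nil {
+//       tSched := tpMgr.GetTrafficScheduler(tpInstance, schedCfg, shapingInfo)
+//       _ = tSched
+//   }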
+
+func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+
+	var encryp bool
+	if Dir == tp_pb.Direction_UPSTREAM {
+		// upstream GEM ports
+		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			if schedPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			}
+
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			if discardPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
+				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
+				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
+				Priority:      tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
+				Weight:        tp.UpstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
+			})
+		}
+		log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	} else if Dir == tp_pb.Direction_DOWNSTREAM {
+		// downstream GEM ports
+		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+				// Skip multicast GEM ports; they are handled separately.
+				continue
+			}
+			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			if schedPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			}
+
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			if discardPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
+				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
+				Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+				Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
+			})
+		}
+		log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	}
+
+	log.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
+	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
+}
+
+// isMulticastGem returns true if the isMulticast attribute value of a GEM port is true; false otherwise.
+func isMulticastGem(isMulticastAttrValue string) bool {
+	return isMulticastAttrValue != "" &&
+		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
+}
+
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
+	var encryp bool
+	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
+	for Count := 0; Count < NumGemPorts; Count++ {
+		if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+			continue
+		}
+		if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+			encryp = true
+		} else {
+			encryp = false
+		}
+		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
+			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+			AesEncryption: encryp,
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+		})
+	}
+	log.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	return mcastTrafficQueues
+}
+
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
+	UsScheduler, err := tpm.GetUsScheduler(tp)
+	if err != nil {
+		log.Errorw("Error getting upstream scheduler config", log.Fields{"error": err})
+		return nil
+	}
+
+	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
+		AllocId:   tp.UsScheduler.AllocID,
+		Scheduler: UsScheduler}
+}
+
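+// GetGemportIDForPbit returns the GEM port ID whose PbitMap has the given
+// p-bit set, searching the GEM port attribute list for the given direction.
+// A PbitMap is a string such as "0b00000100" (example value is an
+// assumption); iteration starts at offset 2 to skip the "0b" prefix, and as
+// implemented here the character at offset n after the prefix corresponds to
+// p-bit n. Returns 0 if no match is found.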
+func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
+	if Dir == tp_pb.Direction_UPSTREAM {
+		// upstream GEM ports
+		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			NumPbitMaps := len(tp.UpstreamGemPortAttributeList[Count].PbitMap)
+			for ICount := 2; ICount < NumPbitMaps; ICount++ {
+				if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+						log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
+						return tp.UpstreamGemPortAttributeList[Count].GemportID
+					}
+				}
+			}
+		}
+	} else if Dir == tp_pb.Direction_DOWNSTREAM {
+		//downstream GEM ports
+		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			NumPbitMaps := len(tp.DownstreamGemPortAttributeList[Count].PbitMap)
+			for ICount := 2; ICount < NumPbitMaps; ICount++ {
+				if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+						log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
+						return tp.DownstreamGemPortAttributeList[Count].GemportID
+					}
+				}
+			}
+		}
+	}
+	log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	return 0
+}
+
+// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
+func (t *TechProfileMgr) FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile {
+	var tp TechProfile
+	onuTpInstancePath := fmt.Sprintf("%s/%d/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), techProfiletblID, ponIntf, onuID)
+
+	if kvPairs, _ := t.config.KVBackend.List(onuTpInstancePath); kvPairs != nil {
+		tpInstances := make([]TechProfile, 0, len(kvPairs))
+		for kvPath, kvPair := range kvPairs {
+			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+				if err = json.Unmarshal(value, &tp); err != nil {
+					log.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+					continue
+				} else {
+					tpInstances = append(tpInstances, tp)
+				}
+			}
+		}
+		return tpInstances
+	}
+	return nil
+}
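+
+// For example (values are assumptions), FindAllTpInstances(64, 0, 1) lists
+// all KV entries under "xgspon/64/pon-{0}/onu-{1}" for technology "xgspon"
+// and returns the decoded TP instance stored for each UNI of that ONU.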
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..9184b5b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+)
+
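+// TechProfileIf is the interface exposed by the tech profile manager.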
+type TechProfileIf interface {
+	SetKVClient() *db.Backend
+	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+	GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error)
+	CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error)
+	DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error
+	GetprotoBufParamValue(paramType string, paramKey string) int32
+	GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
+	GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+	FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile
+}
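+
+// Compile-time check (illustrative, not part of the original library code)
+// that *TechProfileMgr satisfies TechProfileIf.
+var _ TechProfileIf = (*TechProfileMgr)(nil)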