VOL-2640 Restructure openolt-adapter repo to best practices

Change-Id: Icead31e8ecb82ec75a22e66361fbf83f80136589
diff --git a/internal/pkg/core/device_handler.go b/internal/pkg/core/device_handler.go
new file mode 100644
index 0000000..0695af6
--- /dev/null
+++ b/internal/pkg/core/device_handler.go
@@ -0,0 +1,1828 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides utilities for OLT devices, flows and statistics
+package core
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	"github.com/cenkalti/backoff/v3"
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics"
+	rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+	"github.com/opencord/voltha-protos/v3/go/common"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	of "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	oop "github.com/opencord/voltha-protos/v3/go/openolt"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// Constants for the number of retries and the timeout (in milliseconds)
+const (
+	MaxRetry       = 10
+	MaxTimeOutInMs = 500
+)
+
+//DeviceHandler will interact with the OLT device.
+type DeviceHandler struct {
+	deviceID      string
+	deviceType    string
+	adminState    string
+	device        *voltha.Device
+	coreProxy     adapterif.CoreProxy
+	AdapterProxy  adapterif.AdapterProxy
+	EventProxy    adapterif.EventProxy
+	openOLT       *OpenOLT
+	exitChannel   chan int
+	lockDevice    sync.RWMutex
+	Client        oop.OpenoltClient
+	transitionMap *TransitionMap
+	clientCon     *grpc.ClientConn
+	flowMgr       *OpenOltFlowMgr
+	eventMgr      *OpenOltEventMgr
+	resourceMgr   *rsrcMgr.OpenOltResourceMgr
+
+	discOnus           sync.Map
+	onus               sync.Map
+	portStats          *OpenOltStatisticsMgr
+	metrics            *pmmetrics.PmMetrics
+	stopCollector      chan bool
+	stopHeartbeatCheck chan bool
+	activePorts        sync.Map
+}
+
+//OnuDevice represents ONU related info
+type OnuDevice struct {
+	deviceID      string
+	deviceType    string
+	serialNumber  string
+	onuID         uint32
+	intfID        uint32
+	proxyDeviceID string
+	uniPorts      map[uint32]struct{}
+}
+
+var pmNames = []string{
+	"rx_bytes",
+	"rx_packets",
+	"rx_mcast_packets",
+	"rx_bcast_packets",
+	"tx_bytes",
+	"tx_packets",
+	"tx_mcast_packets",
+	"tx_bcast_packets",
+}
+
+//NewOnuDevice creates a new Onu Device
+func NewOnuDevice(devID, deviceTp, serialNum string, onuID, intfID uint32, proxyDevID string) *OnuDevice {
+	var device OnuDevice
+	device.deviceID = devID
+	device.deviceType = deviceTp
+	device.serialNumber = serialNum
+	device.onuID = onuID
+	device.intfID = intfID
+	device.proxyDeviceID = proxyDevID
+	device.uniPorts = make(map[uint32]struct{})
+	return &device
+}
+
+//NewDeviceHandler creates a new device handler
+func NewDeviceHandler(cp adapterif.CoreProxy, ap adapterif.AdapterProxy, ep adapterif.EventProxy, device *voltha.Device, adapter *OpenOLT) *DeviceHandler {
+	var dh DeviceHandler
+	dh.coreProxy = cp
+	dh.AdapterProxy = ap
+	dh.EventProxy = ep
+	cloned := (proto.Clone(device)).(*voltha.Device)
+	dh.deviceID = cloned.Id
+	dh.deviceType = cloned.Type
+	dh.adminState = "up"
+	dh.device = cloned
+	dh.openOLT = adapter
+	dh.exitChannel = make(chan int, 1)
+	dh.lockDevice = sync.RWMutex{}
+	dh.stopCollector = make(chan bool, 2)
+	dh.stopHeartbeatCheck = make(chan bool, 2)
+	dh.metrics = pmmetrics.NewPmMetrics(cloned.Id, pmmetrics.Frequency(150), pmmetrics.FrequencyOverride(false), pmmetrics.Grouped(false), pmmetrics.Metrics(pmNames))
+	dh.activePorts = sync.Map{}
+	//TODO initialize the support classes.
+	return &dh
+}
+
+// start saves the device to the data model
+func (dh *DeviceHandler) start(ctx context.Context) {
+	dh.lockDevice.Lock()
+	defer dh.lockDevice.Unlock()
+	log.Debugw("starting-device-agent", log.Fields{"device": dh.device})
+	// Add the initial device to the local model
+	log.Debug("device-agent-started")
+}
+
+// stop stops the device handler. Not much to do for now
+func (dh *DeviceHandler) stop(ctx context.Context) {
+	dh.lockDevice.Lock()
+	defer dh.lockDevice.Unlock()
+	log.Debug("stopping-device-agent")
+	dh.exitChannel <- 1
+	log.Debug("device-agent-stopped")
+}
+
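+// macifyIP derives a MAC-address-like string from the last four octets of the
+// 16-byte representation of an IP address, e.g. 10.64.1.207 -> "00:00:0a:40:01:cf".
+// It is used further below to synthesize a MAC when the OLT does not report a device ID.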
+func macifyIP(ip net.IP) string {
+	if len(ip) > 0 {
+		// Use zero-padded hex so single-digit octets do not produce spaces in the MAC
+		return fmt.Sprintf("00:00:%02x:%02x:%02x:%02x", ip[12], ip[13], ip[14], ip[15])
+	}
+	return ""
+}
+
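+// generateMacFromHost turns a host specification (an IP address or a resolvable
+// hostname) into a MAC-address-like string via macifyIP. Hostnames are resolved
+// with a DNS lookup first; an unresolvable host yields an error.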
+func generateMacFromHost(host string) (string, error) {
+	var genmac string
+	var addr net.IP
+	var ips []string
+	var err error
+
+	log.Debugw("generating-mac-from-host", log.Fields{"host": host})
+
+	if addr = net.ParseIP(host); addr == nil {
+		log.Debugw("looking-up-hostname", log.Fields{"host": host})
+
+		if ips, err = net.LookupHost(host); err == nil {
+			log.Debugw("dns-result-ips", log.Fields{"ips": ips})
+			if addr = net.ParseIP(ips[0]); addr == nil {
+				return "", NewErrInvalidValue(log.Fields{"ip": ips[0]}, nil).Log()
+			}
+			genmac = macifyIP(addr)
+			log.Debugw("using-ip-as-mac", log.Fields{"host": ips[0], "mac": genmac})
+			return genmac, nil
+		}
+		return "", NewErrAdapter("cannot-resolve-hostname-to-ip", nil, err).Log()
+	}
+
+	genmac = macifyIP(addr)
+	log.Debugw("using-ip-as-mac", log.Fields{"host": host, "mac": genmac})
+	return genmac, nil
+}
+
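+// macAddressToUint32Array converts a colon-separated MAC string into its octet
+// values, e.g. "00:00:0a:40:01:cf" -> [0 0 10 64 1 207]. If any octet fails to
+// parse, the fixed fallback value {1, 2, 3, 4, 5, 6} is returned instead.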
+func macAddressToUint32Array(mac string) []uint32 {
+	slist := strings.Split(mac, ":")
+	result := make([]uint32, len(slist))
+	var err error
+	var tmp int64
+	for index, val := range slist {
+		if tmp, err = strconv.ParseInt(val, 16, 32); err != nil {
+			return []uint32{1, 2, 3, 4, 5, 6}
+		}
+		result[index] = uint32(tmp)
+	}
+	return result
+}
+
+//GetportLabel returns the label for the NNI and the PON port based on port number and port type
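+// For example, an NNI port yields "nni-<portNum>" and a PON port yields "pon-<portNum>";
+// any other port type is reported as an invalid value.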
+func GetportLabel(portNum uint32, portType voltha.Port_PortType) (string, error) {
+
+	switch portType {
+	case voltha.Port_ETHERNET_NNI:
+		return fmt.Sprintf("nni-%d", portNum), nil
+	case voltha.Port_PON_OLT:
+		return fmt.Sprintf("pon-%d", portNum), nil
+	}
+
+	return "", NewErrInvalidValue(log.Fields{"port-type": portType}, nil).Log()
+}
+
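+// addPort registers (or refreshes) a single OLT port with the VOLTHA core: it records
+// the port's state in the activePorts map, and either updates the oper status of an
+// already known port or creates a new port entry via the core proxy.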
+func (dh *DeviceHandler) addPort(intfID uint32, portType voltha.Port_PortType, state string) error {
+	var operStatus common.OperStatus_Types
+	if state == "up" {
+		operStatus = voltha.OperStatus_ACTIVE
+		//populating the activePorts map
+		dh.activePorts.Store(intfID, true)
+	} else {
+		operStatus = voltha.OperStatus_DISCOVERED
+		dh.activePorts.Store(intfID, false)
+	}
+	portNum := IntfIDToPortNo(intfID, portType)
+	label, err := GetportLabel(portNum, portType)
+	if err != nil {
+		return NewErrNotFound("port-label", log.Fields{"port-number": portNum, "port-type": portType}, nil).Log()
+	}
+
+	device, err := dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, dh.device.Id)
+	if err != nil || device == nil {
+		return NewErrNotFound("device", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	if device.Ports != nil {
+		for _, dPort := range device.Ports {
+			if dPort.Type == portType && dPort.PortNo == portNum {
+				log.Debug("port-already-exists-updating-oper-status-of-port")
+				if err := dh.coreProxy.PortStateUpdate(context.TODO(), dh.device.Id, portType, portNum, operStatus); err != nil {
+					return NewErrAdapter("failed-to-update-port-state", log.Fields{
+						"device-id":   dh.device.Id,
+						"port-type":   portType,
+						"port-number": portNum,
+						"oper-status": operStatus}, err).Log()
+
+				}
+				return nil
+			}
+		}
+	}
+	// Now create the port
+	port := &voltha.Port{
+		PortNo:     portNum,
+		Label:      label,
+		Type:       portType,
+		OperStatus: operStatus,
+	}
+	log.Debugw("Sending-port-update-to-core", log.Fields{"port": port})
+	// Synchronous call to update device - this method is run in its own go routine
+	if err := dh.coreProxy.PortCreated(context.TODO(), dh.device.Id, port); err != nil {
+		return NewErrAdapter("Error-creating-port", log.Fields{
+			"device-id": dh.device.Id,
+			"port-type": portType}, err).Log()
+	}
+	return nil
+}
+
+// readIndications reads the indications from the OLT device
+func (dh *DeviceHandler) readIndications(ctx context.Context) error {
+	defer log.Debugw("indications-ended", log.Fields{"device-id": dh.device.Id})
+	indications, err := dh.Client.EnableIndication(ctx, new(oop.Empty))
+	if err != nil {
+		return NewErrCommunication("fail-to-read-indications", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	if indications == nil {
+		return NewErrInvalidValue(log.Fields{"indications": nil, "device-id": dh.device.Id}, nil).Log()
+	}
+	/* get device state */
+	device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
+	if err != nil || device == nil {
+		/*TODO: needs to handle error scenarios */
+		return NewErrNotFound("device", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	// When the device is in DISABLED state and the adapter container restarts, we need to
+	// rebuild the locally maintained admin state.
+	if device.AdminState == voltha.AdminState_DISABLED {
+		dh.lockDevice.Lock()
+		dh.adminState = "down"
+		dh.lockDevice.Unlock()
+	}
+
+	// Create an exponential backoff around re-enabling indications. The
+	// maximum elapsed time for the back off is set to 0 so that we will
+	// continue to retry. The max interval defaults to 1m, but is set
+	// here for code clarity
+	indicationBackoff := backoff.NewExponentialBackOff()
+	indicationBackoff.MaxElapsedTime = 0
+	indicationBackoff.MaxInterval = 1 * time.Minute
+	for {
+		indication, err := indications.Recv()
+		if err == io.EOF {
+			log.Infow("EOF for indications", log.Fields{"err": err})
+			// Use an exponential back off to prevent getting into a tight loop
+			duration := indicationBackoff.NextBackOff()
+			if duration == backoff.Stop {
+				// If we reach a maximum then warn and reset the backoff
+				// timer and keep attempting.
+				log.Warnw("Maximum indication backoff reached, resetting backoff timer",
+					log.Fields{"max_indication_backoff": indicationBackoff.MaxElapsedTime})
+				indicationBackoff.Reset()
+			}
+			time.Sleep(indicationBackoff.NextBackOff())
+			indications, err = dh.Client.EnableIndication(ctx, new(oop.Empty))
+			if err != nil {
+				return NewErrCommunication("indication-read-failure", log.Fields{"device-id": dh.device.Id}, err).Log()
+			}
+			continue
+		}
+		if err != nil {
+			log.Infow("Failed to read from indications", log.Fields{"err": err})
+			if dh.adminState == "deleted" {
+				log.Debug("Device deleted, stopping the read indication thread")
+				break
+			}
+			dh.transitionMap.Handle(ctx, DeviceDownInd)
+			dh.transitionMap.Handle(ctx, DeviceInit)
+			return NewErrCommunication("indication-read-failure", log.Fields{"device-id": dh.device.Id}, err).Log()
+		}
+		// Reset backoff if we have a successful receive
+		indicationBackoff.Reset()
+		dh.lockDevice.RLock()
+		adminState := dh.adminState
+		dh.lockDevice.RUnlock()
+		// When OLT is admin down, ignore all indications.
+		if adminState == "down" {
+			log.Infow("olt is admin down, ignoring indication", log.Fields{"indication": indication})
+			continue
+		}
+		dh.handleIndication(ctx, indication)
+
+	}
+	return nil
+}
+
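+// handleOltIndication drives the device state machine on OLT oper-state changes
+// ("up"/"down") and asks the event manager to raise or clear the corresponding alarm.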
+func (dh *DeviceHandler) handleOltIndication(ctx context.Context, oltIndication *oop.OltIndication) error {
+	raisedTs := time.Now().UnixNano()
+	if oltIndication.OperState == "up" && dh.transitionMap.currentDeviceState != deviceStateUp {
+		dh.transitionMap.Handle(ctx, DeviceUpInd)
+	} else if oltIndication.OperState == "down" {
+		dh.transitionMap.Handle(ctx, DeviceDownInd)
+	}
+	// Send or clear Alarm
+	if err := dh.eventMgr.oltUpDownIndication(oltIndication, dh.deviceID, raisedTs); err != nil {
+		return NewErrAdapter("failed-indication", log.Fields{
+			"device_id":  dh.deviceID,
+			"indication": oltIndication,
+			"timestamp":  raisedTs}, err).Log()
+	}
+	return nil
+}
+
+// nolint: gocyclo
+func (dh *DeviceHandler) handleIndication(ctx context.Context, indication *oop.Indication) {
+	raisedTs := time.Now().UnixNano()
+	switch indication.Data.(type) {
+	case *oop.Indication_OltInd:
+		if err := dh.handleOltIndication(ctx, indication.GetOltInd()); err != nil {
+			NewErrAdapter("handle-indication-error", log.Fields{"type": "olt"}, err).Log()
+		}
+	case *oop.Indication_IntfInd:
+		intfInd := indication.GetIntfInd()
+		go func() {
+			if err := dh.addPort(intfInd.GetIntfId(), voltha.Port_PON_OLT, intfInd.GetOperState()); err != nil {
+				NewErrAdapter("handle-indication-error", log.Fields{"type": "interface"}, err).Log()
+			}
+		}()
+		log.Infow("Received interface indication ", log.Fields{"InterfaceInd": intfInd})
+	case *oop.Indication_IntfOperInd:
+		intfOperInd := indication.GetIntfOperInd()
+		if intfOperInd.GetType() == "nni" {
+			go func() {
+				if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_ETHERNET_NNI, intfOperInd.GetOperState()); err != nil {
+					NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-nni"}, err).Log()
+				}
+			}()
+			dh.resourceMgr.AddNNIToKVStore(ctx, intfOperInd.GetIntfId())
+		} else if intfOperInd.GetType() == "pon" {
+			// TODO: Check what needs to be handled here: when a PON port goes down, the ONUs on it will go down as well
+			// Handle pon port update
+			go func() {
+				if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_PON_OLT, intfOperInd.GetOperState()); err != nil {
+					NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-pon"}, err).Log()
+				}
+			}()
+			go dh.eventMgr.oltIntfOperIndication(indication.GetIntfOperInd(), dh.deviceID, raisedTs)
+		}
+		log.Infow("Received interface oper indication ", log.Fields{"InterfaceOperInd": intfOperInd})
+	case *oop.Indication_OnuDiscInd:
+		onuDiscInd := indication.GetOnuDiscInd()
+		log.Infow("Received Onu discovery indication ", log.Fields{"OnuDiscInd": onuDiscInd})
+		sn := dh.stringifySerialNumber(onuDiscInd.SerialNumber)
+		go func() {
+			if err := dh.onuDiscIndication(ctx, onuDiscInd, sn); err != nil {
+				NewErrAdapter("handle-indication-error", log.Fields{"type": "onu-discovery"}, err).Log()
+			}
+		}()
+	case *oop.Indication_OnuInd:
+		onuInd := indication.GetOnuInd()
+		log.Infow("Received Onu indication ", log.Fields{"OnuInd": onuInd})
+		go func() {
+			if err := dh.onuIndication(onuInd); err != nil {
+				NewErrAdapter("handle-indication-error", log.Fields{"type": "onu"}, err).Log()
+			}
+		}()
+	case *oop.Indication_OmciInd:
+		omciInd := indication.GetOmciInd()
+		log.Debugw("Received Omci indication ", log.Fields{"IntfId": omciInd.IntfId, "OnuId": omciInd.OnuId, "pkt": hex.EncodeToString(omciInd.Pkt)})
+		go func() {
+			if err := dh.omciIndication(omciInd); err != nil {
+				NewErrAdapter("handle-indication-error", log.Fields{"type": "omci"}, err).Log()
+			}
+		}()
+	case *oop.Indication_PktInd:
+		pktInd := indication.GetPktInd()
+		log.Infow("Received packet indication", log.Fields{"PktInd": pktInd})
+		go func() {
+			if err := dh.handlePacketIndication(ctx, pktInd); err != nil {
+				NewErrAdapter("handle-indication-error", log.Fields{"type": "packet"}, err).Log()
+			}
+		}()
+	case *oop.Indication_PortStats:
+		portStats := indication.GetPortStats()
+		go dh.portStats.PortStatisticsIndication(portStats, dh.resourceMgr.DevInfo.GetPonPorts())
+	case *oop.Indication_FlowStats:
+		flowStats := indication.GetFlowStats()
+		log.Infow("Received flow stats", log.Fields{"FlowStats": flowStats})
+	case *oop.Indication_AlarmInd:
+		alarmInd := indication.GetAlarmInd()
+		log.Infow("Received alarm indication ", log.Fields{"AlarmInd": alarmInd})
+		go dh.eventMgr.ProcessEvents(alarmInd, dh.deviceID, raisedTs)
+	}
+}
+
+// doStateUp handles the OLT up indication and updates the device state in the VOLTHA core
+func (dh *DeviceHandler) doStateUp(ctx context.Context) error {
+	// Synchronous call to update device state - this method is run in its own go routine
+	if err := dh.coreProxy.DeviceStateUpdate(ctx, dh.device.Id, voltha.ConnectStatus_REACHABLE,
+		voltha.OperStatus_ACTIVE); err != nil {
+		return NewErrAdapter("device-update-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	return nil
+}
+
+// doStateDown handles the OLT down indication
+func (dh *DeviceHandler) doStateDown(ctx context.Context) error {
+	dh.lockDevice.Lock()
+	defer dh.lockDevice.Unlock()
+	log.Debug("do-state-down-start")
+
+	device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
+	if err != nil || device == nil {
+		/*TODO: needs to handle error scenarios */
+		return NewErrNotFound("device", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+
+	cloned := proto.Clone(device).(*voltha.Device)
+	// Update all the port states on that device to UNKNOWN
+	if err = dh.coreProxy.PortsStateUpdate(ctx, cloned.Id, voltha.OperStatus_UNKNOWN); err != nil {
+		return NewErrAdapter("port-update-failed", log.Fields{"device-id": device.Id}, err).Log()
+	}
+
+	//Update the device oper state and connection status
+	cloned.OperStatus = voltha.OperStatus_UNKNOWN
+	cloned.ConnectStatus = common.ConnectStatus_UNREACHABLE
+	dh.device = cloned
+
+	if err = dh.coreProxy.DeviceStateUpdate(ctx, cloned.Id, cloned.ConnectStatus, cloned.OperStatus); err != nil {
+		return NewErrAdapter("state-update-failed", log.Fields{"device-id": device.Id}, err).Log()
+	}
+
+	//get the child devices for the parent device
+	onuDevices, err := dh.coreProxy.GetChildDevices(ctx, dh.device.Id)
+	if err != nil {
+		return NewErrAdapter("child-device-fetch-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	for _, onuDevice := range onuDevices.Items {
+
+		// Update onu state as down in onu adapter
+		onuInd := oop.OnuIndication{}
+		onuInd.OperState = "down"
+		err := dh.AdapterProxy.SendInterAdapterMessage(ctx, &onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
+			"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
+		if err != nil {
+			NewErrCommunication("inter-adapter-send-failed", log.Fields{
+				"source":        "openolt",
+				"onu-indicator": onuInd,
+				"device-type":   onuDevice.Type,
+				"device-id":     onuDevice.Id}, err).LogAt(log.ErrorLevel)
+			//Do not return here and continue to process other ONUs
+		}
+	}
+	/* Discovered ONU entries need to be cleared, since once the OLT
+	   is up again it starts sending discovery indications afresh */
+	dh.discOnus = sync.Map{}
+	log.Debugw("do-state-down-end", log.Fields{"device-id": device.Id})
+	return nil
+}
+
+// doStateInit dials the gRPC connection before going to the init state
+func (dh *DeviceHandler) doStateInit(ctx context.Context) error {
+	var err error
+	if dh.clientCon, err = grpc.Dial(dh.device.GetHostAndPort(), grpc.WithInsecure(), grpc.WithBlock()); err != nil {
+		return NewErrCommunication("dial-failure", log.Fields{
+			"device-id":     dh.deviceID,
+			"host-and-port": dh.device.GetHostAndPort()}, err).Log()
+	}
+	return nil
+}
+
+// postInit creates the OLT client instance used to invoke RPCs on the OLT device
+func (dh *DeviceHandler) postInit(ctx context.Context) error {
+	dh.Client = oop.NewOpenoltClient(dh.clientCon)
+	dh.transitionMap.Handle(ctx, GrpcConnected)
+	return nil
+}
+
+// doStateConnected gets the device info and updates the VOLTHA core
+func (dh *DeviceHandler) doStateConnected(ctx context.Context) error {
+	log.Debug("OLT device has been connected")
+
+	// Case where OLT is disabled and then rebooted.
+	if dh.adminState == "down" {
+		log.Debugln("do-state-connected--device-admin-state-down")
+		device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
+		if err != nil || device == nil {
+			/*TODO: needs to handle error scenarios */
+			NewErrAdapter("device-fetch-failed", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
+		}
+
+		cloned := proto.Clone(device).(*voltha.Device)
+		cloned.ConnectStatus = voltha.ConnectStatus_REACHABLE
+		cloned.OperStatus = voltha.OperStatus_UNKNOWN
+		dh.device = cloned
+		if er := dh.coreProxy.DeviceStateUpdate(ctx, cloned.Id, cloned.ConnectStatus, cloned.OperStatus); er != nil {
+			NewErrAdapter("device-state-update-failed", log.Fields{"device-id": dh.device.Id}, er).LogAt(log.ErrorLevel)
+		}
+
+		// Since the device was disabled before the OLT was rebooted, enforce the OLT to be Disabled after re-connection.
+		_, err = dh.Client.DisableOlt(ctx, new(oop.Empty))
+		if err != nil {
+			NewErrAdapter("olt-disable-failed", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
+		}
+
+		// Start reading indications
+		go func() {
+			if err := dh.readIndications(ctx); err != nil {
+				NewErrAdapter("indication-read-failure", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
+			}
+		}()
+		return nil
+	}
+
+	deviceInfo, err := dh.populateDeviceInfo()
+	if err != nil {
+		return NewErrAdapter("populate-device-info-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+
+	device, err := dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, dh.device.Id)
+	if err != nil || device == nil {
+		/*TODO: needs to handle error scenarios */
+		return NewErrAdapter("fetch-device-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+	dh.populateActivePorts(device)
+	if err := dh.disableAdminDownPorts(device); err != nil {
+		return NewErrAdapter("port-status-update-failed", log.Fields{"device": device}, err).Log()
+	}
+
+	KVStoreHostPort := fmt.Sprintf("%s:%d", dh.openOLT.KVStoreHost, dh.openOLT.KVStorePort)
+	// Instantiate resource manager
+	if dh.resourceMgr = rsrcMgr.NewResourceMgr(ctx, dh.deviceID, KVStoreHostPort, dh.openOLT.KVStoreType, dh.deviceType, deviceInfo); dh.resourceMgr == nil {
+		return ErrResourceManagerInstantiating.Log()
+	}
+	// Instantiate flow manager
+	if dh.flowMgr = NewFlowManager(ctx, dh, dh.resourceMgr); dh.flowMgr == nil {
+		return ErrResourceManagerInstantiating.Log()
+	}
+	/* TODO: Instantiate Alarm , stats , BW managers */
+	/* Instantiating Event Manager to handle Alarms and KPIs */
+	dh.eventMgr = NewEventMgr(dh.EventProxy, dh)
+	// Stats config for new device
+	dh.portStats = NewOpenOltStatsMgr(dh)
+
+	// Start reading indications
+	go func() {
+		if err := dh.readIndications(ctx); err != nil {
+			NewErrAdapter("read-indications-failure", log.Fields{"device-id": dh.device.Id}, err).Log()
+		}
+	}()
+	return nil
+}
+
+func (dh *DeviceHandler) populateDeviceInfo() (*oop.DeviceInfo, error) {
+	var err error
+	var deviceInfo *oop.DeviceInfo
+
+	deviceInfo, err = dh.Client.GetDeviceInfo(context.Background(), new(oop.Empty))
+
+	if err != nil {
+		return nil, NewErrPersistence("get", "device", 0, nil, err).Log()
+	}
+	if deviceInfo == nil {
+		return nil, NewErrInvalidValue(log.Fields{"device": nil}, nil).Log()
+	}
+
+	log.Debugw("Fetched device info", log.Fields{"deviceInfo": deviceInfo})
+	dh.device.Root = true
+	dh.device.Vendor = deviceInfo.Vendor
+	dh.device.Model = deviceInfo.Model
+	dh.device.SerialNumber = deviceInfo.DeviceSerialNumber
+	dh.device.HardwareVersion = deviceInfo.HardwareVersion
+	dh.device.FirmwareVersion = deviceInfo.FirmwareVersion
+
+	if deviceInfo.DeviceId == "" {
+		log.Warnw("no-device-id-provided-using-host", log.Fields{"hostport": dh.device.GetHostAndPort()})
+		host := strings.Split(dh.device.GetHostAndPort(), ":")[0]
+		genmac, err := generateMacFromHost(host)
+		if err != nil {
+			return nil, NewErrAdapter("failed-to-generate-mac-host", log.Fields{"host": host}, err).Log()
+		}
+		log.Debugw("using-host-for-mac-address", log.Fields{"host": host, "mac": genmac})
+		dh.device.MacAddress = genmac
+	} else {
+		dh.device.MacAddress = deviceInfo.DeviceId
+	}
+
+	// Synchronous call to update device - this method is run in its own go routine
+	if err := dh.coreProxy.DeviceUpdate(context.TODO(), dh.device); err != nil {
+		return nil, NewErrAdapter("device-update-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+	}
+
+	return deviceInfo, nil
+}
+
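+// startCollector runs the periodic statistics collection loop for the OLT. After an
+// initial delay it collects and publishes NNI and (active) PON port metrics at the
+// frequency configured in the PM metrics, until stopCollector is signalled.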
+func startCollector(dh *DeviceHandler) {
+	// Initial delay for OLT initialization
+	time.Sleep(1 * time.Minute)
+	log.Debugf("Starting-Collector")
+	context := make(map[string]string)
+	for {
+		select {
+		case <-dh.stopCollector:
+			log.Debugw("Stopping-Collector-for-OLT", log.Fields{"deviceID:": dh.deviceID})
+			return
+		default:
+			freq := dh.metrics.ToPmConfigs().DefaultFreq
+			time.Sleep(time.Duration(freq) * time.Second)
+			context["oltid"] = dh.deviceID
+			context["devicetype"] = dh.deviceType
+			// NNI Stats
+			cmnni := dh.portStats.collectNNIMetrics(uint32(0))
+			log.Debugf("Collect-NNI-Metrics %v", cmnni)
+			go dh.portStats.publishMetrics("NNIStats", cmnni, uint32(0), context, dh.deviceID)
+			log.Debugf("Publish-NNI-Metrics")
+			// PON Stats
+			NumPonPORTS := dh.resourceMgr.DevInfo.GetPonPorts()
+			for i := uint32(0); i < NumPonPORTS; i++ {
+				if val, ok := dh.activePorts.Load(i); ok && val == true {
+					cmpon := dh.portStats.collectPONMetrics(i)
+					log.Debugf("Collect-PON-Metrics %v", cmpon)
+
+					go dh.portStats.publishMetrics("PONStats", cmpon, i, context, dh.deviceID)
+					log.Debugf("Publish-PON-Metrics")
+				}
+			}
+		}
+	}
+}
+
+//AdoptDevice adopts the OLT device
+func (dh *DeviceHandler) AdoptDevice(ctx context.Context, device *voltha.Device) {
+	dh.transitionMap = NewTransitionMap(dh)
+	log.Infow("Adopt_device", log.Fields{"deviceID": device.Id, "Address": device.GetHostAndPort()})
+	dh.transitionMap.Handle(ctx, DeviceInit)
+
+	// Now, set the initial PM configuration for that device
+	if err := dh.coreProxy.DevicePMConfigUpdate(ctx, dh.metrics.ToPmConfigs()); err != nil {
+		NewErrAdapter("error-updating-performance-metrics", log.Fields{"device-id": device.Id}, err).LogAt(log.ErrorLevel)
+	}
+
+	go startCollector(dh)
+	go startHeartbeatCheck(dh)
+}
+
+//GetOfpDeviceInfo gets the OpenFlow switch information for the given device
+func (dh *DeviceHandler) GetOfpDeviceInfo(device *voltha.Device) (*ic.SwitchCapability, error) {
+	return &ic.SwitchCapability{
+		Desc: &of.OfpDesc{
+			MfrDesc:   "VOLTHA Project",
+			HwDesc:    "open_pon",
+			SwDesc:    "open_pon",
+			SerialNum: dh.device.SerialNumber,
+		},
+		SwitchFeatures: &of.OfpSwitchFeatures{
+			NBuffers: 256,
+			NTables:  2,
+			Capabilities: uint32(of.OfpCapabilities_OFPC_FLOW_STATS |
+				of.OfpCapabilities_OFPC_TABLE_STATS |
+				of.OfpCapabilities_OFPC_PORT_STATS |
+				of.OfpCapabilities_OFPC_GROUP_STATS),
+		},
+	}, nil
+}
+
+//GetOfpPortInfo gets the OpenFlow port information for the given device and port number
+func (dh *DeviceHandler) GetOfpPortInfo(device *voltha.Device, portNo int64) (*ic.PortCapability, error) {
+	capacity := uint32(of.OfpPortFeatures_OFPPF_1GB_FD | of.OfpPortFeatures_OFPPF_FIBER)
+	return &ic.PortCapability{
+		Port: &voltha.LogicalPort{
+			OfpPort: &of.OfpPort{
+				HwAddr:     macAddressToUint32Array(dh.device.MacAddress),
+				Config:     0,
+				State:      uint32(of.OfpPortState_OFPPS_LIVE),
+				Curr:       capacity,
+				Advertised: capacity,
+				Peer:       capacity,
+				CurrSpeed:  uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
+				MaxSpeed:   uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
+			},
+			DeviceId:     dh.device.Id,
+			DevicePortNo: uint32(portNo),
+		},
+	}, nil
+}
+
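+// omciIndication forwards an OMCI message received from the OLT to the ONU adapter.
+// The target ONU is looked up in the local cache first and fetched from the core
+// (and cached) if it is not present yet.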
+func (dh *DeviceHandler) omciIndication(omciInd *oop.OmciIndication) error {
+	log.Debugw("omci indication", log.Fields{"intfID": omciInd.IntfId, "onuID": omciInd.OnuId})
+	var deviceType string
+	var deviceID string
+	var proxyDeviceID string
+
+	onuKey := dh.formOnuKey(omciInd.IntfId, omciInd.OnuId)
+
+	if onuInCache, ok := dh.onus.Load(onuKey); !ok {
+
+		log.Debugw("omci indication for a device not in cache.", log.Fields{"intfID": omciInd.IntfId, "onuID": omciInd.OnuId})
+		ponPort := IntfIDToPortNo(omciInd.GetIntfId(), voltha.Port_PON_OLT)
+		kwargs := make(map[string]interface{})
+		kwargs["onu_id"] = omciInd.OnuId
+		kwargs["parent_port_no"] = ponPort
+
+		onuDevice, err := dh.coreProxy.GetChildDevice(context.TODO(), dh.device.Id, kwargs)
+		if err != nil {
+			return NewErrNotFound("onu", log.Fields{
+				"interface-id": omciInd.IntfId,
+				"onu-id":       omciInd.OnuId}, err).Log()
+		}
+		deviceType = onuDevice.Type
+		deviceID = onuDevice.Id
+		proxyDeviceID = onuDevice.ProxyAddress.DeviceId
+		//if it does not exist in the cache, add it to the cache.
+		dh.onus.Store(onuKey, NewOnuDevice(deviceID, deviceType, onuDevice.SerialNumber, omciInd.OnuId, omciInd.IntfId, proxyDeviceID))
+	} else {
+		//found in cache
+		log.Debugw("omci indication for a device in cache.", log.Fields{"intfID": omciInd.IntfId, "onuID": omciInd.OnuId})
+		deviceType = onuInCache.(*OnuDevice).deviceType
+		deviceID = onuInCache.(*OnuDevice).deviceID
+		proxyDeviceID = onuInCache.(*OnuDevice).proxyDeviceID
+	}
+
+	omciMsg := &ic.InterAdapterOmciMessage{Message: omciInd.Pkt}
+	if err := dh.AdapterProxy.SendInterAdapterMessage(context.Background(), omciMsg,
+		ic.InterAdapterMessageType_OMCI_REQUEST, dh.deviceType, deviceType,
+		deviceID, proxyDeviceID, ""); err != nil {
+		return NewErrCommunication("omci-request", log.Fields{
+			"source":          dh.deviceType,
+			"destination":     deviceType,
+			"onu-id":          deviceID,
+			"proxy-device-id": proxyDeviceID}, err).Log()
+	}
+	return nil
+}
+
+//ProcessInterAdapterMessage sends the proxied messages to the target device.
+// If the proxy address is not found in the unmarshalled message, it first fetches the onu device for which the message
+// is meant, and then sends the unmarshalled omci message to this onu.
+func (dh *DeviceHandler) ProcessInterAdapterMessage(msg *ic.InterAdapterMessage) error {
+	log.Debugw("Process_inter_adapter_message", log.Fields{"msgID": msg.Header.Id})
+	if msg.Header.Type == ic.InterAdapterMessageType_OMCI_REQUEST {
+		msgID := msg.Header.Id
+		fromTopic := msg.Header.FromTopic
+		toTopic := msg.Header.ToTopic
+		toDeviceID := msg.Header.ToDeviceId
+		proxyDeviceID := msg.Header.ProxyDeviceId
+
+		log.Debugw("omci request message header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+
+		msgBody := msg.GetBody()
+
+		omciMsg := &ic.InterAdapterOmciMessage{}
+		if err := ptypes.UnmarshalAny(msgBody, omciMsg); err != nil {
+			log.Warnw("cannot-unmarshal-omci-msg-body", log.Fields{"error": err})
+			return err
+		}
+
+		if omciMsg.GetProxyAddress() == nil {
+			onuDevice, err := dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, toDeviceID)
+			if err != nil {
+				return NewErrNotFound("onu", log.Fields{
+					"device-id":     dh.device.Id,
+					"onu-device-id": toDeviceID}, err).Log()
+			}
+			log.Debugw("device retrieved from core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+			if err := dh.sendProxiedMessage(onuDevice, omciMsg); err != nil {
+				return NewErrCommunication("send-failed", log.Fields{
+					"device-id":     dh.device.Id,
+					"onu-device-id": toDeviceID}, err).Log()
+			}
+		} else {
+			log.Debugw("Proxy Address found in omci message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+			if err := dh.sendProxiedMessage(nil, omciMsg); err != nil {
+				return NewErrCommunication("send-failed", log.Fields{
+					"device-id":     dh.device.Id,
+					"onu-device-id": toDeviceID}, err).Log()
+			}
+		}
+
+	} else {
+		return NewErrInvalidValue(log.Fields{"inter-adapter-message-type": msg.Header.Type}, nil).Log()
+	}
+	return nil
+}
+
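+// sendProxiedMessage sends an OMCI message to the OLT agent for the ONU identified
+// either by the supplied onuDevice or by the proxy address embedded in the message.
+// Unreachable ONUs are rejected; the payload is hex-encoded if it is not already.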
+func (dh *DeviceHandler) sendProxiedMessage(onuDevice *voltha.Device, omciMsg *ic.InterAdapterOmciMessage) error {
+	var intfID uint32
+	var onuID uint32
+	var connectStatus common.ConnectStatus_Types
+	if onuDevice != nil {
+		intfID = onuDevice.ProxyAddress.GetChannelId()
+		onuID = onuDevice.ProxyAddress.GetOnuId()
+		connectStatus = onuDevice.ConnectStatus
+	} else {
+		intfID = omciMsg.GetProxyAddress().GetChannelId()
+		onuID = omciMsg.GetProxyAddress().GetOnuId()
+		connectStatus = omciMsg.GetConnectStatus()
+	}
+	if connectStatus != voltha.ConnectStatus_REACHABLE {
+		log.Debugw("ONU is not reachable, cannot send OMCI", log.Fields{"intfID": intfID, "onuID": onuID})
+
+		return NewErrCommunication("unreachable", log.Fields{
+			"interface-id": intfID,
+			"onu-id":       onuID}, nil).Log()
+	}
+
+	// TODO: Once we are sure openonu/openomci is sending only binary in omciMsg.Message, we can remove this check
+	isHexString := false
+	_, decodeerr := hex.DecodeString(string(omciMsg.Message))
+	if decodeerr == nil {
+		isHexString = true
+	}
+
+	// TODO: OpenOLT Agent expects a hex string for OMCI packets rather than binary.  Fix this in the agent and then we can pass binary Pkt: omciMsg.Message.
+	var omciMessage *oop.OmciMsg
+	if isHexString {
+		omciMessage = &oop.OmciMsg{IntfId: intfID, OnuId: onuID, Pkt: omciMsg.Message}
+	} else {
+		hexPkt := make([]byte, hex.EncodedLen(len(omciMsg.Message)))
+		hex.Encode(hexPkt, omciMsg.Message)
+		omciMessage = &oop.OmciMsg{IntfId: intfID, OnuId: onuID, Pkt: hexPkt}
+	}
+
+	_, err := dh.Client.OmciMsgOut(context.Background(), omciMessage)
+	if err != nil {
+		return NewErrCommunication("omci-send-failed", log.Fields{
+			"interface-id": intfID,
+			"onu-id":       onuID,
+			"message":      omciMessage}, err).Log()
+	}
+	log.Debugw("Sent Omci message", log.Fields{"intfID": intfID, "onuID": onuID, "omciMsg": hex.EncodeToString(omciMsg.Message)})
+	return nil
+}
+
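+// activateONU records the ONU in the flow manager and asks the OLT agent to activate
+// it. An AlreadyExists response from the agent is treated as "activation in progress"
+// rather than an error.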
+func (dh *DeviceHandler) activateONU(ctx context.Context, intfID uint32, onuID int64, serialNum *oop.SerialNumber, serialNumber string) error {
+	log.Debugw("activate-onu", log.Fields{"intfID": intfID, "onuID": onuID, "serialNum": serialNum, "serialNumber": serialNumber})
+	dh.flowMgr.UpdateOnuInfo(ctx, intfID, uint32(onuID), serialNumber)
+	// TODO: need resource manager
+	var pir uint32 = 1000000
+	Onu := oop.Onu{IntfId: intfID, OnuId: uint32(onuID), SerialNumber: serialNum, Pir: pir}
+	if _, err := dh.Client.ActivateOnu(ctx, &Onu); err != nil {
+		st, _ := status.FromError(err)
+		if st.Code() == codes.AlreadyExists {
+			log.Debugw("ONU activation is in progress", log.Fields{"SerialNumber": serialNumber})
+		} else {
+			return NewErrAdapter("onu-activate-failed", log.Fields{"onu": Onu}, err).Log()
+		}
+	} else {
+		log.Infow("activated-onu", log.Fields{"SerialNumber": serialNumber})
+	}
+	return nil
+}
+
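+// onuDiscIndication handles an ONU discovery indication: it de-duplicates repeated
+// discoveries of the same serial number, creates the child device in the core (and an
+// ONU ID via the resource manager) if it does not exist yet, caches the ONU locally,
+// marks it REACHABLE/DISCOVERED and finally triggers its activation.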
+func (dh *DeviceHandler) onuDiscIndication(ctx context.Context, onuDiscInd *oop.OnuDiscIndication, sn string) error {
+
+	channelID := onuDiscInd.GetIntfId()
+	parentPortNo := IntfIDToPortNo(onuDiscInd.GetIntfId(), voltha.Port_PON_OLT)
+
+	log.Infow("new-discovery-indication", log.Fields{"sn": sn})
+
+	kwargs := make(map[string]interface{})
+	if sn != "" {
+		kwargs["serial_number"] = sn
+	} else {
+		return NewErrInvalidValue(log.Fields{"serial-number": sn}, nil).Log()
+	}
+
+	if _, loaded := dh.discOnus.LoadOrStore(sn, true); loaded {
+		log.Warnw("onu-sn-is-already-being-processed", log.Fields{"sn": sn})
+		return nil
+	}
+
+	var onuID uint32
+
+	// check whether the ONU is already known to the OLT
+	// NOTE the second time the ONU is discovered this should return a device
+	onuDevice, err := dh.coreProxy.GetChildDevice(ctx, dh.device.Id, kwargs)
+
+	if err != nil {
+		log.Warnw("core-proxy-get-child-device-failed", log.Fields{"parentDevice": dh.device.Id, "err": err, "sn": sn})
+		if e, ok := status.FromError(err); ok {
+			log.Warnw("core-proxy-get-child-device-failed-with-code", log.Fields{"errCode": e.Code(), "sn": sn})
+			switch e.Code() {
+			case codes.Internal:
+				// this probably means NOT FOUND, so just create a new device
+				onuDevice = nil
+			case codes.DeadlineExceeded:
+				// if the call times out, cleanup and exit
+				dh.discOnus.Delete(sn)
+				return NewErrTimeout("get-child-device", log.Fields{"device-id": dh.device.Id}, err).Log()
+			}
+		}
+	}
+
+	if onuDevice == nil {
+		// NOTE this should happen a single time, and only if GetChildDevice returns NotFound
+		log.Infow("creating-new-onu", log.Fields{"sn": sn})
+		// we need to create a new ChildDevice
+		ponintfid := onuDiscInd.GetIntfId()
+		dh.lockDevice.Lock()
+		onuID, err = dh.resourceMgr.GetONUID(ctx, ponintfid)
+		dh.lockDevice.Unlock()
+
+		log.Infow("creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
+
+		if err != nil {
+			// if we can't create an ID in resource manager,
+			// cleanup and exit
+			dh.discOnus.Delete(sn)
+			return NewErrAdapter("resource-manage-get-onu-id-failed", log.Fields{
+				"pon-interface-id": ponintfid,
+				"serial-number":    sn}, err).Log()
+		}
+
+		if onuDevice, err = dh.coreProxy.ChildDeviceDetected(context.TODO(), dh.device.Id, int(parentPortNo),
+			"", int(channelID), string(onuDiscInd.SerialNumber.GetVendorId()), sn, int64(onuID)); err != nil {
+			dh.discOnus.Delete(sn)
+			dh.resourceMgr.FreeonuID(ctx, ponintfid, []uint32{onuID}) // NOTE I'm not sure this method is actually cleaning up the right thing
+			return NewErrAdapter("core-proxy-child-device-detected-failed", log.Fields{
+				"pon-interface-id": ponintfid,
+				"serial-number":    sn}, err).Log()
+		}
+
+		log.Infow("onu-child-device-added", log.Fields{"onuDevice": onuDevice, "sn": sn})
+	}
+
+	// we can now use the existing ONU Id
+	onuID = onuDevice.ProxyAddress.OnuId
+
+	//Insert the ONU into the cache for use in OnuIndication.
+	//TODO: Do we need to remove this from the cache on ONU change, or wait for it to be overwritten on the next discovery?
+	log.Debugw("onu-discovery-indication-key-create", log.Fields{"onuID": onuID,
+		"intfId": onuDiscInd.GetIntfId(), "sn": sn})
+	onuKey := dh.formOnuKey(onuDiscInd.GetIntfId(), onuID)
+
+	onuDev := NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuID, onuDiscInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId)
+	dh.onus.Store(onuKey, onuDev)
+	log.Debugw("new-onu-device-discovered", log.Fields{"onu": onuDev, "sn": sn})
+
+	if err = dh.coreProxy.DeviceStateUpdate(ctx, onuDevice.Id, common.ConnectStatus_REACHABLE, common.OperStatus_DISCOVERED); err != nil {
+		return NewErrAdapter("failed-to-update-device-state", log.Fields{
+			"device-id":     onuDevice.Id,
+			"serial-number": sn}, err).Log()
+	}
+	log.Infow("onu-discovered-reachable", log.Fields{"deviceId": onuDevice.Id, "sn": sn})
+	if err = dh.activateONU(ctx, onuDiscInd.IntfId, int64(onuID), onuDiscInd.SerialNumber, sn); err != nil {
+		return NewErrAdapter("onu-activation-failed", log.Fields{
+			"device-id":     onuDevice.Id,
+			"serial-number": sn}, err).Log()
+	}
+	return nil
+}
+
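+// onuIndication handles an ONU state indication. It resolves the ONU device either
+// from the local cache or from the core, warns on interface/ONU-ID mismatches and
+// then propagates the new state to the ONU adapter via updateOnuStates.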
+func (dh *DeviceHandler) onuIndication(onuInd *oop.OnuIndication) error {
+	serialNumber := dh.stringifySerialNumber(onuInd.SerialNumber)
+
+	kwargs := make(map[string]interface{})
+	ponPort := IntfIDToPortNo(onuInd.GetIntfId(), voltha.Port_PON_OLT)
+	var onuDevice *voltha.Device
+	var err error
+	foundInCache := false
+	log.Debugw("ONU indication key create", log.Fields{"onuId": onuInd.OnuId,
+		"intfId": onuInd.GetIntfId()})
+	onuKey := dh.formOnuKey(onuInd.GetIntfId(), onuInd.OnuId)
+
+	errFields := log.Fields{"device-id": dh.device.Id}
+
+	if onuInCache, ok := dh.onus.Load(onuKey); ok {
+
+		//If the ONU was discovered before, use GetDevice to fetch onuDevice because it is cheaper.
+		foundInCache = true
+		errFields["onu-id"] = onuInCache.(*OnuDevice).deviceID
+		onuDevice, err = dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, onuInCache.(*OnuDevice).deviceID)
+	} else {
+		//If the ONU is not found in the adapter cache, we have to use GetChildDevice to get onuDevice
+		if serialNumber != "" {
+			kwargs["serial_number"] = serialNumber
+			errFields["serial-number"] = serialNumber
+		} else {
+			kwargs["onu_id"] = onuInd.OnuId
+			kwargs["parent_port_no"] = ponPort
+			errFields["onu-id"] = onuInd.OnuId
+			errFields["parent-port-no"] = ponPort
+		}
+		onuDevice, err = dh.coreProxy.GetChildDevice(context.TODO(), dh.device.Id, kwargs)
+	}
+
+	if err != nil || onuDevice == nil {
+		return NewErrNotFound("onu-device", errFields, err).Log()
+	}
+
+	if onuDevice.ParentPortNo != ponPort {
+		log.Warnw("ONU-is-on-a-different-intf-id-now", log.Fields{
+			"previousIntfId": onuDevice.ParentPortNo,
+			"currentIntfId":  ponPort})
+	}
+
+	if onuDevice.ProxyAddress.OnuId != onuInd.OnuId {
+		log.Warnw("ONU-id-mismatch, can happen if both voltha and the olt rebooted", log.Fields{
+			"expected_onu_id": onuDevice.ProxyAddress.OnuId,
+			"received_onu_id": onuInd.OnuId})
+	}
+	if !foundInCache {
+		onuKey := dh.formOnuKey(onuInd.GetIntfId(), onuInd.GetOnuId())
+
+		dh.onus.Store(onuKey, NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuInd.GetOnuId(), onuInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId))
+
+	}
+	if err := dh.updateOnuStates(onuDevice, onuInd); err != nil {
+		return NewErrCommunication("state-update-failed", errFields, err).Log()
+	}
+	return nil
+}
+
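+// updateOnuStates forwards the ONU oper-state ("up" or "down") to the ONU adapter as
+// an inter-adapter ONU_IND_REQUEST message; any other oper-state is rejected as invalid.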
+func (dh *DeviceHandler) updateOnuStates(onuDevice *voltha.Device, onuInd *oop.OnuIndication) error {
+	ctx := context.TODO()
+	log.Debugw("onu-indication-for-state", log.Fields{"onuIndication": onuInd, "DeviceId": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+	if onuInd.AdminState == "down" {
+		// Tests have shown that we sometimes get OperState as NOT down even if AdminState is down; force it to down
+		if onuInd.OperState != "down" {
+			log.Warnw("ONU-admin-state-down", log.Fields{"operState": onuInd.OperState})
+			onuInd.OperState = "down"
+		}
+	}
+
+	switch onuInd.OperState {
+	case "down", "up":
+		log.Debugw("sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "DeviceId": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+		// TODO NEW CORE do not hardcode adapter name. Handler needs Adapter reference
+		err := dh.AdapterProxy.SendInterAdapterMessage(ctx, onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
+			"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
+		if err != nil {
+			return NewErrCommunication("inter-adapter-send-failed", log.Fields{
+				"onu-indicator": onuInd,
+				"source":        "openolt",
+				"device-type":   onuDevice.Type,
+				"device-id":     onuDevice.Id}, err).Log()
+		}
+	default:
+		return NewErrInvalidValue(log.Fields{"oper-state": onuInd.OperState}, nil).Log()
+	}
+	return nil
+}
+
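+// stringifySerialNumber renders an ONU serial number as the 4-character vendor ID
+// followed by the hex-encoded vendor-specific bytes, e.g. vendor "BRCM" with
+// vendor-specific bytes {0x12, 0x34, 0x56, 0x78} becomes "BRCM12345678".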
+func (dh *DeviceHandler) stringifySerialNumber(serialNum *oop.SerialNumber) string {
+	if serialNum != nil {
+		return string(serialNum.VendorId) + dh.stringifyVendorSpecific(serialNum.VendorSpecific)
+	}
+	return ""
+}
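+// deStringifySerialNumber is the inverse of stringifySerialNumber: the first four
+// characters are taken as the vendor ID and the remainder is hex-decoded into the
+// vendor-specific bytes (the input is expected to be at least four characters long).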
+func (dh *DeviceHandler) deStringifySerialNumber(serialNum string) (*oop.SerialNumber, error) {
+	decodedStr, err := hex.DecodeString(serialNum[4:])
+	if err != nil {
+		return nil, err
+	}
+	return &oop.SerialNumber{
+		VendorId:       []byte(serialNum[:4]),
+		VendorSpecific: []byte(decodedStr),
+	}, nil
+}
+
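+// stringifyVendorSpecific hex-encodes the first four vendor-specific bytes, one
+// nibble at a time, e.g. {0x12, 0x34, 0x56, 0x78} -> "12345678". It assumes at
+// least four bytes are present.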
+func (dh *DeviceHandler) stringifyVendorSpecific(vendorSpecific []byte) string {
+	tmp := fmt.Sprintf("%x", (uint32(vendorSpecific[0])>>4)&0x0f) +
+		fmt.Sprintf("%x", uint32(vendorSpecific[0]&0x0f)) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[1])>>4)&0x0f) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[1]))&0x0f) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[2])>>4)&0x0f) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[2]))&0x0f) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[3])>>4)&0x0f) +
+		fmt.Sprintf("%x", (uint32(vendorSpecific[3]))&0x0f)
+	return tmp
+}
+
+//UpdateFlowsBulk updates the flows in bulk
+func (dh *DeviceHandler) UpdateFlowsBulk() error {
+	return ErrNotImplemented
+}
+
+//GetChildDevice returns the child device for given parent port and onu id
+func (dh *DeviceHandler) GetChildDevice(parentPort, onuID uint32) (*voltha.Device, error) {
+	log.Debugw("GetChildDevice", log.Fields{"pon port": parentPort, "onuID": onuID})
+	kwargs := make(map[string]interface{})
+	kwargs["onu_id"] = onuID
+	kwargs["parent_port_no"] = parentPort
+	onuDevice, err := dh.coreProxy.GetChildDevice(context.TODO(), dh.device.Id, kwargs)
+	if err != nil {
+		return nil, NewErrNotFound("onu", log.Fields{
+			"interface-id": parentPort,
+			"onu-id":       onuID}, err).Log()
+	}
+	log.Debugw("Successfully received child device from core", log.Fields{"child_device": *onuDevice})
+	return onuDevice, nil
+}
+
+// SendPacketInToCore sends packet-in to core
+// For this, it calls SendPacketIn of the core-proxy which uses a device specific topic to send the request.
+// The adapter handling the device creates a device specific topic
+func (dh *DeviceHandler) SendPacketInToCore(logicalPort uint32, packetPayload []byte) error {
+	log.Debugw("send-packet-in-to-core", log.Fields{
+		"port":   logicalPort,
+		"packet": hex.EncodeToString(packetPayload),
+	})
+	if err := dh.coreProxy.SendPacketIn(context.TODO(), dh.device.Id, logicalPort, packetPayload); err != nil {
+		return NewErrCommunication("packet-send-failed", log.Fields{
+			"source":       "adapter",
+			"destination":  "core",
+			"device-id":    dh.device.Id,
+			"logical-port": logicalPort,
+			"packet":       hex.EncodeToString(packetPayload)}, err).Log()
+	}
+	log.Debugw("Sent packet-in to core successfully", log.Fields{
+		"packet": hex.EncodeToString(packetPayload),
+	})
+	return nil
+}
+
+// AddUniPortToOnu adds the uni port to the onu device
+func (dh *DeviceHandler) AddUniPortToOnu(intfID, onuID, uniPort uint32) {
+	onuKey := dh.formOnuKey(intfID, onuID)
+
+	if onuDevice, ok := dh.onus.Load(onuKey); ok {
+		// add it to the uniPort map for the onu device
+		if _, ok = onuDevice.(*OnuDevice).uniPorts[uniPort]; !ok {
+			onuDevice.(*OnuDevice).uniPorts[uniPort] = struct{}{}
+			log.Debugw("adding-uni-port", log.Fields{"port": uniPort, "intfID": intfID, "onuId": onuID})
+		}
+	}
+}
+
+//UpdateFlowsIncrementally incrementally updates the device flows and groups
+func (dh *DeviceHandler) UpdateFlowsIncrementally(ctx context.Context, device *voltha.Device, flows *of.FlowChanges, groups *of.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
+	log.Debugw("Received-incremental-flowupdate-in-device-handler", log.Fields{"deviceID": device.Id, "flows": flows, "groups": groups, "flowMetadata": flowMetadata})
+	if flows != nil {
+		for _, flow := range flows.ToRemove.Items {
+			log.Debugw("Removing flow", log.Fields{"deviceId": device.Id, "flowToRemove": flow})
+			dh.flowMgr.RemoveFlow(ctx, flow)
+		}
+
+		for _, flow := range flows.ToAdd.Items {
+			log.Debugw("Adding flow", log.Fields{"deviceId": device.Id, "flowToAdd": flow})
+			dh.flowMgr.AddFlow(ctx, flow, flowMetadata)
+		}
+	}
+	if groups != nil && flows != nil {
+		for _, flow := range flows.ToRemove.Items {
+			log.Debugw("Removing flow", log.Fields{"deviceID": device.Id, "flowToRemove": flow})
+			//  dh.flowMgr.RemoveFlow(flow)
+		}
+	}
+
+	if groups != nil {
+		for _, group := range groups.ToAdd.Items {
+			dh.flowMgr.AddGroup(ctx, group)
+		}
+		for _, group := range groups.ToUpdate.Items {
+			dh.flowMgr.ModifyGroup(ctx, group)
+		}
+		if len(groups.ToRemove.Items) != 0 {
+			log.Debug("Group delete operation is not supported for now")
+		}
+	}
+	log.Debug("UpdateFlowsIncrementally done successfully")
+	return nil
+}
+
+//DisableDevice disables the given device
+//It marks the following for the given device:
+//Device-Handler Admin-State : down
+//Device Port-State: UNKNOWN
+//Device Oper-State: UNKNOWN
+func (dh *DeviceHandler) DisableDevice(device *voltha.Device) error {
+	/* On device disable, the admin state has to be updated before sending the request to the agent,
+	   since the indication thread may otherwise process invalid indications of ONU and OLT */
+	dh.lockDevice.Lock()
+	dh.adminState = "down"
+	dh.lockDevice.Unlock()
+	if dh.Client != nil {
+		if _, err := dh.Client.DisableOlt(context.Background(), new(oop.Empty)); err != nil {
+			if e, ok := status.FromError(err); ok && e.Code() == codes.Internal {
+				dh.lockDevice.Lock()
+				dh.adminState = "up"
+				dh.lockDevice.Unlock()
+				return NewErrAdapter("olt-disable-failed", log.Fields{"device-id": device.Id}, err).Log()
+			}
+		}
+	}
+	log.Debugw("olt-disabled", log.Fields{"deviceID": device.Id})
+	/* Discovered ONU entries need to be cleared, since on device disable the child devices go to
+	UNREACHABLE state and need to be configured again */
+
+	dh.discOnus = sync.Map{}
+	dh.onus = sync.Map{}
+
+	go dh.notifyChildDevices("unreachable")
+	cloned := proto.Clone(device).(*voltha.Device)
+	// Update all PON port states on that device to UNKNOWN; the NNI port remains active because it stays active in the openolt agent.
+	for _, port := range cloned.Ports {
+		if port.GetType() == voltha.Port_PON_OLT {
+			if err := dh.coreProxy.PortStateUpdate(context.TODO(), cloned.Id,
+				voltha.Port_PON_OLT, port.GetPortNo(), voltha.OperStatus_UNKNOWN); err != nil {
+				return err
+			}
+		}
+	}
+
+	log.Debugw("disable-device-end", log.Fields{"deviceID": device.Id})
+	return nil
+}
+
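+// notifyChildDevices sends the given oper-state (e.g. "unreachable") to every child
+// ONU of this OLT as an inter-adapter ONU indication; failures are logged per ONU but
+// do not stop the loop.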
+func (dh *DeviceHandler) notifyChildDevices(state string) {
+
+	// Update onu state as unreachable in onu adapter
+	onuInd := oop.OnuIndication{}
+	onuInd.OperState = state
+	//get the child devices for the parent device
+	onuDevices, err := dh.coreProxy.GetChildDevices(context.TODO(), dh.device.Id)
+	if err != nil {
+		log.Errorw("failed-to-get-child-devices-information", log.Fields{"deviceID": dh.device.Id, "error": err})
+	}
+	if onuDevices != nil {
+		for _, onuDevice := range onuDevices.Items {
+			err := dh.AdapterProxy.SendInterAdapterMessage(context.TODO(), &onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
+				"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
+			if err != nil {
+				log.Errorw("failed-to-send-inter-adapter-message", log.Fields{"OnuInd": onuInd,
+					"From Adapter": "openolt", "DeviceType": onuDevice.Type, "DeviceID": onuDevice.Id})
+			}
+
+		}
+	}
+
+}
+
+//ReenableDevice re-enables the olt device after disable
+//It marks the following for the given device:
+//Device-Handler Admin-State : up
+//Device Port-State: ACTIVE
+//Device Oper-State: ACTIVE
+func (dh *DeviceHandler) ReenableDevice(device *voltha.Device) error {
+	dh.lockDevice.Lock()
+	dh.adminState = "up"
+	dh.lockDevice.Unlock()
+
+	if _, err := dh.Client.ReenableOlt(context.Background(), new(oop.Empty)); err != nil {
+		if e, ok := status.FromError(err); ok && e.Code() == codes.Internal {
+			dh.lockDevice.Lock()
+			dh.adminState = "down"
+			dh.lockDevice.Unlock()
+			return NewErrAdapter("olt-reenable-failed", log.Fields{"device-id": dh.device.Id}, err).Log()
+		}
+	}
+	log.Debug("olt-reenabled")
+
+	cloned := proto.Clone(device).(*voltha.Device)
+	// Update the port states on that device
+
+	if err := dh.disableAdminDownPorts(device); err != nil {
+		return NewErrAdapter("port-status-update-failed-after-olt-reenable", log.Fields{"device": device}, err).Log()
+	}
+	//Update the device oper status as ACTIVE
+	cloned.OperStatus = voltha.OperStatus_ACTIVE
+	dh.device = cloned
+
+	if err := dh.coreProxy.DeviceStateUpdate(context.TODO(), cloned.Id, cloned.ConnectStatus, cloned.OperStatus); err != nil {
+		return NewErrAdapter("state-update-failed", log.Fields{
+			"device-id":      device.Id,
+			"connect-status": cloned.ConnectStatus,
+			"oper-status":    cloned.OperStatus}, err).Log()
+	}
+
+	log.Debugw("ReEnableDevice-end", log.Fields{"deviceID": device.Id})
+
+	return nil
+}
+
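+// clearUNIData cleans up the per-UNI state of an ONU in the KV store: tech-profile
+// instances, flow IDs, meter IDs, PON resources, tech-profile IDs and gem-port
+// packet-in entries for every UNI port of the ONU.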
+func (dh *DeviceHandler) clearUNIData(ctx context.Context, onu *rsrcMgr.OnuGemInfo) error {
+	var uniID uint32
+	var err error
+	for _, port := range onu.UniPorts {
+		uniID = UniIDFromPortNum(uint32(port))
+		log.Debugw("clearing-resource-data-for-uni-port", log.Fields{"port": port, "uniID": uniID})
+		/* Delete tech-profile instance from the KV store */
+		if err = dh.flowMgr.DeleteTechProfileInstances(ctx, onu.IntfID, onu.OnuID, uniID, onu.SerialNumber); err != nil {
+			log.Debugw("Failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+		}
+		log.Debugw("Deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+		flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID))
+		for _, flowID := range flowIDs {
+			dh.resourceMgr.FreeFlowID(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID), flowID)
+		}
+		tpIDList := dh.resourceMgr.GetTechProfileIDForOnu(ctx, onu.IntfID, onu.OnuID, uniID)
+		for _, tpID := range tpIDList {
+			if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "upstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
+				log.Debugw("Failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+			}
+			log.Debugw("Removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+			if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "downstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
+				log.Debugw("Failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+			}
+			log.Debugw("Removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+		}
+		dh.resourceMgr.FreePONResourcesForONU(ctx, onu.IntfID, onu.OnuID, uniID)
+		if err = dh.resourceMgr.RemoveTechProfileIDsForOnu(ctx, onu.IntfID, onu.OnuID, uniID); err != nil {
+			log.Debugw("Failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+		}
+		log.Debugw("Removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+		if err = dh.resourceMgr.DelGemPortPktIn(ctx, onu.IntfID, onu.OnuID, uint32(port)); err != nil {
+			log.Debugw("Failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
+		}
+	}
+	return nil
+}
+
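+// clearNNIData frees the flow IDs and resource maps associated with the NNI ports
+// recorded in the KV store and then removes the NNI entry itself.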
+func (dh *DeviceHandler) clearNNIData(ctx context.Context) error {
+	nniUniID := -1
+	nniOnuID := -1
+
+	if dh.resourceMgr == nil {
+		return fmt.Errorf("no resource manager for deviceID %s", dh.deviceID)
+	}
+	//Free the flow-ids for the NNI port
+	nni, err := dh.resourceMgr.GetNNIFromKVStore(ctx)
+	if err != nil {
+		return NewErrPersistence("get", "nni", 0, nil, err).Log()
+	}
+	log.Debugw("NNI ports retrieved from KV store", log.Fields{"nni": nni})
+	for _, nniIntfID := range nni {
+		flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, uint32(nniIntfID), int32(nniOnuID), int32(nniUniID))
+		log.Debugw("Current flow ids for nni", log.Fields{"flow-ids": flowIDs})
+		for _, flowID := range flowIDs {
+			dh.resourceMgr.FreeFlowID(ctx, uint32(nniIntfID), -1, -1, uint32(flowID))
+		}
+		dh.resourceMgr.RemoveResourceMap(ctx, nniIntfID, int32(nniOnuID), int32(nniUniID))
+	}
+	if err = dh.resourceMgr.DelNNiFromKVStore(ctx); err != nil {
+		return NewErrPersistence("clear", "nni", 0, nil, err).Log()
+	}
+	return nil
+}
+
+// DeleteDevice deletes the device instance from openolt handler array.  Also clears allocated resource manager resources.  Also reboots the OLT hardware!
+func (dh *DeviceHandler) DeleteDevice(ctx context.Context, device *voltha.Device) error {
+	log.Debug("Function entry delete device")
+	dh.lockDevice.Lock()
+	if dh.adminState == "deleted" {
+		dh.lockDevice.Unlock()
+		return nil
+	}
+	dh.adminState = "deleted"
+	dh.lockDevice.Unlock()
+	/* Clear the KV store data associated with the all the UNI ports
+	   This clears up flow data and also resource map data for various
+	   other pon resources like alloc_id and gemport_id
+	*/
+	if dh.resourceMgr != nil {
+		noOfPonPorts := dh.resourceMgr.DevInfo.GetPonPorts()
+		var ponPort uint32
+		for ponPort = 0; ponPort < noOfPonPorts; ponPort++ {
+			var onuGemData []rsrcMgr.OnuGemInfo
+			err := dh.resourceMgr.ResourceMgrs[ponPort].GetOnuGemInfo(ctx, ponPort, &onuGemData)
+			if err != nil {
+				return NewErrNotFound("onu", log.Fields{
+					"device-id": dh.device.Id,
+					"pon-port":  ponPort}, err).Log()
+			}
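+			// For every ONU on this PON port, release its UNI data, the per-gem flow-id
+			// cache and finally the ONU ID itself, before the per-interface maps are
+			// removed below.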
+			for _, onu := range onuGemData {
+				onuID := make([]uint32, 1)
+				log.Debugw("onu-data", log.Fields{"onu": onu})
+				if err = dh.clearUNIData(ctx, &onu); err != nil {
+					log.Errorw("Failed to clear data for onu", log.Fields{"onu-device": onu})
+				}
+				// Clear flowids for gem cache.
+				for _, gem := range onu.GemPorts {
+					dh.resourceMgr.DeleteFlowIDsForGem(ctx, ponPort, gem)
+				}
+				onuID[0] = onu.OnuID
+				dh.resourceMgr.FreeonuID(ctx, ponPort, onuID)
+			}
+			dh.resourceMgr.DeleteIntfIDGempMapPath(ctx, ponPort)
+			onuGemData = nil
+			err = dh.resourceMgr.DelOnuGemInfoForIntf(ctx, ponPort)
+			if err != nil {
+				log.Errorw("Failed to delete onu gem info for pon port", log.Fields{"intf-id": ponPort, "error": err})
+			}
+		}
+		/* Clear the flows from KV store associated with NNI port.
+		   There are mostly trap rules from NNI port (like LLDP)
+		*/
+		if err := dh.clearNNIData(ctx); err != nil {
+			log.Errorw("Failed to clear data for NNI port", log.Fields{"device-id": dh.deviceID})
+		}
+
+		/* Clear the resource pool for each PON port in the background */
+		go dh.resourceMgr.Delete(ctx)
+	}
+
+	/*Delete ONU map for the device*/
+	dh.onus.Range(func(key interface{}, value interface{}) bool {
+		dh.onus.Delete(key)
+		return true
+	})
+
+	log.Debug("Removed-device-from-Resource-manager-KV-store")
+	// Stop the Stats collector
+	dh.stopCollector <- true
+	// stop the heartbeat check routine
+	dh.stopHeartbeatCheck <- true
+	// Reboot the OLT to reset its state
+	if dh.Client != nil {
+		if _, err := dh.Client.Reboot(ctx, new(oop.Empty)); err != nil {
+			return NewErrAdapter("olt-reboot-failed", log.Fields{"device-id": dh.deviceID}, err).Log()
+		}
+	}
+	cloned := proto.Clone(device).(*voltha.Device)
+	cloned.OperStatus = voltha.OperStatus_UNKNOWN
+	cloned.ConnectStatus = voltha.ConnectStatus_UNREACHABLE
+	if err := dh.coreProxy.DeviceStateUpdate(ctx, cloned.Id, cloned.ConnectStatus, cloned.OperStatus); err != nil {
+		return NewErrAdapter("device-state-update-failed", log.Fields{
+			"device-id":      device.Id,
+			"connect-status": cloned.ConnectStatus,
+			"oper-status":    cloned.OperStatus}, err).Log()
+	}
+	return nil
+}
+
+//RebootDevice reboots the given device
+func (dh *DeviceHandler) RebootDevice(device *voltha.Device) error {
+	if _, err := dh.Client.Reboot(context.Background(), new(oop.Empty)); err != nil {
+		return NewErrAdapter("olt-reboot-failed", log.Fields{"device-id": dh.deviceID}, err).Log()
+	}
+	log.Debugw("rebooted-device-successfully", log.Fields{"deviceID": device.Id})
+	return nil
+}
+
+func (dh *DeviceHandler) handlePacketIndication(ctx context.Context, packetIn *oop.PacketIndication) error {
+	log.Debugw("Received packet-in", log.Fields{
+		"packet-indication": *packetIn,
+		"packet":            hex.EncodeToString(packetIn.Pkt),
+	})
+	logicalPortNum, err := dh.flowMgr.GetLogicalPortFromPacketIn(ctx, packetIn)
+	if err != nil {
+		return NewErrNotFound("logical-port", log.Fields{"packet": hex.EncodeToString(packetIn.Pkt)}, err).Log()
+	}
+	log.Debugw("sending packet-in to core", log.Fields{
+		"logicalPortNum": logicalPortNum,
+		"packet":         hex.EncodeToString(packetIn.Pkt),
+	})
+	if err := dh.coreProxy.SendPacketIn(context.TODO(), dh.device.Id, logicalPortNum, packetIn.Pkt); err != nil {
+		return NewErrCommunication("send-packet-in", log.Fields{
+			"destination": "core",
+			"source":      dh.deviceType,
+			"packet":      hex.EncodeToString(packetIn.Pkt)}, err).Log()
+	}
+	log.Debugw("Successfully sent packet-in to core", log.Fields{
+		"packet": hex.EncodeToString(packetIn.Pkt),
+	})
+	return nil
+}
+
+// PacketOut sends packet-out from VOLTHA to OLT on the egress port provided
+func (dh *DeviceHandler) PacketOut(ctx context.Context, egressPortNo int, packet *of.OfpPacketOut) error {
+	log.Debugw("incoming-packet-out", log.Fields{
+		"deviceID":       dh.deviceID,
+		"egress_port_no": egressPortNo,
+		"pkt-length":     len(packet.Data),
+		"packet":         hex.EncodeToString(packet.Data),
+	})
+
+	egressPortType := IntfIDToPortTypeName(uint32(egressPortNo))
+	if egressPortType == voltha.Port_ETHERNET_UNI {
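+		// Bytes 12-13 carry the outer EtherType; bytes 16-17 carry the inner EtherType
+		// when an outer VLAN tag (4 bytes) precedes it.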
+		outerEthType := (uint16(packet.Data[12]) << 8) | uint16(packet.Data[13])
+		innerEthType := (uint16(packet.Data[16]) << 8) | uint16(packet.Data[17])
+		if outerEthType == 0x8942 || outerEthType == 0x88cc {
+			// Do not packet-out LLDP (0x88cc) or BDDP (0x8942) packets on UNI ports.
+			// ONOS is not aware of UNI/NNI distinctions and packets out on every
+			// available port of the logical switch, but discovery traffic does not
+			// belong on the UNI links.
+			log.Debug("dropping-lldp-packet-out-on-uni")
+			return nil
+		}
+		if outerEthType == 0x88a8 || outerEthType == 0x8100 {
+			if innerEthType == 0x8100 {
+				// q-in-q 802.1ad or 802.1q double tagged packet.
+				// slice out the outer tag.
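+				// The outer tag occupies bytes 12-15, so join the header (bytes 0-11)
+				// directly to the inner tag starting at byte 16.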
+				packet.Data = append(packet.Data[:12], packet.Data[16:]...)
+				log.Debugw("packet-now-single-tagged", log.Fields{"packetData": hex.EncodeToString(packet.Data)})
+			}
+		}
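+		// The UNI logical port number encodes the PON interface, ONU and UNI IDs;
+		// decode them to address the target ONU.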
+		intfID := IntfIDFromUniPortNum(uint32(egressPortNo))
+		onuID := OnuIDFromPortNum(uint32(egressPortNo))
+		uniID := UniIDFromPortNum(uint32(egressPortNo))
+
+		gemPortID, err := dh.flowMgr.GetPacketOutGemPortID(ctx, intfID, onuID, uint32(egressPortNo))
+		if err != nil {
+			// In this case the openolt agent receives a gemPortID of 0 and tries to
+			// resolve the gem port itself; this does not always succeed, so the
+			// packet-out may fail.
+			log.Errorw("failed-to-retrieve-gemport-id-for-packet-out", log.Fields{
+				"packet": hex.EncodeToString(packet.Data),
+			})
+		}
+
+		onuPkt := oop.OnuPacket{IntfId: intfID, OnuId: onuID, PortNo: uint32(egressPortNo), GemportId: gemPortID, Pkt: packet.Data}
+
+		log.Debugw("sending-packet-to-onu", log.Fields{
+			"egress_port_no": egressPortNo,
+			"IntfId":         intfID,
+			"onuID":          onuID,
+			"uniID":          uniID,
+			"gemPortID":      gemPortID,
+			"packet":         hex.EncodeToString(packet.Data),
+		})
+
+		if _, err := dh.Client.OnuPacketOut(ctx, &onuPkt); err != nil {
+			return NewErrCommunication("packet-out-send", log.Fields{
+				"source":             "adapter",
+				"destination":        "onu",
+				"egress-port-number": egressPortNo,
+				"interface-id":       intfID,
+				"onu-id":             onuID,
+				"uni-id":             uniID,
+				"gem-port-id":        gemPortID,
+				"packet":             hex.EncodeToString(packet.Data)}, err).Log()
+		}
+	} else if egressPortType == voltha.Port_ETHERNET_NNI {
+		nniIntfID, err := IntfIDFromNniPortNum(uint32(egressPortNo))
+		if err != nil {
+			return NewErrInvalidValue(log.Fields{"egress-nni-port": egressPortNo}, err).Log()
+		}
+		uplinkPkt := oop.UplinkPacket{IntfId: nniIntfID, Pkt: packet.Data}
+
+		log.Debugw("sending-packet-to-nni", log.Fields{
+			"uplink_pkt": uplinkPkt,
+			"packet":     hex.EncodeToString(packet.Data),
+		})
+
+		if _, err := dh.Client.UplinkPacketOut(ctx, &uplinkPkt); err != nil {
+			return NewErrCommunication("packet-out-to-nni", log.Fields{"packet": hex.EncodeToString(packet.Data)}, err).Log()
+		}
+	} else {
+		log.Warnw("Packet-out-to-this-interface-type-not-implemented", log.Fields{
+			"egress_port_no": egressPortNo,
+			"egressPortType": egressPortType,
+			"packet":         hex.EncodeToString(packet.Data),
+		})
+	}
+	return nil
+}
+
+func (dh *DeviceHandler) formOnuKey(intfID, onuID uint32) string {
+	return strconv.Itoa(int(intfID)) + "." + strconv.Itoa(int(onuID))
+}
+
+func startHeartbeatCheck(dh *DeviceHandler) {
+	// start the heartbeat check towards the OLT.
+	var timerCheck *time.Timer
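+	// timerCheck is non-nil only while heartbeats are failing: it fires after
+	// HeartbeatFailReportInterval and reports the OLT as unreachable, and a
+	// successful heartbeat stops and clears it.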
+
+	for {
+		heartbeatTimer := time.NewTimer(dh.openOLT.HeartbeatCheckInterval)
+		select {
+		case <-heartbeatTimer.C:
+			ctx, cancel := context.WithTimeout(context.Background(), dh.openOLT.GrpcTimeoutInterval)
+			if heartBeat, err := dh.Client.HeartbeatCheck(ctx, new(oop.Empty)); err != nil {
+				log.Error("Heartbeat failed")
+				if timerCheck == nil {
+					// start an AfterFunc; when it expires, report the unreachable state to the core
+					timerCheck = time.AfterFunc(dh.openOLT.HeartbeatFailReportInterval, dh.updateStateUnreachable)
+				}
+			} else {
+				if timerCheck != nil {
+					if timerCheck.Stop() {
+						log.Debug("Heartbeat received within the timeout")
+					} else {
+						log.Debug("Heartbeat received after the timeout expired, updating the state")
+						go dh.notifyChildDevices("up")
+						if err := dh.coreProxy.DeviceStateUpdate(ctx, dh.device.Id, voltha.ConnectStatus_REACHABLE,
+							voltha.OperStatus_ACTIVE); err != nil {
+							log.Errorw("Failed to update device state", log.Fields{"deviceID": dh.device.Id, "error": err})
+						}
+					}
+					timerCheck = nil
+				}
+				log.Debugw("Heartbeat", log.Fields{"signature": heartBeat})
+			}
+			cancel()
+		case <-dh.stopHeartbeatCheck:
+			log.Debug("Stopping heartbeat check")
+			return
+		}
+	}
+}
+
+func (dh *DeviceHandler) updateStateUnreachable() {
+
+	go dh.notifyChildDevices("unreachable")
+	if err := dh.coreProxy.DeviceStateUpdate(context.TODO(), dh.device.Id, voltha.ConnectStatus_UNREACHABLE, voltha.OperStatus_UNKNOWN); err != nil {
+		NewErrAdapter("device-state-update-failed", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
+	}
+}
+
+// EnablePort to enable Pon interface
+func (dh *DeviceHandler) EnablePort(port *voltha.Port) error {
+	log.Debugw("enable-port", log.Fields{"Device": dh.device, "port": port})
+	return dh.modifyPhyPort(port, true)
+}
+
+// DisablePort to disable pon interface
+func (dh *DeviceHandler) DisablePort(port *voltha.Port) error {
+	log.Debugw("disable-port", log.Fields{"Device": dh.device, "port": port})
+	return dh.modifyPhyPort(port, false)
+}
+
+// modifyPhyPort is a common function to enable or disable a PON port; enablePort is true to enable the port and false to disable it.
+func (dh *DeviceHandler) modifyPhyPort(port *voltha.Port, enablePort bool) error {
+	ctx := context.Background()
+	log.Infow("modifyPhyPort", log.Fields{"port": port, "Enable": enablePort})
+	if port.GetType() == voltha.Port_ETHERNET_NNI {
+		// VOL-2505 tracks support for disabling the NNI port.
+		log.Infow("voltha-supports-single-nni-hence-disable-of-nni-not-allowed",
+			log.Fields{"Device": dh.device, "port": port})
+		return NewErrAdapter("illegal-port-request", log.Fields{
+			"port-type":    port.GetType(),
+			"enable-state": enablePort}, nil).Log()
+	}
+	// fetch the PON interface ID from the port number
+	ponID := PortNoToIntfID(port.GetPortNo(), voltha.Port_PON_OLT)
+	ponIntf := &oop.Interface{IntfId: ponID}
+	var operStatus voltha.OperStatus_Types
+	if enablePort {
+		operStatus = voltha.OperStatus_ACTIVE
+		out, err := dh.Client.EnablePonIf(ctx, ponIntf)
+
+		if err != nil {
+			return NewErrAdapter("pon-port-enable-failed", log.Fields{
+				"device-id": dh.device.Id,
+				"port":      port}, err).Log()
+		}
+		// update the local interface cache used for stats collection
+		dh.activePorts.Store(ponID, true)
+		log.Infow("enabled-pon-port", log.Fields{"out": out, "DeviceID": dh.device, "Port": port})
+	} else {
+		operStatus = voltha.OperStatus_UNKNOWN
+		out, err := dh.Client.DisablePonIf(ctx, ponIntf)
+		if err != nil {
+			return NewErrAdapter("pon-port-disable-failed", log.Fields{
+				"device-id": dh.device.Id,
+				"port":      port}, err).Log()
+		}
+		// update the local interface cache used for stats collection
+		dh.activePorts.Store(ponID, false)
+		log.Infow("disabled-pon-port", log.Fields{"out": out, "DeviceID": dh.device, "Port": port})
+	}
+	if err := dh.coreProxy.PortStateUpdate(ctx, dh.deviceID, voltha.Port_PON_OLT, port.PortNo, operStatus); err != nil {
+		return NewErrAdapter("port-state-update-failed", log.Fields{
+			"device-id": dh.deviceID,
+			"port":      port.PortNo}, err).Log()
+	}
+	return nil
+}
+
+// disableAdminDownPorts disables the ports whose admin state was disabled before a reboot or re-enable of the device.
+func (dh *DeviceHandler) disableAdminDownPorts(device *voltha.Device) error {
+	cloned := proto.Clone(device).(*voltha.Device)
+	// Disable the port and report the port oper status to the core
+	// if the port's admin state was disabled before the reboot or re-enable of the device.
+	for _, port := range cloned.Ports {
+		if port.AdminState == common.AdminState_DISABLED {
+			if err := dh.DisablePort(port); err != nil {
+				return NewErrAdapter("port-disable-failed", log.Fields{
+					"device-id": dh.deviceID,
+					"port":      port}, err).Log()
+			}
+		}
+	}
+	return nil
+}
+
+// populateActivePorts populates the activePorts map from the device's NNI and PON ports.
+func (dh *DeviceHandler) populateActivePorts(device *voltha.Device) {
+	log.Infow("populate-active-ports", log.Fields{"device": device})
+	for _, port := range device.Ports {
+		if port.Type == voltha.Port_ETHERNET_NNI {
+			if port.OperStatus == voltha.OperStatus_ACTIVE {
+				dh.activePorts.Store(PortNoToIntfID(port.PortNo, voltha.Port_ETHERNET_NNI), true)
+			} else {
+				dh.activePorts.Store(PortNoToIntfID(port.PortNo, voltha.Port_ETHERNET_NNI), false)
+			}
+		}
+		if port.Type == voltha.Port_PON_OLT {
+			if port.OperStatus == voltha.OperStatus_ACTIVE {
+				dh.activePorts.Store(PortNoToIntfID(port.PortNo, voltha.Port_PON_OLT), true)
+			} else {
+				dh.activePorts.Store(PortNoToIntfID(port.PortNo, voltha.Port_PON_OLT), false)
+			}
+		}
+	}
+}
+
+// ChildDeviceLost deletes ONU and clears pon resources related to it.
+func (dh *DeviceHandler) ChildDeviceLost(ctx context.Context, pPortNo uint32, onuID uint32) error {
+	log.Debugw("child-device-lost", log.Fields{"parent-device-id": dh.device.Id})
+	IntfID := PortNoToIntfID(pPortNo, voltha.Port_PON_OLT)
+	onuKey := dh.formOnuKey(IntfID, onuID)
+	onuDevice, ok := dh.onus.Load(onuKey)
+	if !ok {
+		return NewErrAdapter("failed-to-load-onu-details",
+			log.Fields{
+				"device-id":    dh.deviceID,
+				"onu-id":       onuID,
+				"interface-id": IntfID}, nil).Log()
+	}
+	var sn *oop.SerialNumber
+	var err error
+	if sn, err = dh.deStringifySerialNumber(onuDevice.(*OnuDevice).serialNumber); err != nil {
+		return NewErrAdapter("failed-to-destringify-serial-number",
+			log.Fields{
+				"device-id":     dh.deviceID,
+				"serial-number": onuDevice.(*OnuDevice).serialNumber}, err).Log()
+	}
+	onu := &oop.Onu{IntfId: IntfID, OnuId: onuID, SerialNumber: sn}
+	if _, err := dh.Client.DeleteOnu(context.Background(), onu); err != nil {
+		return NewErrAdapter("failed-to-delete-onu", log.Fields{
+			"device-id": dh.deviceID,
+			"onu-id":    onuID}, err).Log()
+	}
+	//clear PON resources associated with ONU
+	var onuGemData []rsrcMgr.OnuGemInfo
+	if err := dh.resourceMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
+		log.Errorw("Failed-to-get-onu-info-for-pon-port", log.Fields{
+			"device-id":    dh.deviceID,
+			"interface-id": IntfID,
+			"error":        err})
+	}
+
+	for i, onu := range onuGemData {
+		if onu.OnuID == onuID && onu.SerialNumber == onuDevice.(*OnuDevice).serialNumber {
+			log.Debugw("onu-data", log.Fields{"onu": onu})
+			if err := dh.clearUNIData(ctx, &onu); err != nil {
+				log.Errorw("Failed-to-clear-uni-data-for-onu", log.Fields{
+					"device-id":  dh.deviceID,
+					"onu-device": onu,
+					"error":      err})
+			}
+			// Clear flowids for gem cache.
+			for _, gem := range onu.GemPorts {
+				dh.resourceMgr.DeleteFlowIDsForGem(ctx, IntfID, gem)
+			}
+			onuGemData = append(onuGemData[:i], onuGemData[i+1:]...)
+			dh.resourceMgr.UpdateOnuGemInfo(ctx, IntfID, onuGemData)
+
+			dh.resourceMgr.FreeonuID(ctx, IntfID, []uint32{onu.OnuID})
+			break
+		}
+	}
+	dh.onus.Delete(onuKey)
+	dh.discOnus.Delete(onuDevice.(*OnuDevice).serialNumber)
+	return nil
+}
diff --git a/internal/pkg/core/device_handler_test.go b/internal/pkg/core/device_handler_test.go
new file mode 100644
index 0000000..1d1192e
--- /dev/null
+++ b/internal/pkg/core/device_handler_test.go
@@ -0,0 +1,1171 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"net"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
+	"github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+	"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	of "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	oop "github.com/opencord/voltha-protos/v3/go/openolt"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+func init() {
+	_, _ = log.AddPackage(log.JSON, log.DebugLevel, nil)
+}
+
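+// newMockCoreProxy returns a MockCoreProxy pre-populated with one OLT device and
+// two ONU devices used by the tests below.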
+func newMockCoreProxy() *mocks.MockCoreProxy {
+	mcp := mocks.MockCoreProxy{}
+	mcp.Devices = make(map[string]*voltha.Device)
+	var pm []*voltha.PmConfig
+	mcp.Devices["olt"] = &voltha.Device{
+
+		Id:           "olt",
+		Root:         true,
+		ParentId:     "logical_device",
+		ParentPortNo: 1,
+
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon"},
+			{PortNo: 2, Label: "nni"},
+		},
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "olt",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+		PmConfigs: &voltha.PmConfigs{
+			DefaultFreq:  10,
+			Id:           "olt",
+			FreqOverride: false,
+			Grouped:      false,
+			Metrics:      pm,
+		},
+	}
+	mcp.Devices["onu1"] = &voltha.Device{
+
+		Id:           "1",
+		Root:         false,
+		ParentId:     "olt",
+		ParentPortNo: 1,
+
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon"},
+			{PortNo: 2, Label: "uni"},
+		},
+		OperStatus: 4,
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			OnuId:          1,
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+		PmConfigs: &voltha.PmConfigs{
+			DefaultFreq:  10,
+			Id:           "olt",
+			FreqOverride: false,
+			Grouped:      false,
+			Metrics:      pm,
+		},
+	}
+	mcp.Devices["onu2"] = &voltha.Device{
+		Id:         "2",
+		Root:       false,
+		ParentId:   "olt",
+		OperStatus: 2,
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon"},
+			{PortNo: 2, Label: "uni"},
+		},
+
+		ParentPortNo: 1,
+
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			OnuId:          2,
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+		PmConfigs: &voltha.PmConfigs{
+			DefaultFreq:  10,
+			Id:           "olt",
+			FreqOverride: false,
+			Grouped:      false,
+			Metrics:      pm,
+		},
+	}
+	return &mcp
+}
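+
+// newMockDeviceHandler builds a DeviceHandler wired to mock core/adapter/event
+// proxies, a mock-KV-backed resource manager and a mock OpenOLT client, so the
+// handler methods can be exercised without a real OLT.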
+func newMockDeviceHandler() *DeviceHandler {
+	device := &voltha.Device{
+		Id:       "olt",
+		Root:     true,
+		ParentId: "logical_device",
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon", Type: voltha.Port_PON_OLT},
+			{PortNo: 2, Label: "nni", Type: voltha.Port_ETHERNET_NNI},
+		},
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "olt",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+	}
+	cp := newMockCoreProxy()
+	ap := &mocks.MockAdapterProxy{}
+	ep := &mocks.MockEventProxy{}
+	openOLT := &OpenOLT{coreProxy: cp, adapterProxy: ap, eventProxy: ep}
+	dh := NewDeviceHandler(cp, ap, ep, device, openOLT)
+	deviceInf := &oop.DeviceInfo{Vendor: "openolt", Ranges: nil, Model: "openolt", DeviceId: dh.deviceID}
+	dh.resourceMgr = &resourcemanager.OpenOltResourceMgr{DeviceID: dh.deviceID, DeviceType: dh.deviceType, DevInfo: deviceInf,
+		KVStore: &db.Backend{
+			Client: &mocks.MockKVClient{},
+		}}
+	dh.resourceMgr.ResourceMgrs = make(map[uint32]*ponrmgr.PONResourceManager)
+	ranges := make(map[string]interface{})
+	sharedIdxByType := make(map[string]string)
+	sharedIdxByType["ALLOC_ID"] = "ALLOC_ID"
+	sharedIdxByType["ONU_ID"] = "ONU_ID"
+	sharedIdxByType["GEMPORT_ID"] = "GEMPORT_ID"
+	sharedIdxByType["FLOW_ID"] = "FLOW_ID"
+	ranges["ONU_ID"] = uint32(0)
+	ranges["GEMPORT_ID"] = uint32(0)
+	ranges["ALLOC_ID"] = uint32(0)
+	ranges["FLOW_ID"] = uint32(0)
+	ranges["onu_id_shared"] = uint32(0)
+	ranges["alloc_id_shared"] = uint32(0)
+	ranges["gemport_id_shared"] = uint32(0)
+	ranges["flow_id_shared"] = uint32(0)
+
+	ponmgr := &ponrmgr.PONResourceManager{
+		DeviceID: "onu-1",
+		IntfIDs:  []uint32{1, 2},
+		KVStore: &db.Backend{
+			Client: &mocks.MockKVClient{},
+		},
+		PonResourceRanges: ranges,
+		SharedIdxByType:   sharedIdxByType,
+	}
+	dh.resourceMgr.ResourceMgrs[1] = ponmgr
+	dh.resourceMgr.ResourceMgrs[2] = ponmgr
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	dh.flowMgr = NewFlowManager(ctx, dh, dh.resourceMgr)
+	dh.Client = &mocks.MockOpenoltClient{}
+	dh.eventMgr = &OpenOltEventMgr{eventProxy: &mocks.MockEventProxy{}, handler: dh}
+	dh.transitionMap = &TransitionMap{}
+	dh.portStats = &OpenOltStatisticsMgr{}
+
+	var pmNames = []string{
+		"rx_bytes",
+		"rx_packets",
+		"rx_mcast_packets",
+		"rx_bcast_packets",
+		"tx_bytes",
+		"tx_packets",
+		"tx_mcast_packets",
+		"tx_bcast_packets",
+	}
+
+	dh.metrics = pmmetrics.NewPmMetrics(device.Id, pmmetrics.Frequency(2), pmmetrics.FrequencyOverride(false), pmmetrics.Grouped(false), pmmetrics.Metrics(pmNames))
+	return dh
+}
+
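+// negativeDeviceHandler returns a handler whose device ID is empty and whose admin
+// state is "down", used to exercise the error paths of the handler methods.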
+func negativeDeviceHandler() *DeviceHandler {
+	dh := newMockDeviceHandler()
+	device := dh.device
+	device.Id = ""
+	dh.adminState = "down"
+	return dh
+}
+func Test_generateMacFromHost(t *testing.T) {
+	type args struct {
+		host string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{"generateMacFromHost-1", args{host: "localhost"}, "00:00:7f:00:00:01", false},
+		{"generateMacFromHost-2", args{host: "10.10.10.10"}, "00:00:0a:0a:0a:0a", false},
+		//{"generateMacFromHost-3", args{host: "google.com"}, "00:00:d8:3a:c8:8e", false},
+		{"generateMacFromHost-4", args{host: "testing3"}, "", true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := generateMacFromHost(tt.args.host)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("generateMacFromHost() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("generateMacFromHost() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+func Test_macifyIP(t *testing.T) {
+	type args struct {
+		ip net.IP
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{{
+		"macifyIP-1",
+		args{ip: net.ParseIP("10.10.10.10")},
+		"00:00:0a:0a:0a:0a",
+	},
+		{
+			"macifyIP-2",
+			args{ip: net.ParseIP("127.0.0.1")},
+			"00:00:7f:00:00:01",
+		},
+		{
+			"macifyIP-3",
+			args{ip: net.ParseIP("127.0.0.1/24")},
+			"",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := macifyIP(tt.args.ip); got != tt.want {
+				t.Errorf("macifyIP() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
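+// sparseCompare reports whether spec and target agree on the named struct fields,
+// comparing the fields via reflection.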
+func sparseCompare(keys []string, spec, target interface{}) bool {
+	if spec == target {
+		return true
+	}
+	if spec == nil || target == nil {
+		return false
+	}
+	typeSpec := reflect.TypeOf(spec)
+	typeTarget := reflect.TypeOf(target)
+	if typeSpec != typeTarget {
+		return false
+	}
+
+	vSpec := reflect.ValueOf(spec)
+	vTarget := reflect.ValueOf(target)
+	if vSpec.Kind() == reflect.Ptr {
+		vSpec = vSpec.Elem()
+		vTarget = vTarget.Elem()
+	}
+
+	for _, key := range keys {
+		fSpec := vSpec.FieldByName(key)
+		fTarget := vTarget.FieldByName(key)
+		if !reflect.DeepEqual(fSpec.Interface(), fTarget.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func TestDeviceHandler_GetChildDevice(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		parentPort uint32
+		onuID      uint32
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		want          *voltha.Device
+		errType       reflect.Type
+	}{
+		{"GetChildDevice-1", dh1,
+			args{parentPort: 1,
+				onuID: 1},
+			&voltha.Device{
+				Id:           "1",
+				ParentId:     "olt",
+				ParentPortNo: 1,
+			},
+			nil,
+		},
+		{"GetChildDevice-2", dh2,
+			args{parentPort: 1,
+				onuID: 1},
+			nil,
+			reflect.TypeOf(&ErrNotFound{}),
+		},
+	}
+
+	/*
+	   --- FAIL: TestDeviceHandler_GetChildDevice/GetChildDevice-1 (0.00s)
+	       device_handler_test.go:309: GetportLabel() => want=(, <nil>) got=(id:"1" parent_id:"olt" parent_port_no:1 proxy_address:<channel_id:1 channel_group_id:1 onu_id:1 > oper_status:ACTIVE connect_status:UNREACHABLE ports:<port_no:1 label:"pon" > ports:<port_no:2 label:"uni" > pm_configs:<id:"olt" default_freq:10 > , <nil>)
+	   --- FAIL: TestDeviceHandler_GetChildDevice/GetChildDevice-2 (0.00s)
+	*/
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := tt.devicehandler.GetChildDevice(tt.args.parentPort, tt.args.onuID)
+			if reflect.TypeOf(err) != tt.errType || !sparseCompare([]string{"Id", "ParentId", "ParentPortNo"}, tt.want, got) {
+				t.Errorf("GetportLabel() => want=(%v, %v) got=(%v, %v)",
+					tt.want, tt.errType, got, reflect.TypeOf(err))
+				return
+			}
+			t.Log("onu device id", got)
+		})
+	}
+}
+
+func TestGetportLabel(t *testing.T) {
+	invalid := reflect.TypeOf(&ErrInvalidValue{})
+	type args struct {
+		portNum  uint32
+		portType voltha.Port_PortType
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		errType reflect.Type
+	}{
+		{"GetportLabel-1", args{portNum: 0, portType: 0}, "", invalid},
+		{"GetportLabel-2", args{portNum: 1, portType: 1}, "nni-1", nil},
+		{"GetportLabel-3", args{portNum: 2, portType: 2}, "", invalid},
+		{"GetportLabel-4", args{portNum: 3, portType: 3}, "pon-3", nil},
+		{"GetportLabel-5", args{portNum: 4, portType: 4}, "", invalid},
+		{"GetportLabel-6", args{portNum: 5, portType: 5}, "", invalid},
+		{"GetportLabel-7", args{portNum: 6, portType: 6}, "", invalid},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GetportLabel(tt.args.portNum, tt.args.portType)
+			if reflect.TypeOf(err) != tt.errType || got != tt.want {
+				t.Errorf("GetportLabel() => want=(%v, %v) got=(%v, %v)",
+					tt.want, tt.errType, got, reflect.TypeOf(err))
+			}
+
+		})
+	}
+}
+
+func TestDeviceHandler_ProcessInterAdapterMessage(t *testing.T) {
+	dh := newMockDeviceHandler()
+	proxyAddr := dh.device.ProxyAddress
+	body := &ic.InterAdapterOmciMessage{
+		Message:      []byte("asdfasdfasdfasdfas"),
+		ProxyAddress: proxyAddr,
+	}
+	body2 := &ic.InterAdapterOmciMessage{
+		Message: []byte("asdfasdfasdfasdfas"),
+		//ProxyAddress: &voltha.Device_ProxyAddress{},
+	}
+	body3 := &ic.InterAdapterTechProfileDownloadMessage{}
+	var marshalledData *any.Any
+	var err error
+
+	if marshalledData, err = ptypes.MarshalAny(body); err != nil {
+		log.Errorw("cannot-marshal-request", log.Fields{"error": err})
+	}
+
+	var marshalledData1 *any.Any
+
+	if marshalledData1, err = ptypes.MarshalAny(body2); err != nil {
+		log.Errorw("cannot-marshal-request", log.Fields{"error": err})
+	}
+	var marshalledData2 *any.Any
+
+	if marshalledData2, err = ptypes.MarshalAny(body3); err != nil {
+		log.Errorw("cannot-marshal-request", log.Fields{"error": err})
+	}
+	type args struct {
+		msg *ic.InterAdapterMessage
+	}
+	invalid := reflect.TypeOf(&ErrInvalidValue{})
+	tests := []struct {
+		name    string
+		args    args
+		wantErr reflect.Type
+	}{
+		{"ProcessInterAdapterMessage-1", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_FLOW_REQUEST,
+			},
+			Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-2", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_FLOW_RESPONSE,
+			},
+			Body: marshalledData1,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-3", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_OMCI_REQUEST,
+			},
+			Body: marshalledData,
+		}}, reflect.TypeOf(&ErrCommunication{})},
+		{"ProcessInterAdapterMessage-4", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_OMCI_RESPONSE,
+			}, Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-5", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_METRICS_REQUEST,
+			}, Body: marshalledData1,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-6", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_METRICS_RESPONSE,
+			}, Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-7", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_ONU_IND_REQUEST,
+			}, Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-8", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_ONU_IND_RESPONSE,
+			}, Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-9", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST,
+			}, Body: marshalledData,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-10", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_DELETE_GEM_PORT_REQUEST,
+			}, Body: marshalledData2,
+		}}, invalid},
+		{"ProcessInterAdapterMessage-11", args{msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:   "012345",
+				Type: ic.InterAdapterMessageType_DELETE_TCONT_REQUEST,
+			}, Body: marshalledData2,
+		}}, invalid},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			if err := dh.ProcessInterAdapterMessage(tt.args.msg); reflect.TypeOf(err) != tt.wantErr {
+				t.Errorf("DeviceHandler.ProcessInterAdapterMessage() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_sendProxiedMessage(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	device1 := &voltha.Device{
+		Id:       "onu1",
+		Root:     false,
+		ParentId: "logical_device",
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "onu1",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+	}
+	device2 := device1
+	device2.ConnectStatus = 2
+	iaomciMsg1 := &ic.InterAdapterOmciMessage{
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "onu2",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+			//OnuId:          2,
+		},
+		ConnectStatus: 1,
+	}
+	iaomciMsg2 := &ic.InterAdapterOmciMessage{
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "onu3",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+	}
+	type args struct {
+		onuDevice *voltha.Device
+		omciMsg   *ic.InterAdapterOmciMessage
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		{"sendProxiedMessage-1", dh1, args{onuDevice: device1, omciMsg: &ic.InterAdapterOmciMessage{}}},
+		{"sendProxiedMessage-2", dh1, args{onuDevice: device2, omciMsg: &ic.InterAdapterOmciMessage{}}},
+		{"sendProxiedMessage-3", dh1, args{onuDevice: nil, omciMsg: iaomciMsg1}},
+		{"sendProxiedMessage-4", dh1, args{onuDevice: nil, omciMsg: iaomciMsg2}},
+		{"sendProxiedMessage-5", dh2, args{onuDevice: nil, omciMsg: iaomciMsg2}},
+		{"sendProxiedMessage-6", dh2, args{onuDevice: device1, omciMsg: &ic.InterAdapterOmciMessage{}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.devicehandler.sendProxiedMessage(tt.args.onuDevice, tt.args.omciMsg)
+		})
+	}
+}
+
+func TestDeviceHandler_SendPacketInToCore(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+
+	type args struct {
+		logicalPort   uint32
+		packetPayload []byte
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		{"SendPacketInToCore-1", dh1, args{logicalPort: 1, packetPayload: []byte("test1")}},
+		{"SendPacketInToCore-2", dh1, args{logicalPort: 1, packetPayload: []byte("")}},
+		{"SendPacketInToCore-3", dh2, args{logicalPort: 1, packetPayload: []byte("test1")}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.devicehandler.SendPacketInToCore(tt.args.logicalPort, tt.args.packetPayload)
+		})
+	}
+}
+
+func TestDeviceHandler_DisableDevice(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		{"DisableDevice-1", dh1, args{device: dh1.device}, false},
+		{"DisableDevice-2", dh1, args{device: dh2.device}, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			if err := tt.devicehandler.DisableDevice(tt.args.device); (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.DisableDevice() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_ReenableDevice(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		{"ReenableDevice-1", dh1, args{device: dh1.device}, false},
+		{"ReenableDevice-2", dh1, args{device: &voltha.Device{}}, true},
+		{"ReenableDevice-3", dh2, args{device: dh1.device}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := tt.devicehandler
+			if err := dh.ReenableDevice(tt.args.device); (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.ReenableDevice() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_RebootDevice(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := newMockDeviceHandler()
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		// TODO: Add test cases.
+		{"RebootDevice-1", dh1, args{device: dh1.device}, false},
+		{"RebootDevice-2", dh1, args{device: dh2.device}, true},
+		{"RebootDevice-3", dh2, args{device: dh2.device}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			if err := tt.devicehandler.RebootDevice(tt.args.device); (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.RebootDevice() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_handleIndication(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	dh3 := newMockDeviceHandler()
+	dh3.onus = sync.Map{}
+	dh3.onus.Store("onu1", NewOnuDevice("onu1", "onu1", "onu1", 1, 1, "onu1"))
+	dh3.onus.Store("onu2", NewOnuDevice("onu2", "onu2", "onu2", 2, 2, "onu2"))
+
+	type args struct {
+		indication *oop.Indication
+	}
+	tests := []struct {
+		name          string
+		deviceHandler *DeviceHandler
+		args          args
+	}{
+		// TODO: Add test cases.
+		{"handleIndication-1", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OltInd{OltInd: &oop.OltIndication{OperState: "up"}}}}},
+		{"handleIndication-2", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OltInd{OltInd: &oop.OltIndication{OperState: "down"}}}}},
+		{"handleIndication-3", dh1, args{indication: &oop.Indication{Data: &oop.Indication_IntfInd{IntfInd: &oop.IntfIndication{IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-4", dh1, args{indication: &oop.Indication{Data: &oop.Indication_IntfInd{IntfInd: &oop.IntfIndication{IntfId: 1, OperState: "down"}}}}},
+		{"handleIndication-5", dh1, args{indication: &oop.Indication{Data: &oop.Indication_IntfOperInd{IntfOperInd: &oop.IntfOperIndication{Type: "nni", IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-6", dh1, args{indication: &oop.Indication{Data: &oop.Indication_IntfOperInd{IntfOperInd: &oop.IntfOperIndication{Type: "pon", IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-7", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OnuDiscInd{OnuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}}}}},
+		{"handleIndication-8", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "up"}}}}},
+		{"handleIndication-9", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "down"}}}}},
+		{"handleIndication-10", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "up"}}}}},
+		{"handleIndication-11", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "down"}}}}},
+		{"handleIndication-12", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OmciInd{OmciInd: &oop.OmciIndication{IntfId: 1, OnuId: 1, Pkt: []byte("onu123-random value")}}}}},
+		{"handleIndication-13", dh1, args{indication: &oop.Indication{Data: &oop.Indication_PktInd{PktInd: &oop.PacketIndication{IntfType: "nni", IntfId: 1, GemportId: 1, FlowId: 1234, PortNo: 1}}}}},
+		{"handleIndication-14", dh1, args{indication: &oop.Indication{Data: &oop.Indication_PortStats{PortStats: &oop.PortStatistics{IntfId: 1, RxBytes: 100, RxPackets: 100, RxUcastPackets: 100, RxMcastPackets: 100, RxBcastPackets: 100, RxErrorPackets: 100, TxBytes: 100, TxPackets: 100, TxUcastPackets: 100, TxMcastPackets: 100, TxBcastPackets: 100, TxErrorPackets: 100, RxCrcErrors: 100, BipErrors: 100, Timestamp: 1000}}}}},
+		{"handleIndication-15", dh1, args{indication: &oop.Indication{Data: &oop.Indication_FlowStats{FlowStats: &oop.FlowStatistics{RxBytes: 100, RxPackets: 100, TxBytes: 100, TxPackets: 100, Timestamp: 1000}}}}},
+		{"handleIndication-16", dh1, args{indication: &oop.Indication{Data: &oop.Indication_AlarmInd{AlarmInd: &oop.AlarmIndication{}}}}},
+		{"handleIndication-17", dh1, args{indication: &oop.Indication{Data: &oop.Indication_PktInd{PktInd: &oop.PacketIndication{IntfType: "nni", FlowId: 1234, PortNo: 1}}}}},
+		{"handleIndication-18", dh1, args{indication: &oop.Indication{Data: &oop.Indication_PktInd{PktInd: &oop.PacketIndication{}}}}},
+
+		// Negative testcases
+		{"handleIndication-19", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OltInd{OltInd: &oop.OltIndication{OperState: "up"}}}}},
+		{"handleIndication-20", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OltInd{OltInd: &oop.OltIndication{OperState: "down"}}}}},
+		{"handleIndication-21", dh2, args{indication: &oop.Indication{Data: &oop.Indication_IntfInd{IntfInd: &oop.IntfIndication{IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-22", dh2, args{indication: &oop.Indication{Data: &oop.Indication_IntfInd{IntfInd: &oop.IntfIndication{IntfId: 1, OperState: "down"}}}}},
+		{"handleIndication-23", dh2, args{indication: &oop.Indication{Data: &oop.Indication_IntfOperInd{IntfOperInd: &oop.IntfOperIndication{Type: "nni", IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-24", dh2, args{indication: &oop.Indication{Data: &oop.Indication_IntfOperInd{IntfOperInd: &oop.IntfOperIndication{Type: "pon", IntfId: 1, OperState: "up"}}}}},
+		{"handleIndication-25", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OnuDiscInd{OnuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}}}}},
+		{"handleIndication-26", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "up"}}}}},
+		{"handleIndication-27", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "down"}}}}},
+		{"handleIndication-28", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "up"}}}}},
+		{"handleIndication-29", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "down"}}}}},
+		{"handleIndication-30", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OmciInd{OmciInd: &oop.OmciIndication{IntfId: 1, OnuId: 1, Pkt: []byte("onu123-random value")}}}}},
+		{"handleIndication-31", dh2, args{indication: &oop.Indication{Data: &oop.Indication_PktInd{PktInd: &oop.PacketIndication{IntfType: "nni", IntfId: 1, GemportId: 1, FlowId: 1234, PortNo: 1}}}}},
+		{"handleIndication-32", dh2, args{indication: &oop.Indication{Data: &oop.Indication_PortStats{PortStats: &oop.PortStatistics{IntfId: 1, RxBytes: 100, RxPackets: 100, RxUcastPackets: 100, RxMcastPackets: 100, RxBcastPackets: 100, RxErrorPackets: 100, TxBytes: 100, TxPackets: 100, TxUcastPackets: 100, TxMcastPackets: 100, TxBcastPackets: 100, TxErrorPackets: 100, RxCrcErrors: 100, BipErrors: 100, Timestamp: 1000}}}}},
+		{"handleIndication-33", dh2, args{indication: &oop.Indication{Data: &oop.Indication_FlowStats{FlowStats: &oop.FlowStatistics{RxBytes: 100, RxPackets: 100, TxBytes: 100, TxPackets: 100, Timestamp: 1000}}}}},
+		{"handleIndication-34", dh2, args{indication: &oop.Indication{Data: &oop.Indication_AlarmInd{AlarmInd: &oop.AlarmIndication{}}}}},
+		//
+		{"handleIndication-35", dh3, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "up"}}}}},
+		{"handleIndication-36", dh3, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "up"}}}}},
+		{"handleIndication-37", dh3, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "up", AdminState: "down"}}}}},
+		{"handleIndication-38", dh3, args{indication: &oop.Indication{Data: &oop.Indication_OnuInd{OnuInd: &oop.OnuIndication{IntfId: 1, OnuId: 1, OperState: "down", AdminState: "down"}}}}},
+		{"handleIndication-39", dh1, args{indication: &oop.Indication{Data: &oop.Indication_OmciInd{OmciInd: &oop.OmciIndication{IntfId: 1, OnuId: 4, Pkt: []byte("onu123-random value")}}}}},
+		{"handleIndication-40", dh2, args{indication: &oop.Indication{Data: &oop.Indication_OmciInd{OmciInd: &oop.OmciIndication{IntfId: 1, OnuId: 4, Pkt: []byte("onu123-random value")}}}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := tt.deviceHandler
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			dh.handleIndication(ctx, tt.args.indication)
+		})
+	}
+}
+
+func TestDeviceHandler_addPort(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		intfID   uint32
+		portType voltha.Port_PortType
+		state    string
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		// State up
+		{"addPort.1", dh1, args{intfID: 1, portType: voltha.Port_UNKNOWN, state: "up"}},
+		{"addPort.2", dh1, args{intfID: 1, portType: voltha.Port_VENET_OLT, state: "up"}},
+		{"addPort.3", dh1, args{intfID: 1, portType: voltha.Port_VENET_ONU, state: "up"}},
+		{"addPort.4", dh1, args{intfID: 1, portType: voltha.Port_ETHERNET_NNI, state: "up"}},
+		{"addPort.5", dh1, args{intfID: 1, portType: voltha.Port_ETHERNET_UNI, state: "up"}},
+		{"addPort.6", dh1, args{intfID: 1, portType: voltha.Port_PON_OLT, state: "up"}},
+		{"addPort.7", dh1, args{intfID: 1, portType: voltha.Port_PON_ONU, state: "up"}},
+		{"addPort.8", dh1, args{intfID: 1, portType: 8, state: "up"}},
+		// state discovery
+		{"addPort.9", dh1, args{intfID: 1, portType: voltha.Port_UNKNOWN, state: "down"}},
+		{"addPort.10", dh1, args{intfID: 1, portType: voltha.Port_VENET_OLT, state: "down"}},
+		{"addPort.11", dh1, args{intfID: 1, portType: voltha.Port_VENET_ONU, state: "down"}},
+		{"addPort.12", dh1, args{intfID: 1, portType: voltha.Port_ETHERNET_NNI, state: "down"}},
+		{"addPort.13", dh1, args{intfID: 1, portType: voltha.Port_ETHERNET_UNI, state: "down"}},
+		{"addPort.14", dh1, args{intfID: 1, portType: voltha.Port_PON_OLT, state: "down"}},
+		{"addPort.15", dh1, args{intfID: 1, portType: voltha.Port_PON_ONU, state: "down"}},
+		{"addPort.16", dh1, args{intfID: 1, portType: 8, state: "down"}},
+
+		{"addPort.17", dh2, args{intfID: 1, portType: voltha.Port_ETHERNET_NNI, state: "up"}},
+		{"addPort.18", dh2, args{intfID: 1, portType: voltha.Port_ETHERNET_UNI, state: "up"}},
+		{"addPort.19", dh2, args{intfID: 1, portType: voltha.Port_ETHERNET_NNI, state: "down"}},
+		{"addPort.20", dh2, args{intfID: 1, portType: voltha.Port_ETHERNET_UNI, state: "down"}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.devicehandler.addPort(tt.args.intfID, tt.args.portType, tt.args.state)
+		})
+	}
+}
+
+func Test_macAddressToUint32Array(t *testing.T) {
+	type args struct {
+		mac string
+	}
+	tests := []struct {
+		name string
+		args args
+		want []uint32
+	}{
+		// TODO: Add test cases.
+		{"macAddressToUint32Array-1", args{mac: "00:00:00:00:00:01"}, []uint32{0, 0, 0, 0, 0, 1}},
+		{"macAddressToUint32Array-2", args{mac: "0abcdef"}, []uint32{11259375}},
+		{"macAddressToUint32Array-3", args{mac: "testing"}, []uint32{1, 2, 3, 4, 5, 6}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := macAddressToUint32Array(tt.args.mac); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("macAddressToUint32Array() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_handleOltIndication(t *testing.T) {
+
+	type args struct {
+		oltIndication *oop.OltIndication
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{"handleOltIndication-1", args{oltIndication: &oop.OltIndication{OperState: "up"}}},
+		{"handleOltIndication-2", args{oltIndication: &oop.OltIndication{OperState: "down"}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := newMockDeviceHandler()
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			dh.handleOltIndication(ctx, tt.args.oltIndication)
+		})
+	}
+}
+
+func TestDeviceHandler_AdoptDevice(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		// TODO: Add test cases.
+		{"AdoptDevice-1", dh1, args{device: dh1.device}},
+		{"AdoptDevice-2", dh2, args{device: dh2.device}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			//dh.doStateInit()
+			//	context.
+			//dh.AdoptDevice(tt.args.device)
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			tt.devicehandler.postInit(ctx)
+		})
+	}
+}
+
+func TestDeviceHandler_activateONU(t *testing.T) {
+	dh := newMockDeviceHandler()
+	dh1 := negativeDeviceHandler()
+	type args struct {
+		intfID       uint32
+		onuID        int64
+		serialNum    *oop.SerialNumber
+		serialNumber string
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		{"activateONU-1", dh, args{intfID: 1, onuID: 1, serialNum: &oop.SerialNumber{VendorId: []byte("onu1")}}},
+		{"activateONU-2", dh, args{intfID: 2, onuID: 2, serialNum: &oop.SerialNumber{VendorId: []byte("onu2")}}},
+		{"activateONU-3", dh1, args{intfID: 1, onuID: 1, serialNum: &oop.SerialNumber{VendorId: []byte("onu1")}}},
+		{"activateONU-4", dh1, args{intfID: 2, onuID: 2, serialNum: &oop.SerialNumber{VendorId: []byte("onu2")}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			tt.devicehandler.activateONU(ctx, tt.args.intfID, tt.args.onuID,
+				tt.args.serialNum, tt.args.serialNumber)
+		})
+	}
+}
+
+func TestDeviceHandler_start(t *testing.T) {
+	dh := newMockDeviceHandler()
+	dh1 := negativeDeviceHandler()
+	dh.start(context.Background())
+	dh.stop(context.Background())
+
+	dh1.start(context.Background())
+	dh1.stop(context.Background())
+
+}
+
+func TestDeviceHandler_PacketOut(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	acts := []*ofp.OfpAction{
+		fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA))),
+		fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+		fu.Output(1),
+	}
+	pktout := &ofp.OfpPacketOut{BufferId: 0, InPort: 1, Actions: acts, Data: []byte("AYDCAAAOAODsSE5TiMwCBwQA4OxITlIEBQUwLzUxBgIAFAgEMC81MQoJbG9jYWxob3N0EBwFAawbqqACAAAAoRAxLjMuNi4xLjQuMS40NDEz/gYAgMILAgD+GQCAwgkDAAAAAGQAAAAAAAAAAgICAgICAgL+GQCAwgoDAAAAAGQAAAAAAAAAAgICAgICAgIAAA==")}
+	type args struct {
+		egressPortNo int
+		packet       *of.OfpPacketOut
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		// TODO: Add test cases.
+		//{"test1", args{egressPortNo: 0, packet: &ofp.OfpPacketOut{}}, true},
+		{"PacketOut-1", dh1, args{egressPortNo: 0, packet: pktout}, false},
+		{"PacketOut-2", dh2, args{egressPortNo: 1, packet: pktout}, false},
+		{"PacketOut-3", dh2, args{egressPortNo: 115000, packet: pktout}, false},
+		{"PacketOut-4", dh1, args{egressPortNo: 65536, packet: pktout}, false},
+		{"PacketOut-5", dh2, args{egressPortNo: 65535, packet: pktout}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := tt.devicehandler
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			if err := dh.PacketOut(ctx, tt.args.egressPortNo, tt.args.packet); (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.PacketOut() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+//
+func TestDeviceHandler_doStateUp(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := newMockDeviceHandler()
+
+	dh2.deviceID = ""
+	dh3 := negativeDeviceHandler()
+
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		wantErr       bool
+	}{
+		{"dostateup-1", dh1, false},
+		{"dostateup-2", dh2, false},
+		{"dostateup-3", dh3, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			if err := tt.devicehandler.doStateUp(ctx); (err != nil) != tt.wantErr {
+				t.Logf("DeviceHandler.doStateUp() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+func TestDeviceHandler_doStateDown(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	dh3 := newMockDeviceHandler()
+	dh3.device.OperStatus = voltha.OperStatus_UNKNOWN
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		wantErr       bool
+	}{
+		{"dostatedown-1", dh1, false},
+		{"dostatedown-2", dh2, true},
+		{"dostatedown-3", dh3, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			if err := tt.devicehandler.doStateDown(ctx); (err != nil) != tt.wantErr {
+				t.Logf("DeviceHandler.doStateDown() error = %v", err)
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_GetOfpDeviceInfo(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		// TODO: Add test cases.
+		{"GetOfpDeviceInfo-1", dh1, args{dh1.device}, false},
+		{"GetOfpDeviceInfo-2", dh1, args{&voltha.Device{}}, false},
+		{"GetOfpDeviceInfo-3", dh2, args{dh1.device}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := tt.devicehandler
+			_, err := dh.GetOfpDeviceInfo(tt.args.device)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.GetOfpDeviceInfo() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_GetOfpPortInfo(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		device *voltha.Device
+		portNo int64
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+		wantErr       bool
+	}{
+		{"GetOfpPortInfo-1", dh1, args{device: dh1.device, portNo: 1}, false},
+		{"GetOfpPortInfo-2", dh2, args{device: dh2.device, portNo: 1}, false},
+		{"GetOfpPortInfo-3", dh1, args{device: dh1.device, portNo: 0}, false},
+		{"GetOfpPortInfo-4", dh2, args{device: dh2.device, portNo: 0}, false},
+		{"GetOfpPortInfo-5", dh1, args{device: &voltha.Device{}, portNo: 1}, false},
+		{"GetOfpPortInfo-6", dh2, args{device: &voltha.Device{}, portNo: 0}, false},
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dh := tt.devicehandler
+			_, err := dh.GetOfpPortInfo(tt.args.device, tt.args.portNo)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.GetOfpPortInfo() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+		})
+	}
+}
+
+func TestDeviceHandler_onuDiscIndication(t *testing.T) {
+
+	dh1 := newMockDeviceHandler()
+	dh1.discOnus = sync.Map{}
+	dh1.discOnus.Store("onu1", true)
+	dh1.discOnus.Store("onu2", false)
+	dh2 := negativeDeviceHandler()
+	type args struct {
+		onuDiscInd *oop.OnuDiscIndication
+		sn         string
+	}
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+		args          args
+	}{
+		// TODO: Add test cases.
+		{"onuDiscIndication-1", dh1, args{onuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}}},
+		{"onuDiscIndication-2", dh1, args{onuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{}}}},
+		{"onuDiscIndication-3", dh1, args{onuDiscInd: &oop.OnuDiscIndication{SerialNumber: &oop.SerialNumber{}}}},
+		{"onuDiscIndication-4", dh1, args{onuDiscInd: &oop.OnuDiscIndication{}}},
+		{"onuDiscIndication-5", dh1, args{onuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}, sn: "onu1"}},
+		{"onuDiscIndication-6", dh1, args{onuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}, sn: "onu2"}},
+		{"onuDiscIndication-7", dh2, args{onuDiscInd: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			tt.devicehandler.onuDiscIndication(ctx, tt.args.onuDiscInd, tt.args.sn)
+		})
+	}
+}
+
+func TestDeviceHandler_populateDeviceInfo(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := negativeDeviceHandler()
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"populateDeviceInfo-1", dh1, false},
+		{"populateDeviceInfo-2", dh1, true},
+		{"populateDeviceInfo-3", dh1, true},
+		{"populateDeviceInfo-4", dh1, true},
+		{"populateDeviceInfo-5", dh2, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			_, err := tt.devicehandler.populateDeviceInfo()
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DeviceHandler.populateDeviceInfo() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+		})
+	}
+}
+
+func TestDeviceHandler_readIndications(t *testing.T) {
+	dh1 := newMockDeviceHandler()
+	dh2 := newMockDeviceHandler()
+	dh2.adminState = "down"
+	dh3 := newMockDeviceHandler()
+	dh3.device.AdminState = voltha.AdminState_DISABLED
+	dh4 := negativeDeviceHandler()
+	tests := []struct {
+		name          string
+		devicehandler *DeviceHandler
+	}{
+		// TODO: Add test cases.
+		{"readIndications-1", dh1},
+		{"readIndications-2", dh2},
+		{"readIndications-3", dh2},
+		{"readIndications-4", dh2},
+		{"readIndications-5", dh2},
+		{"readIndications-6", dh3},
+		{"readIndications-7", dh3},
+		{"readIndications-8", dh4},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			tt.devicehandler.readIndications(ctx)
+		})
+	}
+}
+
+func Test_startCollector(t *testing.T) {
+	type args struct {
+		dh *DeviceHandler
+	}
+	dh := newMockDeviceHandler()
+	dh.portStats.NorthBoundPort = make(map[uint32]*NniPort)
+	dh.portStats.NorthBoundPort[0] = &NniPort{Name: "OLT-1"}
+	dh.portStats.SouthBoundPort = make(map[uint32]*PonPort)
+	dh.portStats.Device = dh
+	for i := 0; i < 16; i++ {
+		dh.portStats.SouthBoundPort[uint32(i)] = &PonPort{DeviceID: "OLT-1"}
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"StartCollector-1", args{dh}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			go func() {
+				time.Sleep(66 * time.Second) // startCollector sleeps 1 min internally, so we stop it after 66 seconds of running
+				tt.args.dh.stopCollector <- true
+			}()
+			startCollector(tt.args.dh)
+		})
+	}
+}
diff --git a/internal/pkg/core/error.go b/internal/pkg/core/error.go
new file mode 100644
index 0000000..0de7dc8
--- /dev/null
+++ b/internal/pkg/core/error.go
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"strings"
+)
+
+const (
+	defaultLogAndReturnLevel = log.DebugLevel
+)
+
+func copy(src log.Fields) log.Fields {
+	dst := make(log.Fields)
+	for k, v := range src {
+		dst[k] = v
+	}
+	return dst
+}
+
+func merge(one, two log.Fields) log.Fields {
+	dst := make(log.Fields)
+	for k, v := range one {
+		dst[k] = v
+	}
+	for k, v := range two {
+		dst[k] = v
+	}
+	return dst
+}
+
+// LoggableError defines functions that can be used to log an object
+type LoggableError interface {
+	error
+	Log() error
+	LogAt(log.LogLevel) error
+}
+
+// ErrAdapter represents a basic adapter error that combines a name, a field set
+// and a wrapped error
+type ErrAdapter struct {
+	name    string
+	fields  log.Fields
+	wrapped error
+}
+
+// NewErrAdapter constructs a new error with the given values
+func NewErrAdapter(name string, fields log.Fields, wrapped error) LoggableError {
+	return &ErrAdapter{
+		name:    name,
+		fields:  copy(fields),
+		wrapped: wrapped,
+	}
+}
+
+// Name returns the error name
+func (e *ErrAdapter) Name() string {
+	return e.name
+}
+
+// Fields returns the fields associated with the error
+func (e *ErrAdapter) Fields() log.Fields {
+	return e.fields
+}
+
+// Unwrap returns the wrapped or nested error
+func (e *ErrAdapter) Unwrap() error {
+	return e.wrapped
+}
+
+// Error returns a string representation of the error
+func (e *ErrAdapter) Error() string {
+	var buf strings.Builder
+	buf.WriteString(e.name)
+	if len(e.fields) > 0 {
+		if val, err := json.Marshal(e.fields); err == nil {
+			buf.WriteString(": [")
+			buf.WriteString(string(val))
+			buf.WriteString("]")
+		}
+	}
+	if e.wrapped != nil {
+		buf.WriteString(": ")
+		buf.WriteString(e.wrapped.Error())
+	}
+	return buf.String()
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrAdapter) Log() error {
+	return e.LogAt(defaultLogAndReturnLevel)
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrAdapter) LogAt(level log.LogLevel) error {
+	logger := log.Debugw
+	switch level {
+	case log.InfoLevel:
+		logger = log.Infow
+	case log.WarnLevel:
+		logger = log.Warnw
+	case log.ErrorLevel:
+		logger = log.Errorw
+	case log.FatalLevel:
+		logger = log.Fatalw
+	}
+	local := e.fields
+	if e.wrapped != nil {
+		local = merge(e.fields, log.Fields{"wrapped": e.wrapped})
+	}
+	logger(e.name, local)
+	return e
+}
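+
+// Illustrative usage (a sketch, not part of the adapter flow): callers build a
+// typed error with context fields, then log it and return it in one expression;
+// the field names below are hypothetical.
+//
+//	return NewErrAdapter("connection-failed", log.Fields{"device-id": deviceID}, err).Log()
+//
+// LogAt can be used instead of Log when a severity other than the default
+// debug level is required, e.g. .LogAt(log.ErrorLevel).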
+
+// ErrInvalidValue represents an error condition where a given value cannot be
+// processed
+type ErrInvalidValue struct {
+	ErrAdapter
+}
+
+// NewErrInvalidValue constructs a new error based on the given values
+func NewErrInvalidValue(fields log.Fields, wrapped error) LoggableError {
+	return &ErrInvalidValue{
+		ErrAdapter{
+			name:    "invalid-value",
+			fields:  copy(fields),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrInvalidValue) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrInvalidValue) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+// ErrNotFound represents an error condition when a value cannot be located
+// given a set of field criteria
+type ErrNotFound struct {
+	ErrAdapter
+}
+
+// NewErrNotFound constructs a new error based on the given values
+func NewErrNotFound(target string, fields log.Fields, wrapped error) LoggableError {
+	return &ErrNotFound{
+		ErrAdapter{
+			name:    "not-found",
+			fields:  merge(fields, log.Fields{"target": target}),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrNotFound) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrNotFound) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+// ErrPersistence represents an error condition when a persistence operation
+// did not succeed
+type ErrPersistence struct {
+	ErrAdapter
+}
+
+// NewErrPersistence constructs a new error based on the given values
+func NewErrPersistence(operation, entityType string, ID uint32, fields log.Fields, wrapped error) LoggableError {
+	return &ErrPersistence{
+		ErrAdapter{
+			name: "unable-to-persist",
+			fields: merge(fields, log.Fields{
+				"operation":   operation,
+				"entity-type": entityType,
+				"id":          fmt.Sprintf("0x%x", ID)}),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrPersistence) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrPersistence) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+// ErrCommunication represents an error condition when an interprocess
+// message communication fails
+type ErrCommunication struct {
+	ErrAdapter
+}
+
+// NewErrCommunication constructs a new error based on the given values
+func NewErrCommunication(operation string, fields log.Fields, wrapped error) LoggableError {
+	return &ErrCommunication{
+		ErrAdapter{
+			name: "failed-communication",
+			fields: merge(fields, log.Fields{
+				"operation": operation}),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrCommunication) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrCommunication) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+// ErrFlowOp represents an error condition when a flow operation to a device did
+// not succeed
+type ErrFlowOp struct {
+	ErrAdapter
+}
+
+// NewErrFlowOp constructs a new error based on the given values
+func NewErrFlowOp(operation string, ID uint32, fields log.Fields, wrapped error) LoggableError {
+	return &ErrFlowOp{
+		ErrAdapter{
+			name: "unable-to-perform-flow-operation",
+			fields: merge(fields, log.Fields{
+				"operation": operation,
+				"id":        fmt.Sprintf("0x%x", ID)}),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrFlowOp) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrFlowOp) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+// ErrTimeout represents an error condition when the deadline for performing an
+// operation has been exceeded
+type ErrTimeout struct {
+	ErrAdapter
+}
+
+// NewErrTimeout constructs a new error based on the given values
+func NewErrTimeout(operation string, fields log.Fields, wrapped error) LoggableError {
+	return &ErrTimeout{
+		ErrAdapter{
+			name:    "operation-timed-out",
+			fields:  merge(fields, log.Fields{"operation": operation}),
+			wrapped: wrapped,
+		},
+	}
+}
+
+// Log logs the error at the default level for log and return
+func (e *ErrTimeout) Log() error {
+	_ = e.ErrAdapter.Log()
+	return e
+}
+
+// LogAt logs the error at the specified level and then returns the error
+func (e *ErrTimeout) LogAt(level log.LogLevel) error {
+	_ = e.ErrAdapter.LogAt(level)
+	return e
+}
+
+var (
+	// ErrNotImplemented error returned when an unimplemented method is
+	// invoked
+	ErrNotImplemented = NewErrAdapter("not-implemented", nil, nil)
+
+	// ErrInvalidPortRange error returned when a given port is not in the
+	// valid range
+	ErrInvalidPortRange = NewErrAdapter("invalid-port-range", nil, nil)
+
+	// ErrStateTransition error returned when a state transition fails
+	ErrStateTransition = NewErrAdapter("state-transition", nil, nil)
+
+	// ErrResourceManagerInstantiating error returned when an unexpected
+	// condition occurs while instantiating the resource manager
+	ErrResourceManagerInstantiating = NewErrAdapter("resource-manager-instantiating", nil, nil)
+)
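+
+// Illustrative usage of the sentinel errors above (a sketch): since they are
+// package-level values, callers compare against them directly, as
+// IntfIDFromNniPortNum and its unit tests do with ErrInvalidPortRange.
+//
+//	if _, err := IntfIDFromNniPortNum(portNum); err == ErrInvalidPortRange {
+//		// portNum is outside the logical NNI port range
+//	}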
diff --git a/internal/pkg/core/olt_platform.go b/internal/pkg/core/olt_platform.go
new file mode 100644
index 0000000..747cccf
--- /dev/null
+++ b/internal/pkg/core/olt_platform.go
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+/*=====================================================================
+
+@TODO: The Flow Id scheme documented below does not appear to be used anywhere.
+       Propose removing this documentation once that is confirmed.
+
+Flow id
+
+    Identifies a flow within a single OLT
+    Flow Id is unique per OLT
+    Multiple GEM ports can map to same flow id
+
+     13    11              4      0
+    +--------+--------------+------+
+    | pon id |    onu id    | Flow |
+    |        |              | idx  |
+    +--------+--------------+------+
+
+    14 bits = 16384 flows (per OLT).
+
+    pon id = 4 bits = 16 PON ports
+    onu id = 7 bits = 128 ONUs per PON port
+    Flow index = 3 bits = 4 bi-directional flows per ONU
+                        = 8 uni-directional flows per ONU
+
+
+Logical (OF) UNI port number
+
+    OpenFlow port number corresponding to PON UNI
+
+     20        12              4      0
+    +--+--------+--------------+------+
+    |0 | pon id |    onu id    |uni id|
+    +--+--------+--------------+------+
+
+    pon id = 8 bits = 256 PON ports
+    onu id = 8 bits = 256 ONUs per PON port
+
+Logical (OF) NNI port number
+
+    OpenFlow port number corresponding to PON NNI
+
+     20                             0
+    +--+----------------------------+
+    |1 |                    intf_id |
+    +--+----------------------------+
+
+    No overlap with UNI port number space
+
+
+PON OLT (OF) port number
+
+    OpenFlow port number corresponding to PON OLT ports
+
+     31     28                                 0
+    +--------+------------------------~~~------+
+    |  0x2   |          pon intf id            |
+    +--------+------------------------~~~------+
+*/
+
+const (
+	// Number of bits for the physical UNI of the ONUs
+	bitsForUniID = 4
+	// Number of bits for the ONU ID
+	bitsForONUID = 8
+	// Number of bits for PON ID
+	bitsForPONID = 8
+	// Number of bits to differentiate between UNI and NNI Logical Port
+	bitsForUNINNIDiff = 1
+	//MaxOnusPerPon is Max number of ONUs on any PON port
+	MaxOnusPerPon = (1 << bitsForONUID)
+	//MaxPonsPerOlt is Max number of PON ports on any OLT
+	MaxPonsPerOlt = (1 << bitsForPONID)
+	//MaxUnisPerOnu is the Max number of UNI ports on any ONU
+	MaxUnisPerOnu = (1 << bitsForUniID)
+	//Bit position where the differentiation bit is located
+	nniUniDiffPos = (bitsForUniID + bitsForONUID + bitsForPONID)
+	//Bit position where the marker for PON port type of OF port is present
+	ponIntfMarkerPos = 28
+	//Value of marker used to distinguish PON port type of OF port
+	ponIntfMarkerValue = 0x2
+	// Number of bits for NNI ID
+	bitsforNNIID = 20
+	// minNniIntPortNum is used to store start range of nni port number (1 << 20) 1048576
+	minNniIntPortNum = (1 << bitsforNNIID)
+	// maxNniPortNum is used to store the maximum range of nni port number ((1 << 21)-1) 2097151
+	maxNniPortNum = ((1 << (bitsforNNIID + 1)) - 1)
+)
+
+//MinUpstreamPortID value
+var MinUpstreamPortID = 0xfffd
+
+//MaxUpstreamPortID value
+var MaxUpstreamPortID = 0xfffffffd
+
+var controllerPorts = []uint32{0xfffd, 0x7ffffffd, 0xfffffffd}
+
+//MkUniPortNum returns a new UNI port number based on intfID, onuID and uniID
+func MkUniPortNum(intfID, onuID, uniID uint32) uint32 {
+	var limit = int(onuID)
+	if limit > MaxOnusPerPon {
+		log.Warn("Warning: exceeded the MAX ONUS per PON")
+	}
+	return (intfID << (bitsForUniID + bitsForONUID)) | (onuID << bitsForUniID) | uniID
+}
+
+//OnuIDFromPortNum returns ONUID derived from portNumber
+func OnuIDFromPortNum(portNum uint32) uint32 {
+	return (portNum >> bitsForUniID) & (MaxOnusPerPon - 1)
+}
+
+//IntfIDFromUniPortNum returns IntfID derived from portNum
+func IntfIDFromUniPortNum(portNum uint32) uint32 {
+	return (portNum >> (bitsForUniID + bitsForONUID)) & (MaxPonsPerOlt - 1)
+}
+
+//UniIDFromPortNum return UniID derived from portNum
+func UniIDFromPortNum(portNum uint32) uint32 {
+	return (portNum) & (MaxUnisPerOnu - 1)
+}
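+
+// Worked example for the encode/decode helpers above (illustrative only): with
+// 4 bits for the UNI ID and 8 bits for the ONU ID, MkUniPortNum(1, 2, 3) yields
+// (1<<12)|(2<<4)|3 = 4131, and the decoders invert it:
+//
+//	port := MkUniPortNum(1, 2, 3)      // 4131
+//	intf := IntfIDFromUniPortNum(port) // 1
+//	onu := OnuIDFromPortNum(port)      // 2
+//	uni := UniIDFromPortNum(port)      // 3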
+
+//IntfIDToPortNo returns the logical port number derived from intfID and intfType
+func IntfIDToPortNo(intfID uint32, intfType voltha.Port_PortType) uint32 {
+	if (intfType) == voltha.Port_ETHERNET_NNI {
+		return (1 << nniUniDiffPos) | intfID
+	}
+	if (intfType) == voltha.Port_PON_OLT {
+		return (ponIntfMarkerValue << ponIntfMarkerPos) | intfID
+	}
+	return 0
+}
+
+//PortNoToIntfID returns the interface ID derived from the logical port number
+func PortNoToIntfID(portno uint32, intfType voltha.Port_PortType) uint32 {
+	if (intfType) == voltha.Port_ETHERNET_NNI {
+		return (1 << nniUniDiffPos) ^ portno
+	}
+	if (intfType) == voltha.Port_PON_OLT {
+		return (ponIntfMarkerValue << ponIntfMarkerPos) ^ portno
+	}
+	return 0
+}
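+
+// Illustrative round trip for the two helpers above (not executed anywhere in
+// this file): an NNI interface ID is mapped into the logical OF port space by
+// setting bit 20, while PON OLT ports carry the 0x2 marker at bit position 28:
+//
+//	IntfIDToPortNo(1, voltha.Port_ETHERNET_NNI)       // (1<<20)|1 = 1048577
+//	PortNoToIntfID(1048577, voltha.Port_ETHERNET_NNI) // 1
+//	IntfIDToPortNo(456, voltha.Port_PON_OLT)          // (0x2<<28)|456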
+
+//IntfIDFromNniPortNum returns Intf ID derived from portNum
+func IntfIDFromNniPortNum(portNum uint32) (uint32, error) {
+	if portNum < minNniIntPortNum || portNum > maxNniPortNum {
+		log.Errorw("NNIPortNumber is not in valid range", log.Fields{"portNum": portNum})
+		return uint32(0), ErrInvalidPortRange
+	}
+	return (portNum & 0xFFFF), nil
+}
+
+//IntfIDToPortTypeName returns port type derived from the intfId
+func IntfIDToPortTypeName(intfID uint32) voltha.Port_PortType {
+	if ((ponIntfMarkerValue << ponIntfMarkerPos) ^ intfID) < MaxPonsPerOlt {
+		return voltha.Port_PON_OLT
+	}
+	if (intfID & (1 << nniUniDiffPos)) == (1 << nniUniDiffPos) {
+		return voltha.Port_ETHERNET_NNI
+	}
+	return voltha.Port_ETHERNET_UNI
+}
+
+//ExtractAccessFromFlow returns the UNI port number, PON interface ID, ONU ID and UNI ID for the access side of the flow
+func ExtractAccessFromFlow(inPort, outPort uint32) (uint32, uint32, uint32, uint32) {
+	if IsUpstream(outPort) {
+		return inPort, IntfIDFromUniPortNum(inPort), OnuIDFromPortNum(inPort), UniIDFromPortNum(inPort)
+	}
+	return outPort, IntfIDFromUniPortNum(outPort), OnuIDFromPortNum(outPort), UniIDFromPortNum(outPort)
+}
+
+//IsUpstream returns true if the output port is upstream-bound (a controller port or an NNI port), false otherwise
+func IsUpstream(outPort uint32) bool {
+	for _, port := range controllerPorts {
+		if port == outPort {
+			return true
+		}
+	}
+	return (outPort & (1 << nniUniDiffPos)) == (1 << nniUniDiffPos)
+}
+
+//IsControllerBoundFlow returns true if the output port is one of the reserved controller ports
+func IsControllerBoundFlow(outPort uint32) bool {
+	for _, port := range controllerPorts {
+		if port == outPort {
+			return true
+		}
+	}
+	return false
+}
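+
+// Illustrative examples mirroring the unit tests: 0xfffd is one of the reserved
+// controller ports, and any port with the NNI bit (bit 20) set is treated as
+// upstream-bound:
+//
+//	IsControllerBoundFlow(0xfffd) // true
+//	IsUpstream(1 << 20)           // true (NNI logical port)
+//	IsUpstream(1000)              // false (UNI logical port)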
+
+//OnuIDFromUniPortNum returns the ONU ID derived from the given UNI portNum.
+func OnuIDFromUniPortNum(portNum uint32) uint32 {
+	return (portNum >> bitsForUniID) & (MaxOnusPerPon - 1)
+}
+
+//FlowExtractInfo extracts the UNI port from the flow and returns the derived ponIntf, onuID and uniID, along with the inPort and ethType
+func FlowExtractInfo(flow *ofp.OfpFlowStats, flowDirection string) (uint32, uint32, uint32, uint32, uint32, uint32, error) {
+	var uniPortNo uint32
+	var ponIntf uint32
+	var onuID uint32
+	var uniID uint32
+	var inPort uint32
+	var ethType uint32
+
+	if flowDirection == "upstream" {
+		if uniPortNo = flows.GetChildPortFromTunnelId(flow); uniPortNo == 0 {
+			for _, field := range flows.GetOfbFields(flow) {
+				if field.GetType() == flows.IN_PORT {
+					uniPortNo = field.GetPort()
+					break
+				}
+			}
+		}
+	} else if flowDirection == "downstream" {
+		if uniPortNo = flows.GetChildPortFromTunnelId(flow); uniPortNo == 0 {
+			for _, field := range flows.GetOfbFields(flow) {
+				if field.GetType() == flows.METADATA {
+					for _, action := range flows.GetActions(flow) {
+						if action.Type == flows.OUTPUT {
+							if out := action.GetOutput(); out != nil {
+								uniPortNo = out.GetPort()
+							}
+							break
+						}
+					}
+				} else if field.GetType() == flows.IN_PORT {
+					inPort = field.GetPort()
+				} else if field.GetType() == flows.ETH_TYPE {
+					ethType = field.GetEthType()
+				}
+			}
+		}
+	}
+
+	if uniPortNo == 0 {
+		return 0, 0, 0, 0, 0, 0, NewErrNotFound("pon-interface", log.Fields{
+			"flow-direction": flowDirection}, nil)
+	}
+
+	ponIntf = IntfIDFromUniPortNum(uniPortNo)
+	onuID = OnuIDFromUniPortNum(uniPortNo)
+	uniID = UniIDFromPortNum(uniPortNo)
+
+	log.Debugw("flow extract info result",
+		log.Fields{"uniPortNo": uniPortNo, "ponIntf": ponIntf,
+			"onuID": onuID, "uniID": uniID, "inPort": inPort, "ethType": ethType})
+
+	return uniPortNo, ponIntf, onuID, uniID, inPort, ethType, nil
+}
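+
+// Illustrative example mirroring the unit tests: for an upstream flow whose
+// tunnel ID is unset and whose match includes IN_PORT=2, FlowExtractInfo
+// returns uniPortNo=2 and decodes ponIntf=0, onuID=0 and uniID=2 from it.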
diff --git a/internal/pkg/core/olt_platform_test.go b/internal/pkg/core/olt_platform_test.go
new file mode 100644
index 0000000..4f3d5bf
--- /dev/null
+++ b/internal/pkg/core/olt_platform_test.go
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"math"
+	"reflect"
+	"testing"
+
+	fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+func TestMkUniPortNum(t *testing.T) {
+	type args struct {
+		intfID uint32
+		onuID  uint32
+		uniID  uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want uint32
+	}{
+		// TODO: Add test cases.
+		{"MkUniPortNum-1", args{1, 1, 1}, ((1 * 4096) + (1 * 16) + 1)},
+		{"MkUniPortNum-2", args{4, 5, 6}, ((4 * 4096) + (5 * 16) + 6)},
+		// Negative test cases to cover the log.warn
+		{"MkUniPortNum-3", args{4, 130, 6}, ((4 * 4096) + (130 * 16) + 6)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := MkUniPortNum(tt.args.intfID, tt.args.onuID, tt.args.uniID); got != tt.want {
+				t.Errorf("MkUniPortNum() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Expected %v , Actual %v \n", tt.want, got)
+			}
+		})
+	}
+}
+
+func TestOnuIDFromPortNum(t *testing.T) {
+	type args struct {
+		portNum uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want uint32
+	}{
+		// TODO: Add test cases.
+		{"OnuIDFromPortNum-1", args{portNum: 8096}, ((8096 / 16) & 255)},
+		{"OnuIDFromPortNum-2", args{portNum: 9095}, ((9095 / 16) & 255)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := OnuIDFromPortNum(tt.args.portNum); got != tt.want {
+				t.Errorf("OnuIDFromPortNum() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Expected %v , Actual %v \n", tt.want, got)
+			}
+		})
+	}
+}
+
+func TestIntfIDFromUniPortNum(t *testing.T) {
+	type args struct {
+		portNum uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want uint32
+	}{
+		// TODO: Add test cases.
+		{"IntfIDFromUniPortNum-1", args{portNum: 8096}, ((8096 / 4096) & 15)},
+		// Negative Testcase
+		{"IntfIDFromUniPortNum-2", args{portNum: 1024}, ((1024 / 4096) & 15)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IntfIDFromUniPortNum(tt.args.portNum); got != tt.want {
+				t.Errorf("IntfIDFromUniPortNum() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Expected %v , Actual %v \n", tt.want, got)
+			}
+		})
+	}
+}
+
+func TestUniIDFromPortNum(t *testing.T) {
+	type args struct {
+		portNum uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want uint32
+	}{
+
+		// TODO: Add test cases.
+		{"UniIDFromPortNum-1", args{portNum: 8096}, (8096 & 15)},
+		{"UniIDFromPortNum-2", args{portNum: 1024}, (1024 & 15)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := UniIDFromPortNum(tt.args.portNum); got != tt.want {
+				t.Errorf("UniIDFromPortNum() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Expected %v , Actual %v \n", tt.want, got)
+			}
+		})
+	}
+}
+
+func TestIntfIDToPortNo(t *testing.T) {
+	type args struct {
+		intfID   uint32
+		intfType voltha.Port_PortType
+	}
+	tests := []struct {
+		name string
+		args args
+		want uint32
+	}{
+		// TODO: Add test cases.
+		{"IntfIDToPortNo-1", args{intfID: 120, intfType: voltha.Port_ETHERNET_NNI}, (uint32(math.Pow(2, 20)) + 120)},
+		{"IntfIDToPortNo-2", args{intfID: 1024, intfType: voltha.Port_ETHERNET_UNI}, 0},
+		{"IntfIDToPortNo-3", args{intfID: 456, intfType: voltha.Port_PON_OLT}, (uint32(2*math.Pow(2, 28)) + 456)},
+		{"IntfIDToPortNo-4", args{intfID: 28, intfType: voltha.Port_PON_ONU}, 0},
+		{"IntfIDToPortNo-5", args{intfID: 45, intfType: voltha.Port_UNKNOWN}, 0},
+		{"IntfIDToPortNo-6", args{intfID: 45, intfType: voltha.Port_VENET_OLT}, 0},
+		{"IntfIDToPortNo-7", args{intfID: 45, intfType: voltha.Port_VENET_ONU}, 0},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IntfIDToPortNo(tt.args.intfID, tt.args.intfType); got != tt.want {
+				t.Errorf("IntfIDToPortNo() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Expected %v , Actual %v \n", tt.want, got)
+			}
+		})
+	}
+}
+
+func TestIntfIDFromNniPortNum(t *testing.T) {
+	type args struct {
+		portNum uint32
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		want    uint32
+		wantErr error
+	}{
+		// TODO: Add test cases.
+		{"IntfIDFromNniPortNum-01", args{portNum: 8081}, 0, ErrInvalidPortRange},
+		{"IntfIDFromNniPortNum-02", args{portNum: 9090}, 0, ErrInvalidPortRange},
+		{"IntfIDFromNniPortNum-03", args{portNum: 0}, 0, ErrInvalidPortRange},
+		{"IntfIDFromNniPortNum-04", args{portNum: 65535}, 0, ErrInvalidPortRange},
+		{"IntfIDFromNniPortNum-05", args{portNum: 1048575}, 0, ErrInvalidPortRange},
+		{"IntfIDFromNniPortNum-06", args{portNum: 1048576}, 0, nil},
+		{"IntfIDFromNniPortNum-07", args{portNum: 1048577}, 1, nil},
+		{"IntfIDFromNniPortNum-08", args{portNum: 1048578}, 2, nil},
+		{"IntfIDFromNniPortNum-09", args{portNum: 1048579}, 3, nil},
+		{"IntfIDFromNniPortNum-10", args{portNum: 2097150}, 65534, nil},
+		{"IntfIDFromNniPortNum-11", args{portNum: 2097151}, 65535, nil},
+		{"IntfIDFromNniPortNum-12", args{portNum: 3000000}, 0, ErrInvalidPortRange},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := IntfIDFromNniPortNum(tt.args.portNum)
+			if got != tt.want || err != tt.wantErr {
+				t.Errorf("IntfIDFromNniPortNum(): FOR[%v] WANT[%v and %v] GOT[%v and %v]",
+					tt.args.portNum, tt.want, tt.wantErr, got, err)
+			}
+		})
+	}
+}
+
+func TestIntfIDToPortTypeName(t *testing.T) {
+	type args struct {
+		intfID uint32
+	}
+	input := uint32(2*math.Pow(2, 28)) | 3
+	tests := []struct {
+		name string
+		args args
+		want voltha.Port_PortType
+	}{
+		// TODO: Add test cases.
+		{"IntfIDToPortTypeName-1", args{intfID: 1048576}, voltha.Port_ETHERNET_NNI},
+		{"IntfIDToPortTypeName-2", args{intfID: 1000}, voltha.Port_ETHERNET_UNI},
+		{"IntfIDToPortTypeName-3", args{intfID: input}, voltha.Port_PON_OLT},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IntfIDToPortTypeName(tt.args.intfID); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("IntfIDToPortTypeName() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestExtractAccessFromFlow(t *testing.T) {
+	type args struct {
+		inPort  uint32
+		outPort uint32
+	}
+	tests := []struct {
+		name   string
+		args   args
+		port   uint32
+		IntfID uint32
+		onuID  uint32
+		uniID  uint32
+	}{
+		// TODO: Add test cases.
+		{"ExtractAccessFromFlow-1", args{inPort: 100, outPort: 1048576}, 100, 0, 6, 4},
+		{"ExtractAccessFromFlow-2", args{inPort: 1048576, outPort: 10}, 10, 0, 0, 10},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1, got2, got3 := ExtractAccessFromFlow(tt.args.inPort, tt.args.outPort)
+			if got != tt.port {
+				t.Errorf("ExtractAccessFromFlow() got = %v, want %v", got, tt.port)
+			}
+			if got1 != tt.IntfID {
+				t.Errorf("ExtractAccessFromFlow() got1 = %v, want %v", got1, tt.IntfID)
+			}
+			if got2 != tt.onuID {
+				t.Errorf("ExtractAccessFromFlow() got2 = %v, want %v", got2, tt.onuID)
+			}
+			if got3 != tt.uniID {
+				t.Errorf("ExtractAccessFromFlow() got3 = %v, want %v", got3, tt.uniID)
+			}
+		})
+	}
+}
+
+func TestIsUpstream(t *testing.T) {
+	type args struct {
+		outPort uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		// TODO: Add test cases.
+		{"TestIsUpstream-1", args{outPort: 65533}, true},
+		{"TestIsUpstream-2", args{outPort: 1048576}, true},
+		{"TestIsUpstream-3", args{outPort: 1048577}, true},
+		{"TestIsUpstream-4", args{outPort: 1048578}, true},
+		{"TestIsUpstream-6", args{outPort: 1000}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsUpstream(tt.args.outPort); got != tt.want {
+				t.Errorf("IsUpstream() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestIsControllerBoundFlow(t *testing.T) {
+	type args struct {
+		outPort uint32
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		// TODO: Add test cases.
+		{"IsControllerBoundFlow-1", args{outPort: 65533}, true},
+		{"IsControllerBoundFlow-2", args{outPort: 65536}, false},
+		{"IsControllerBoundFlow-3", args{outPort: 65537}, false},
+		{"IsControllerBoundFlow-4", args{outPort: 65538}, false},
+		{"IsControllerBoundFlow-5", args{outPort: 65539}, false},
+		{"IsControllerBoundFlow-6", args{outPort: 1000}, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsControllerBoundFlow(tt.args.outPort); got != tt.want {
+				t.Errorf("IsControllerBoundFlow() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestFlowExtractInfo(t *testing.T) {
+	fa := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(2),
+			fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2)),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+			fu.EthType(2048),
+		},
+
+		Actions: []*ofp.OfpAction{
+			fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+			fu.Output(1),
+		},
+	}
+	ofpstats := fu.MkFlowStat(fa)
+	type args struct {
+		flow          *ofp.OfpFlowStats
+		flowDirection string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    uint32
+		want1   uint32
+		want2   uint32
+		want3   uint32
+		want4   uint32
+		want5   uint32
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"FlowExtractInfo-1", args{flow: ofpstats, flowDirection: "upstream"}, 2, 0, 0, 2, 0, 0, false},
+
+		// Negative Testcases
+		{"FlowExtractInfo-2", args{flow: ofpstats, flowDirection: "downstream"}, 1, 0, 0, 1, 2, 2048, false},
+		{"FlowExtractInfo-3", args{flow: nil, flowDirection: "downstream"}, 0, 0, 0, 0, 0, 0, true},
+		{"FlowExtractInfo-4", args{flow: &ofp.OfpFlowStats{}, flowDirection: "downstream"}, 0, 0, 0, 0, 0, 0, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1, got2, got3, got4, got5, err := FlowExtractInfo(tt.args.flow, tt.args.flowDirection)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("FlowExtractInfo() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("FlowExtractInfo() got = %v, want %v", got, tt.want)
+				return
+			}
+			if got1 != tt.want1 {
+				t.Errorf("FlowExtractInfo() got1 = %v, want %v", got1, tt.want1)
+				return
+			}
+			if got2 != tt.want2 {
+				t.Errorf("FlowExtractInfo() got2 = %v, want %v", got2, tt.want2)
+				return
+			}
+			if got3 != tt.want3 {
+				t.Errorf("FlowExtractInfo() got3 = %v, want %v", got3, tt.want3)
+				return
+			}
+			if got4 != tt.want4 {
+				t.Errorf("FlowExtractInfo() got4 = %v, want %v", got4, tt.want4)
+				return
+			}
+			if got5 != tt.want5 {
+				t.Errorf("FlowExtractInfo() got5 = %v, want %v", got5, tt.want5)
+				return
+			}
+		})
+	}
+}
diff --git a/internal/pkg/core/olt_state_transitions.go b/internal/pkg/core/olt_state_transitions.go
new file mode 100644
index 0000000..c10d17d
--- /dev/null
+++ b/internal/pkg/core/olt_state_transitions.go
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"reflect"
+	"runtime"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+// DeviceState OLT Device state
+type DeviceState int
+
+const (
+	// deviceStateNull OLT is not instantiated
+	deviceStateNull DeviceState = iota
+	// deviceStateInit OLT is instantiated
+	deviceStateInit
+	// deviceStateConnected Grpc session established with OLT
+	deviceStateConnected
+	// deviceStateUp Admin state of OLT is UP
+	deviceStateUp
+	// deviceStateDown Admin state of OLT is down
+	deviceStateDown
+)
+
+// Trigger for changing the state
+type Trigger int
+
+const (
+	// DeviceInit Go to Device init state
+	DeviceInit Trigger = iota
+	// GrpcConnected Go to connected state
+	GrpcConnected
+	// DeviceUpInd Go to Device up state
+	DeviceUpInd
+	// DeviceDownInd Go to Device down state
+	DeviceDownInd
+	// GrpcDisconnected Go to Device init state
+	GrpcDisconnected
+)
+
+// TransitionHandler function type for handling transition
+type TransitionHandler func(ctx context.Context) error
+
+// Transition to store state machine
+type Transition struct {
+	previousState []DeviceState
+	currentState  DeviceState
+	before        []TransitionHandler
+	after         []TransitionHandler
+}
+
+// TransitionMap to store all the states and current device state
+type TransitionMap struct {
+	transitions        map[Trigger]Transition
+	currentDeviceState DeviceState
+}
+
+//    OpenoltDevice state machine:
+//
+//        null ----> init ------> connected -----> up -----> down
+//                   ^ ^             |             ^         | |
+//                   | |             |             |         | |
+//                   | +-------------+             +---------+ |
+//                   |                                         |
+//                   +-----------------------------------------+
+
+// NewTransitionMap create a new state machine with all the transitions
+func NewTransitionMap(dh *DeviceHandler) *TransitionMap {
+	var transitionMap TransitionMap
+	transitionMap.currentDeviceState = deviceStateNull
+	transitionMap.transitions = make(map[Trigger]Transition)
+	// In doInit establish the grpc session
+	transitionMap.transitions[DeviceInit] =
+		Transition{
+			previousState: []DeviceState{deviceStateNull, deviceStateDown},
+			currentState:  deviceStateInit,
+			before:        []TransitionHandler{dh.doStateInit},
+			after:         []TransitionHandler{dh.postInit}}
+	// If the gRPC session fails, re-establish it
+	transitionMap.transitions[GrpcDisconnected] =
+		Transition{
+			previousState: []DeviceState{deviceStateConnected, deviceStateDown},
+			currentState:  deviceStateInit,
+			before:        []TransitionHandler{dh.doStateInit},
+			after:         []TransitionHandler{dh.postInit}}
+	// in doConnected, create logical device and read the indications
+	transitionMap.transitions[GrpcConnected] =
+		Transition{
+			previousState: []DeviceState{deviceStateInit},
+			currentState:  deviceStateConnected,
+			before:        []TransitionHandler{dh.doStateConnected}}
+
+	// Once the OLT UP indication is received, do state up
+	transitionMap.transitions[DeviceUpInd] =
+		Transition{
+			previousState: []DeviceState{deviceStateConnected, deviceStateDown},
+			currentState:  deviceStateUp,
+			before:        []TransitionHandler{dh.doStateUp}}
+	// If the OLT DOWN indication comes, do state down
+	transitionMap.transitions[DeviceDownInd] =
+		Transition{
+			previousState: []DeviceState{deviceStateUp},
+			currentState:  deviceStateDown,
+			before:        []TransitionHandler{dh.doStateDown}}
+
+	return &transitionMap
+}
+
+// funcName gets the handler function name
+func funcName(f interface{}) string {
+	p := reflect.ValueOf(f).Pointer()
+	rf := runtime.FuncForPC(p)
+	return rf.Name()
+}
+
+// isValidTransition checks whether the transition for the given trigger is valid from the current state
+func (tMap *TransitionMap) isValidTransition(trigger Trigger) bool {
+	// Validate the state transition
+	for _, state := range tMap.transitions[trigger].previousState {
+		if tMap.currentDeviceState == state {
+			return true
+		}
+	}
+	return false
+}
+
+// Handle moves the state machine to next state based on the trigger and invokes the before and
+// after handlers if the transition is a valid transition
+func (tMap *TransitionMap) Handle(ctx context.Context, trigger Trigger) {
+
+	// Check whether the transition is valid from the current state
+	if !tMap.isValidTransition(trigger) {
+		log.Errorw("Invalid transition triggered ", log.Fields{"CurrentState": tMap.currentDeviceState, "Trigger": trigger})
+		return
+	}
+
+	// Invoke the before handlers
+	beforeHandlers := tMap.transitions[trigger].before
+	if beforeHandlers == nil {
+		log.Debugw("No handlers for before", log.Fields{"trigger": trigger})
+	}
+	for _, handler := range beforeHandlers {
+		log.Debugw("running-before-handler", log.Fields{"handler": funcName(handler)})
+		if err := handler(ctx); err != nil {
+			// TODO handle error
+			log.Error(err)
+			return
+		}
+	}
+
+	// Update the state
+	tMap.currentDeviceState = tMap.transitions[trigger].currentState
+	log.Debugw("Updated device state ", log.Fields{"CurrentDeviceState": tMap.currentDeviceState})
+
+	// Invoke the after handlers
+	afterHandlers := tMap.transitions[trigger].after
+	if afterHandlers == nil {
+		log.Debugw("No handlers for after", log.Fields{"trigger": trigger})
+	}
+	for _, handler := range afterHandlers {
+		log.Debugw("running-after-handler", log.Fields{"handler": funcName(handler)})
+		if err := handler(ctx); err != nil {
+			// TODO handle error
+			log.Error(err)
+			return
+		}
+	}
+}
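+
+// Illustrative usage (a sketch of how the device handler drives the state
+// machine): the handler owns a TransitionMap and feeds it triggers as events
+// arrive, for example during device init or reconciliation:
+//
+//	dh.transitionMap = NewTransitionMap(dh)
+//	dh.transitionMap.Handle(ctx, DeviceInit)
+//
+// Handle logs an error and returns without side effects when the trigger is
+// not valid from the current state.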
diff --git a/internal/pkg/core/olt_state_transitions_test.go b/internal/pkg/core/olt_state_transitions_test.go
new file mode 100644
index 0000000..75e7807
--- /dev/null
+++ b/internal/pkg/core/olt_state_transitions_test.go
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+import (
+	"context"
+	"reflect"
+	"testing"
+	"time"
+)
+
+/**
+Gets the transition map with the current state of the device.
+*/
+func getTranisitions() map[Trigger]Transition {
+	transitions := make(map[Trigger]Transition)
+	transition := Transition{
+		previousState: []DeviceState{deviceStateConnected},
+		currentState:  deviceStateConnected,
+	}
+	transitions[DeviceInit] = transition
+	return transitions
+}
+
+/**
+Gets the transition map with an after-transition func added.
+*/
+func getTranisitionsAfter() map[Trigger]Transition {
+	transitions := make(map[Trigger]Transition)
+	transition := Transition{
+		previousState: []DeviceState{deviceStateConnected},
+		currentState:  deviceStateConnected,
+		after: []TransitionHandler{func(ctx context.Context) error {
+			return nil
+		}, func(ctx context.Context) error {
+			return ErrStateTransition
+		}},
+	}
+	transitions[GrpcConnected] = transition
+	return transitions
+}
+
+/**
+Gets the transition map with a before-transition func added.
+*/
+func getTranisitionsBefore() map[Trigger]Transition {
+	transitions := make(map[Trigger]Transition)
+	transition := Transition{
+		previousState: []DeviceState{deviceStateConnected},
+		currentState:  deviceStateConnected,
+		before: []TransitionHandler{func(ctx context.Context) error {
+			return nil
+		}, func(ctx context.Context) error {
+			return ErrStateTransition
+		}},
+	}
+	transitions[GrpcConnected] = transition
+	return transitions
+}
+
+/**
+Checks creation of the transition map returned by NewTransitionMap.
+*/
+func TestNewTransitionMap(t *testing.T) {
+	type args struct {
+		dh *DeviceHandler
+	}
+	tests := []struct {
+		name string
+		args args
+		want *TransitionMap
+	}{
+		{"NewTransitionMap-1", args{newMockDeviceHandler()}, &TransitionMap{}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NewTransitionMap(tt.args.dh); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+				t.Errorf("NewTransitionMap() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+/**
+Checks that the different device transitions are handled properly.
+*/
+func TestTransitionMap_Handle(t *testing.T) {
+	type fields struct {
+		transitions        map[Trigger]Transition
+		currentDeviceState DeviceState
+	}
+	type args struct {
+		trigger Trigger
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+	}{
+		{"Handle-1", fields{getTranisitions(), deviceStateDown}, args{GrpcConnected}},
+		{"Handle-2", fields{getTranisitions(), deviceStateConnected}, args{GrpcConnected}},
+		{"Handle-3", fields{getTranisitionsBefore(), deviceStateConnected}, args{GrpcConnected}},
+		{"Handle-4", fields{getTranisitionsAfter(), deviceStateConnected}, args{GrpcConnected}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tMap := &TransitionMap{
+				transitions:        tt.fields.transitions,
+				currentDeviceState: tt.fields.currentDeviceState,
+			}
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			tMap.Handle(ctx, tt.args.trigger)
+		})
+	}
+}
+
+/**
+Checks whether the transition is valid.
+*/
+func TestTransitionMap_isValidTransition(t *testing.T) {
+	type fields struct {
+		transitions        map[Trigger]Transition
+		currentDeviceState DeviceState
+	}
+	type args struct {
+		trigger Trigger
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   bool
+	}{
+		{"isValidTransition-1", fields{getTranisitions(), deviceStateConnected}, args{DeviceInit},
+			true},
+		{"isValidTransition-2", fields{getTranisitions(), deviceStateDown}, args{GrpcConnected},
+			false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tMap := &TransitionMap{
+				transitions:        tt.fields.transitions,
+				currentDeviceState: tt.fields.currentDeviceState,
+			}
+			if got := tMap.isValidTransition(tt.args.trigger); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+				t.Errorf("isValidTransition() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+/**
+Gets the after/before transition handler's function name.
+*/
+func Test_funcName(t *testing.T) {
+	type args struct {
+		f interface{}
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{"FuncName-1", args{newMockDeviceHandler()}, ""},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := funcName(tt.args.f); got != tt.want {
+				t.Errorf("funcName() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/core/openolt.go b/internal/pkg/core/openolt.go
new file mode 100644
index 0000000..9a5167f
--- /dev/null
+++ b/internal/pkg/core/openolt.go
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+//OpenOLT structure holds the OLT information
+type OpenOLT struct {
+	deviceHandlers              map[string]*DeviceHandler
+	coreProxy                   adapterif.CoreProxy
+	adapterProxy                adapterif.AdapterProxy
+	eventProxy                  adapterif.EventProxy
+	kafkaICProxy                kafka.InterContainerProxy
+	config                      *config.AdapterFlags
+	numOnus                     int
+	KVStoreHost                 string
+	KVStorePort                 int
+	KVStoreType                 string
+	exitChannel                 chan int
+	HeartbeatCheckInterval      time.Duration
+	HeartbeatFailReportInterval time.Duration
+	GrpcTimeoutInterval         time.Duration
+	lockDeviceHandlersMap       sync.RWMutex
+}
+
+//NewOpenOLT returns a new instance of OpenOLT
+func NewOpenOLT(ctx context.Context, kafkaICProxy kafka.InterContainerProxy,
+	coreProxy adapterif.CoreProxy, adapterProxy adapterif.AdapterProxy,
+	eventProxy adapterif.EventProxy, cfg *config.AdapterFlags) *OpenOLT {
+	var openOLT OpenOLT
+	openOLT.exitChannel = make(chan int, 1)
+	openOLT.deviceHandlers = make(map[string]*DeviceHandler)
+	openOLT.kafkaICProxy = kafkaICProxy
+	openOLT.config = cfg
+	openOLT.numOnus = cfg.OnuNumber
+	openOLT.coreProxy = coreProxy
+	openOLT.adapterProxy = adapterProxy
+	openOLT.eventProxy = eventProxy
+	openOLT.KVStoreHost = cfg.KVStoreHost
+	openOLT.KVStorePort = cfg.KVStorePort
+	openOLT.KVStoreType = cfg.KVStoreType
+	openOLT.HeartbeatCheckInterval = cfg.HeartbeatCheckInterval
+	openOLT.HeartbeatFailReportInterval = cfg.HeartbeatFailReportInterval
+	openOLT.GrpcTimeoutInterval = cfg.GrpcTimeoutInterval
+	openOLT.lockDeviceHandlersMap = sync.RWMutex{}
+	return &openOLT
+}
+
+//Start starts (logs) the device manager
+func (oo *OpenOLT) Start(ctx context.Context) error {
+	log.Info("starting-device-manager")
+	log.Info("device-manager-started")
+	return nil
+}
+
+//Stop terminates the session
+func (oo *OpenOLT) Stop(ctx context.Context) error {
+	log.Info("stopping-device-manager")
+	oo.exitChannel <- 1
+	log.Info("device-manager-stopped")
+	return nil
+}
+
+func sendResponse(ctx context.Context, ch chan interface{}, result interface{}) {
+	if ctx.Err() == nil {
+		// Return the response only if the ctx has not been canceled/timed out
+		// Channel is automatically closed when a context is Done
+		ch <- result
+		log.Debugw("sendResponse", log.Fields{"result": result})
+	} else {
+		// Should the transaction be reverted back?
+		log.Debugw("sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
+	}
+}
+
+func (oo *OpenOLT) addDeviceHandlerToMap(agent *DeviceHandler) {
+	oo.lockDeviceHandlersMap.Lock()
+	defer oo.lockDeviceHandlersMap.Unlock()
+	if _, exist := oo.deviceHandlers[agent.deviceID]; !exist {
+		oo.deviceHandlers[agent.deviceID] = agent
+	}
+}
+
+func (oo *OpenOLT) deleteDeviceHandlerToMap(agent *DeviceHandler) {
+	oo.lockDeviceHandlersMap.Lock()
+	defer oo.lockDeviceHandlersMap.Unlock()
+	delete(oo.deviceHandlers, agent.deviceID)
+}
+
+func (oo *OpenOLT) getDeviceHandler(deviceID string) *DeviceHandler {
+	oo.lockDeviceHandlersMap.Lock()
+	defer oo.lockDeviceHandlersMap.Unlock()
+	if agent, ok := oo.deviceHandlers[deviceID]; ok {
+		return agent
+	}
+	return nil
+}
+
+//createDeviceTopic creates a device-specific Kafka topic and subscribes to it with the default request handler
+func (oo *OpenOLT) createDeviceTopic(device *voltha.Device) error {
+	log.Infow("create-device-topic", log.Fields{"deviceId": device.Id})
+	defaultTopic := oo.kafkaICProxy.GetDefaultTopic()
+	deviceTopic := kafka.Topic{Name: defaultTopic.Name + "_" + device.Id}
+	// TODO for the offset
+	if err := oo.kafkaICProxy.SubscribeWithDefaultRequestHandler(deviceTopic, 0); err != nil {
+		log.Infow("create-device-topic-failed", log.Fields{"deviceId": device.Id, "error": err})
+		return err
+	}
+	return nil
+}
+
+// Adopt_device creates a new device handler if not present already and then adopts the device
+func (oo *OpenOLT) Adopt_device(device *voltha.Device) error {
+	ctx := context.Background()
+	if device == nil {
+		return NewErrInvalidValue(log.Fields{"device": nil}, nil).Log()
+	}
+	log.Infow("adopt-device", log.Fields{"deviceId": device.Id})
+	var handler *DeviceHandler
+	if handler = oo.getDeviceHandler(device.Id); handler == nil {
+		handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
+		oo.addDeviceHandlerToMap(handler)
+		go handler.AdoptDevice(ctx, device)
+		// Launch the creation of the device topic
+		// go oo.createDeviceTopic(device)
+	}
+	return nil
+}
+
+//Get_ofp_device_info returns OFP information for the given device
+func (oo *OpenOLT) Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error) {
+	log.Infow("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.GetOfpDeviceInfo(device)
+	}
+	return nil, NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Get_ofp_port_info returns OFP port information for the given device
+func (oo *OpenOLT) Get_ofp_port_info(device *voltha.Device, portNo int64) (*ic.PortCapability, error) {
+	log.Infow("Get_ofp_port_info", log.Fields{"deviceId": device.Id})
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.GetOfpPortInfo(device, portNo)
+	}
+	return nil, NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Process_inter_adapter_message sends messages to a target device (between adapters)
+func (oo *OpenOLT) Process_inter_adapter_message(msg *ic.InterAdapterMessage) error {
+	log.Infow("Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
+	targetDevice := msg.Header.ProxyDeviceId // Request?
+	if targetDevice == "" && msg.Header.ToDeviceId != "" {
+		// Typical response
+		targetDevice = msg.Header.ToDeviceId
+	}
+	if handler := oo.getDeviceHandler(targetDevice); handler != nil {
+		return handler.ProcessInterAdapterMessage(msg)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": targetDevice}, nil).Log()
+}
+
+//Adapter_descriptor not implemented
+func (oo *OpenOLT) Adapter_descriptor() error {
+	return ErrNotImplemented
+}
+
+//Device_types unimplemented
+func (oo *OpenOLT) Device_types() (*voltha.DeviceTypes, error) {
+	return nil, ErrNotImplemented
+}
+
+//Health returns unimplemented
+func (oo *OpenOLT) Health() (*voltha.HealthStatus, error) {
+	return nil, ErrNotImplemented
+}
+
+//Reconcile_device reconciles the device by recreating its handler and re-triggering device init
+func (oo *OpenOLT) Reconcile_device(device *voltha.Device) error {
+	ctx := context.Background()
+	if device == nil {
+		return NewErrInvalidValue(log.Fields{"device": nil}, nil).Log()
+	}
+	log.Infow("reconcile-device", log.Fields{"deviceId": device.Id})
+	var handler *DeviceHandler
+	if handler = oo.getDeviceHandler(device.Id); handler == nil {
+		handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
+		oo.addDeviceHandlerToMap(handler)
+		handler.transitionMap = NewTransitionMap(handler)
+		handler.transitionMap.Handle(ctx, DeviceInit)
+	}
+	return nil
+}
+
+//Abandon_device unimplemented
+func (oo *OpenOLT) Abandon_device(device *voltha.Device) error {
+	return ErrNotImplemented
+}
+
+//Disable_device disables the given device
+func (oo *OpenOLT) Disable_device(device *voltha.Device) error {
+	log.Infow("disable-device", log.Fields{"deviceId": device.Id})
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.DisableDevice(device)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Reenable_device enables the olt device after disable
+func (oo *OpenOLT) Reenable_device(device *voltha.Device) error {
+	log.Infow("reenable-device", log.Fields{"deviceId": device.Id})
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.ReenableDevice(device)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Reboot_device reboots the given device
+func (oo *OpenOLT) Reboot_device(device *voltha.Device) error {
+	log.Infow("reboot-device", log.Fields{"deviceId": device.Id})
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.RebootDevice(device)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Self_test_device unimplemented
+func (oo *OpenOLT) Self_test_device(device *voltha.Device) error {
+	return ErrNotImplemented
+}
+
+//Delete_device deletes the given device and removes its handler
+func (oo *OpenOLT) Delete_device(device *voltha.Device) error {
+	log.Infow("delete-device", log.Fields{"deviceId": device.Id})
+	ctx := context.Background()
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		if err := handler.DeleteDevice(ctx, device); err != nil {
+			log.Errorw("failed-to-handle-delete-device", log.Fields{"device-id": device.Id})
+		}
+		oo.deleteDeviceHandlerToMap(handler)
+		return nil
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Get_device_details unimplemented
+func (oo *OpenOLT) Get_device_details(device *voltha.Device) error {
+	return ErrNotImplemented
+}
+
+//Update_flows_bulk is unimplemented
+func (oo *OpenOLT) Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
+	return ErrNotImplemented
+}
+
+//Update_flows_incrementally updates (add/remove) the flows on a given device
+func (oo *OpenOLT) Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
+	log.Debugw("Update_flows_incrementally", log.Fields{"deviceId": device.Id, "flows": flows, "flowMetadata": flowMetadata})
+	ctx := context.Background()
+	if handler := oo.getDeviceHandler(device.Id); handler != nil {
+		return handler.UpdateFlowsIncrementally(ctx, device, flows, groups, flowMetadata)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil).Log()
+}
+
+//Update_pm_config is unimplemented
+func (oo *OpenOLT) Update_pm_config(device *voltha.Device, pmConfigs *voltha.PmConfigs) error {
+	return ErrNotImplemented
+}
+
+//Receive_packet_out sends packet out to the device
+func (oo *OpenOLT) Receive_packet_out(deviceID string, egressPortNo int, packet *openflow_13.OfpPacketOut) error {
+	log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceID, "egress_port_no": egressPortNo, "pkt": packet})
+	ctx := context.Background()
+	if handler := oo.getDeviceHandler(deviceID); handler != nil {
+		return handler.PacketOut(ctx, egressPortNo, packet)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": deviceID}, nil).Log()
+}
+
+//Suppress_event unimplemented
+func (oo *OpenOLT) Suppress_event(filter *voltha.EventFilter) error {
+	return ErrNotImplemented
+}
+
+//Unsuppress_event  unimplemented
+func (oo *OpenOLT) Unsuppress_event(filter *voltha.EventFilter) error {
+	return ErrNotImplemented
+}
+
+//Download_image unimplemented
+func (oo *OpenOLT) Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+	return nil, ErrNotImplemented
+}
+
+//Get_image_download_status unimplemented
+func (oo *OpenOLT) Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+	return nil, ErrNotImplemented
+}
+
+//Cancel_image_download unimplemented
+func (oo *OpenOLT) Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+	return nil, ErrNotImplemented
+}
+
+//Activate_image_update unimplemented
+func (oo *OpenOLT) Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+	return nil, ErrNotImplemented
+}
+
+//Revert_image_update unimplemented
+func (oo *OpenOLT) Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+	return nil, ErrNotImplemented
+}
+
+// Enable_port enables the given PON/NNI port
+func (oo *OpenOLT) Enable_port(deviceID string, port *voltha.Port) error {
+	log.Infow("Enable_port", log.Fields{"deviceId": deviceID, "port": port})
+	return oo.enableDisablePort(deviceID, port, true)
+}
+
+// Disable_port disables the given PON/NNI port
+func (oo *OpenOLT) Disable_port(deviceID string, port *voltha.Port) error {
+	log.Infow("Disable_port", log.Fields{"deviceId": deviceID, "port": port})
+	return oo.enableDisablePort(deviceID, port, false)
+}
+
+// enableDisablePort enables or disables the given PON/NNI port on the OLT
+func (oo *OpenOLT) enableDisablePort(deviceID string, port *voltha.Port, enablePort bool) error {
+	log.Infow("enableDisablePort", log.Fields{"deviceId": deviceID, "port": port})
+	if port == nil {
+		return NewErrInvalidValue(log.Fields{
+			"reason":    "port cannot be nil",
+			"device-id": deviceID,
+			"port":      nil}, nil).Log()
+	}
+	if handler := oo.getDeviceHandler(deviceID); handler != nil {
+		log.Debugw("Enable_Disable_Port", log.Fields{"deviceId": deviceID, "port": port})
+		if enablePort {
+			if err := handler.EnablePort(port); err != nil {
+				log.Errorw("error-occurred-during-enable-port", log.Fields{"deviceID": deviceID, "port": port, "error": err})
+				return err
+			}
+		} else {
+			if err := handler.DisablePort(port); err != nil {
+				log.Errorw("error-occurred-during-disable-port", log.Fields{"deviceID": deviceID, "port": port, "error": err})
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+//Child_device_lost deletes the ONU and its references from PONResources
+func (oo *OpenOLT) Child_device_lost(deviceID string, pPortNo uint32, onuID uint32) error {
+	log.Infow("Child-device-lost", log.Fields{"parentId": deviceID})
+	ctx := context.Background()
+	if handler := oo.getDeviceHandler(deviceID); handler != nil {
+		return handler.ChildDeviceLost(ctx, pPortNo, onuID)
+	}
+	return NewErrNotFound("device-handler", log.Fields{"device-id": deviceID}, nil).Log()
+}
diff --git a/internal/pkg/core/openolt_eventmgr.go b/internal/pkg/core/openolt_eventmgr.go
new file mode 100644
index 0000000..4e21d11
--- /dev/null
+++ b/internal/pkg/core/openolt_eventmgr.go
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package core provides APIs for the openOLT adapter
+package core
+
+import (
+	ctx "context"
+	"fmt"
+	"strconv"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/common"
+	oop "github.com/opencord/voltha-protos/v3/go/openolt"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+const (
+	onuDiscoveryEvent           = "ONU_DISCOVERY"
+	onuLosEvent                 = "ONU_LOSS_OF_SIGNAL"
+	onuLobEvent                 = "ONU_LOSS_OF_BURST"
+	onuLopcMissEvent            = "ONU_LOPC_MISS"
+	onuLopcMicErrorEvent        = "ONU_LOPC_MIC_ERROR"
+	oltLosEvent                 = "OLT_LOSS_OF_SIGNAL"
+	oltIndicationDown           = "OLT_DOWN_INDICATION"
+	onuDyingGaspEvent           = "ONU_DYING_GASP"
+	onuSignalsFailEvent         = "ONU_SIGNALS_FAIL"
+	onuStartupFailEvent         = "ONU_STARTUP_FAIL"
+	onuSignalDegradeEvent       = "ONU_SIGNAL_DEGRADE"
+	onuDriftOfWindowEvent       = "ONU_DRIFT_OF_WINDOW"
+	onuActivationFailEvent      = "ONU_ACTIVATION_FAIL"
+	onuProcessingErrorEvent     = "ONU_PROCESSING_ERROR"
+	onuTiwiEvent                = "ONU_TRANSMISSION_WARNING"
+	onuLossOmciEvent            = "ONU_LOSS_OF_OMCI_CHANNEL"
+	onuLossOfKeySyncEvent       = "ONU_LOSS_OF_KEY_SYNC"
+	onuLossOfFrameEvent         = "ONU_LOSS_OF_FRAME"
+	onuLossOfPloamEvent         = "ONU_LOSS_OF_PLOAM"
+	ponIntfDownIndiction        = "OLT_PON_INTERFACE_DOWN"
+	onuDeactivationFailureEvent = "ONU_DEACTIVATION_FAILURE"
+)
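+
+// When published, most of the event names above are suffixed with "_RAISE_EVENT" or
+// "_CLEAR_EVENT" depending on the alarm status; for example onuLosEvent is sent as
+// "ONU_LOSS_OF_SIGNAL_RAISE_EVENT" while the ONU reports LOS status "on".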
+
+const (
+	pon           = voltha.EventSubCategory_PON
+	olt           = voltha.EventSubCategory_OLT
+	ont           = voltha.EventSubCategory_ONT
+	onu           = voltha.EventSubCategory_ONU
+	nni           = voltha.EventSubCategory_NNI
+	service       = voltha.EventCategory_SERVICE
+	security      = voltha.EventCategory_SECURITY
+	equipment     = voltha.EventCategory_EQUIPMENT
+	processing    = voltha.EventCategory_PROCESSING
+	environment   = voltha.EventCategory_ENVIRONMENT
+	communication = voltha.EventCategory_COMMUNICATION
+)
+
+const (
+	// statusCheckOn represents status check On
+	statusCheckOn = "on"
+	// statusCheckOff represents status check Off
+	statusCheckOff = "off"
+	// operationStateUp represents operation state Up
+	operationStateUp = "up"
+	// operationStateDown represents operation state Down
+	operationStateDown = "down"
+	// base10 represents base 10 conversion
+	base10 = 10
+)
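+
+// The "on"/"off" strings above mirror the status fields carried in openolt alarm
+// indications (for example LosIndication.Status), which report state as strings
+// rather than booleans.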
+
+// OpenOltEventMgr holds the event proxy and device handler used to process and publish OpenOLT events
+type OpenOltEventMgr struct {
+	eventProxy adapterif.EventProxy
+	handler    *DeviceHandler
+}
+
+// NewEventMgr returns a new event manager that processes and publishes OpenOLT events
+func NewEventMgr(eventProxy adapterif.EventProxy, handler *DeviceHandler) *OpenOltEventMgr {
+	var em OpenOltEventMgr
+	em.eventProxy = eventProxy
+	em.handler = handler
+	return &em
+}
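+
+// Illustrative usage sketch (the wiring below is an assumption, not something this
+// file defines): a device handler that owns an adapterif.EventProxy could construct
+// and drive the manager roughly as
+//
+//	em := NewEventMgr(dh.EventProxy, dh)
+//	_ = em.ProcessEvents(alarmInd, dh.deviceID, time.Now().Unix())
+//
+// where alarmInd is the *oop.AlarmIndication received from the OLT.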
+
+// ProcessEvents processes the given alarm indication and publishes the corresponding OpenOLT event
+func (em *OpenOltEventMgr) ProcessEvents(alarmInd *oop.AlarmIndication, deviceID string, raisedTs int64) error {
+	var err error
+	switch alarmInd.Data.(type) {
+	case *oop.AlarmIndication_LosInd:
+		log.Infow("Received LOS indication", log.Fields{"alarm_ind": alarmInd})
+		err = em.oltLosIndication(alarmInd.GetLosInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuAlarmInd:
+		log.Infow("Received onu alarm indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuAlarmIndication(alarmInd.GetOnuAlarmInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_DyingGaspInd:
+		log.Infow("Received dying gasp indication", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuDyingGaspIndication(alarmInd.GetDyingGaspInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuActivationFailInd:
+		log.Infow("Received onu activation fail indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuActivationFailIndication(alarmInd.GetOnuActivationFailInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuLossOmciInd:
+		log.Infow("Received onu loss omci indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuLossOmciIndication(alarmInd.GetOnuLossOmciInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuDriftOfWindowInd:
+		log.Infow("Received onu drift of window indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuDriftOfWindowIndication(alarmInd.GetOnuDriftOfWindowInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuSignalDegradeInd:
+		log.Infow("Received onu signal degrade indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuSignalDegradeIndication(alarmInd.GetOnuSignalDegradeInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuSignalsFailInd:
+		log.Infow("Received onu signal fail indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuSignalsFailIndication(alarmInd.GetOnuSignalsFailInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuStartupFailInd:
+		log.Infow("Received onu startup fail indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuStartupFailedIndication(alarmInd.GetOnuStartupFailInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuTiwiInd:
+		log.Infow("Received onu transmission warning indication ", log.Fields{"alarm_ind": alarmInd})
+		log.Infow("Not implemented yet", log.Fields{"alarm_ind": "Onu-Transmission-indication"})
+	case *oop.AlarmIndication_OnuLossOfSyncFailInd:
+		log.Infow("Received onu Loss of Sync Fail indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuLossOfSyncIndication(alarmInd.GetOnuLossOfSyncFailInd(), deviceID, raisedTs)
+	case *oop.AlarmIndication_OnuItuPonStatsInd:
+		log.Infow("Received onu Itu Pon Stats indication ", log.Fields{"alarm_ind": alarmInd})
+		log.Infow("Not implemented yet", log.Fields{"alarm_ind": alarmInd})
+	case *oop.AlarmIndication_OnuDeactivationFailureInd:
+		log.Infow("Received onu deactivation failure indication ", log.Fields{"alarm_ind": alarmInd})
+		err = em.onuDeactivationFailureIndication(alarmInd.GetOnuDeactivationFailureInd(), deviceID, raisedTs)
+	default:
+		err = NewErrInvalidValue(log.Fields{"indication-type": alarmInd}, nil)
+	}
+	if err != nil {
+		return NewErrCommunication("publish-message", log.Fields{"indication-type": alarmInd}, err).Log()
+	}
+	return nil
+}
+
+// oltUpDownIndication handles Up and Down state of an OLT
+func (em *OpenOltEventMgr) oltUpDownIndication(oltIndication *oop.OltIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["oper-state"] = oltIndication.OperState
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if oltIndication.OperState == operationStateDown {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", oltIndicationDown, "RAISE_EVENT")
+	} else if oltIndication.OperState == operationStateUp {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", oltIndicationDown, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+		log.Errorw("Failed to send OLT event", log.Fields{"err": err})
+		return err
+	}
+	log.Infow("OLT UpDown event sent to KAFKA", log.Fields{})
+	return nil
+}
+
+// OnuDiscoveryIndication publishes an ONU discovery event for the given ONU
+func (em *OpenOltEventMgr) OnuDiscoveryIndication(onuDisc *oop.OnuDiscIndication, deviceID string, OnuID uint32, serialNumber string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["onu-id"] = strconv.FormatUint(uint64(OnuID), base10)
+	context["intf-id"] = strconv.FormatUint(uint64(onuDisc.IntfId), base10)
+	context["serial-number"] = serialNumber
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	de.DeviceEventName = fmt.Sprintf("%s_%s", onuDiscoveryEvent, "RAISE_EVENT")
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU discovery event", log.Fields{"serial-number": serialNumber, "intf-id": onuDisc.IntfId})
+		return err
+	}
+	log.Infow("ONU discovery event sent to KAFKA", log.Fields{"serial-number": serialNumber, "intf-id": onuDisc.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) oltLosIndication(oltLos *oop.LosIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(oltLos.IntfId), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if oltLos.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", oltLosEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", oltLosEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+		log.Errorw("Failed to send OLT loss of signal event", log.Fields{"intf-id": oltLos.IntfId})
+		return err
+	}
+	log.Infow("OLT LOS event sent to KAFKA", log.Fields{"intf-id": oltLos.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuDyingGaspIndication(dgi *oop.DyingGaspIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	var serialNumber string
+	context := make(map[string]string)
+	/* Populating event context */
+	serialNumber = ""
+	onu := em.handler.formOnuKey(dgi.IntfId, dgi.OnuId)
+	if onu, ok := em.handler.onus.Load(onu); ok {
+		serialNumber = onu.(*OnuDevice).serialNumber
+	}
+	context["serial-number"] = serialNumber
+	context["intf-id"] = strconv.FormatUint(uint64(dgi.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(dgi.OnuId), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	de.DeviceEventName = fmt.Sprintf("%s_%s", onuDyingGaspEvent, "EVENT")
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU Dying gasp event", log.Fields{"intf-id": dgi.IntfId, "onu-id": dgi.OnuId})
+		return err
+	}
+	log.Infow("ONU dying gasp event sent to KAFKA", log.Fields{"intf-id": dgi.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuAlarmIndication(onuAlarm *oop.OnuAlarmIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(onuAlarm.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(onuAlarm.OnuId), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuAlarm.LosStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLosEvent, "RAISE_EVENT")
+	} else if onuAlarm.LosStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLosEvent, "CLEAR_EVENT")
+	} else if onuAlarm.LobStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLobEvent, "RAISE_EVENT")
+	} else if onuAlarm.LobStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLobEvent, "CLEAR_EVENT")
+	} else if onuAlarm.LopcMissStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLopcMissEvent, "RAISE_EVENT")
+	} else if onuAlarm.LopcMissStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLopcMissEvent, "CLEAR_EVENT")
+	} else if onuAlarm.LopcMicErrorStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLopcMicErrorEvent, "RAISE_EVENT")
+	} else if onuAlarm.LopcMicErrorStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLopcMicErrorEvent, "CLEAR_EVENT")
+	} else if onuAlarm.LofiStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfFrameEvent, "RAISE_EVENT")
+	} else if onuAlarm.LofiStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfFrameEvent, "CLEAR_EVENT")
+	} else if onuAlarm.LoamiStatus == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfPloamEvent, "RAISE_EVENT")
+	} else if onuAlarm.LoamiStatus == statusCheckOff {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfPloamEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, onu, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU Los event", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
+		return err
+	}
+	log.Infow("ONU LOS event sent to KAFKA", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuActivationFailIndication(oaf *oop.OnuActivationFailureIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(oaf.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(oaf.OnuId), base10)
+	context["fail-reason"] = strconv.FormatUint(uint64(oaf.FailReason), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	de.DeviceEventName = fmt.Sprintf("%s_%s", onuActivationFailEvent, "RAISE_EVENT")
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU activation failure event", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
+		return err
+	}
+	log.Infow("ONU activation failure event sent to KAFKA", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuLossOmciIndication(onuLossOmci *oop.OnuLossOfOmciChannelIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(onuLossOmci.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(onuLossOmci.OnuId), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuLossOmci.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOmciEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOmciEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU loss of OMCI channel event", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
+		return err
+	}
+	log.Infow("ONU loss of OMCI channel event sent to KAFKA", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuDriftOfWindowIndication(onuDriftWindow *oop.OnuDriftOfWindowIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(onuDriftWindow.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(onuDriftWindow.OnuId), base10)
+	context["drift"] = strconv.FormatUint(uint64(onuDriftWindow.Drift), base10)
+	context["new-eqd"] = strconv.FormatUint(uint64(onuDriftWindow.NewEqd), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuDriftWindow.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuDriftOfWindowEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuDriftOfWindowEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU drift of window event", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
+		return err
+	}
+	log.Infow("ONU drift of window event sent to KAFKA", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuSignalDegradeIndication(onuSignalDegrade *oop.OnuSignalDegradeIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["intf-id"] = strconv.FormatUint(uint64(onuSignalDegrade.IntfId), base10)
+	context["onu-id"] = strconv.FormatUint(uint64(onuSignalDegrade.OnuId), base10)
+	context["inverse-bit-error-rate"] = strconv.FormatUint(uint64(onuSignalDegrade.InverseBitErrorRate), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuSignalDegrade.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalDegradeEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalDegradeEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU signals degrade event", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
+		return err
+	}
+	log.Infow("ONU signal degrade event sent to KAFKA", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuSignalsFailIndication(onuSignalsFail *oop.OnuSignalsFailureIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["onu-id"] = strconv.FormatUint(uint64(onuSignalsFail.OnuId), base10)
+	context["intf-id"] = strconv.FormatUint(uint64(onuSignalsFail.IntfId), base10)
+	context["inverse-bit-error-rate"] = strconv.FormatUint(uint64(onuSignalsFail.InverseBitErrorRate), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuSignalsFail.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalsFailEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalsFailEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU signals fail event", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
+		return err
+	}
+	log.Infow("ONU signals fail event sent to KAFKA", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuStartupFailedIndication(onuStartupFail *oop.OnuStartupFailureIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["onu-id"] = strconv.FormatUint(uint64(onuStartupFail.OnuId), base10)
+	context["intf-id"] = strconv.FormatUint(uint64(onuStartupFail.IntfId), base10)
+
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuStartupFail.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuStartupFailEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuStartupFailEvent, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU startup fail event", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
+		return err
+	}
+	log.Infow("ONU startup fail event sent to KAFKA", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
+	return nil
+}
+
+func (em *OpenOltEventMgr) onuLossOfSyncIndication(onuLOKI *oop.OnuLossOfKeySyncFailureIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["onu-id"] = strconv.FormatUint(uint64(onuLOKI.OnuId), base10)
+	context["intf-id"] = strconv.FormatUint(uint64(onuLOKI.IntfId), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	if onuLOKI.Status == statusCheckOn {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfKeySyncEvent, "RAISE_EVENT")
+	} else {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfKeySyncEvent, "CLEAR_EVENT")
+	}
+
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, security, onu, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU loss of key sync event", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
+		return err
+	}
+	log.Infow("ONU loss of key sync event sent to KAFKA", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
+	return nil
+}
+
+// oltIntfOperIndication handles the Up and Down state of OLT PON ports
+func (em *OpenOltEventMgr) oltIntfOperIndication(ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	portID := IntfIDToPortNo(ifindication.IntfId, voltha.Port_PON_OLT)
+	device, err := em.handler.coreProxy.GetDevice(ctx.Background(), deviceID, deviceID)
+	if err != nil {
+		log.Errorw("Error while fetching Device object", log.Fields{"DeviceId": deviceID, "error": err})
+		return
+	}
+	for _, port := range device.Ports {
+		if port.PortNo == portID {
+			// Events are suppressed if the port admin state is not enabled.
+			if port.AdminState != common.AdminState_ENABLED {
+				log.Infow("Port disable/enable event not generated because the port is not enabled by the operator", log.Fields{"deviceId": deviceID, "port": port})
+				return
+			}
+			break
+		}
+	}
+	/* Populating event context */
+	context["oper-state"] = ifindication.GetOperState()
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+
+	if ifindication.GetOperState() == operationStateDown {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", ponIntfDownIndiction, "RAISE_EVENT")
+	} else if ifindication.OperState == operationStateUp {
+		de.DeviceEventName = fmt.Sprintf("%s_%s", ponIntfDownIndiction, "CLEAR_EVENT")
+	}
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+		log.Errorw("failed-to-send-olt-intf-oper-status-event", log.Fields{"err": err})
+		return
+	}
+	log.Info("sent-olt-intf-oper-status-event-to-kafka")
+}
+
+func (em *OpenOltEventMgr) onuDeactivationFailureIndication(onuDFI *oop.OnuDeactivationFailureIndication, deviceID string, raisedTs int64) error {
+	var de voltha.DeviceEvent
+	context := make(map[string]string)
+	/* Populating event context */
+	context["onu-id"] = strconv.FormatUint(uint64(onuDFI.OnuId), base10)
+	context["intf-id"] = strconv.FormatUint(uint64(onuDFI.IntfId), base10)
+	context["failure-reason"] = strconv.FormatUint(uint64(onuDFI.FailReason), base10)
+	/* Populating device event body */
+	de.Context = context
+	de.ResourceId = deviceID
+	de.DeviceEventName = onuDeactivationFailureEvent
+
+	/* Send event to KAFKA */
+	if err := em.eventProxy.SendDeviceEvent(&de, equipment, onu, raisedTs); err != nil {
+		log.Errorw("Failed to send ONU deactivation failure event", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
+		return err
+	}
+	log.Infow("ONU deactivation failure event sent to KAFKA", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
+	return nil
+}
diff --git a/internal/pkg/core/openolt_eventmgr_test.go b/internal/pkg/core/openolt_eventmgr_test.go
new file mode 100644
index 0000000..d5f1520
--- /dev/null
+++ b/internal/pkg/core/openolt_eventmgr_test.go
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package core provides APIs for the openOLT adapter
+package core
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+	oop "github.com/opencord/voltha-protos/v3/go/openolt"
+)
+
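+// mockEventMgr builds an OpenOltEventMgr backed by a mock event proxy and a device
+// handler seeded with a single ONU at (intfID=1, onuID=1), so indications that look
+// up the ONU serial number from the handler's onus cache (e.g. dying gasp) resolve
+// against "TEST_ONU_123".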
+func mockEventMgr() *OpenOltEventMgr {
+	ep := &mocks.MockEventProxy{}
+	dh := &DeviceHandler{}
+	dh.onus = sync.Map{}
+	dh.onus.Store(dh.formOnuKey(1, 1), &OnuDevice{deviceID: "TEST_ONU",
+		deviceType:   "ONU",
+		serialNumber: "TEST_ONU_123",
+		onuID:        1, intfID: 1})
+	return NewEventMgr(ep, dh)
+}
+func TestOpenOltEventMgr_ProcessEvents(t *testing.T) {
+	em := mockEventMgr()
+	type args struct {
+		alarmInd *oop.AlarmIndication
+		deviceID string
+		raisedTs int64
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		// LosIndication alarms
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_LosInd{LosInd: &oop.LosIndication{IntfId: 1, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_LosInd{LosInd: &oop.LosIndication{IntfId: 1}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_LosInd{LosInd: &oop.LosIndication{IntfId: 1, Status: "on"}}}}},
+
+		// OnuAlarmIndication alarms
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LosStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LosStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LobStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LobStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LopcMissStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LopcMissStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LopcMicErrorStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LopcMicErrorStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LofiStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LofiStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LoamiStatus: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LoamiStatus: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuAlarmInd{OnuAlarmInd: &oop.OnuAlarmIndication{IntfId: 1, OnuId: 3, LosStatus: "on"}}}}},
+
+		// AlarmIndication_DyingGaspInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_DyingGaspInd{DyingGaspInd: &oop.DyingGaspIndication{IntfId: 1, OnuId: 1, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_DyingGaspInd{DyingGaspInd: &oop.DyingGaspIndication{IntfId: 1, OnuId: 1, Status: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_DyingGaspInd{DyingGaspInd: &oop.DyingGaspIndication{IntfId: 1, OnuId: 1}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_DyingGaspInd{DyingGaspInd: &oop.DyingGaspIndication{IntfId: 1, OnuId: 1}}}}},
+
+		// AlarmIndication_OnuActivationFailInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuActivationFailInd{OnuActivationFailInd: &oop.OnuActivationFailureIndication{IntfId: 1, OnuId: 3}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuActivationFailInd{OnuActivationFailInd: &oop.OnuActivationFailureIndication{IntfId: 1, OnuId: 3}}}}},
+
+		// AlarmIndication_OnuLossOmciInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuLossOmciInd{OnuLossOmciInd: &oop.OnuLossOfOmciChannelIndication{IntfId: 1, OnuId: 3, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuLossOmciInd{OnuLossOmciInd: &oop.OnuLossOfOmciChannelIndication{IntfId: 1, OnuId: 3, Status: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuLossOmciInd{OnuLossOmciInd: &oop.OnuLossOfOmciChannelIndication{IntfId: 1, OnuId: 3, Status: "on"}}}}},
+
+		// AlarmIndication_OnuDriftOfWindowInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuDriftOfWindowInd{OnuDriftOfWindowInd: &oop.OnuDriftOfWindowIndication{IntfId: 1, OnuId: 3, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuDriftOfWindowInd{OnuDriftOfWindowInd: &oop.OnuDriftOfWindowIndication{IntfId: 1, OnuId: 3, Status: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuDriftOfWindowInd{OnuDriftOfWindowInd: &oop.OnuDriftOfWindowIndication{IntfId: 1, OnuId: 3, Drift: 10, NewEqd: 10}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuDriftOfWindowInd{OnuDriftOfWindowInd: &oop.OnuDriftOfWindowIndication{IntfId: 1, OnuId: 3, Status: "on"}}}}},
+
+		// AlarmIndication_OnuSignalDegradeInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalDegradeInd{OnuSignalDegradeInd: &oop.OnuSignalDegradeIndication{IntfId: 1, OnuId: 3, Status: "on", InverseBitErrorRate: 100}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalDegradeInd{OnuSignalDegradeInd: &oop.OnuSignalDegradeIndication{IntfId: 1, OnuId: 3, Status: "off", InverseBitErrorRate: 100}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalDegradeInd{OnuSignalDegradeInd: &oop.OnuSignalDegradeIndication{IntfId: 1, OnuId: 3, InverseBitErrorRate: 100}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalDegradeInd{OnuSignalDegradeInd: &oop.OnuSignalDegradeIndication{IntfId: 1, OnuId: 3, InverseBitErrorRate: 100}}}}},
+
+		// AlarmIndication_OnuSignalsFailInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalsFailInd{OnuSignalsFailInd: &oop.OnuSignalsFailureIndication{IntfId: 1, OnuId: 3, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalsFailInd{OnuSignalsFailInd: &oop.OnuSignalsFailureIndication{IntfId: 1, OnuId: 3, Status: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalsFailInd{OnuSignalsFailInd: &oop.OnuSignalsFailureIndication{IntfId: 1, OnuId: 3, InverseBitErrorRate: 100}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuSignalsFailInd{OnuSignalsFailInd: &oop.OnuSignalsFailureIndication{IntfId: 1, OnuId: 3, InverseBitErrorRate: 100}}}}},
+
+		// AlarmIndication_OnuProcessingErrorInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuProcessingErrorInd{OnuProcessingErrorInd: &oop.OnuProcessingErrorIndication{IntfId: 1, OnuId: 3}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuProcessingErrorInd{OnuProcessingErrorInd: &oop.OnuProcessingErrorIndication{IntfId: 1, OnuId: 3}}}}},
+
+		// AlarmIndication_OnuTiwiInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuTiwiInd{OnuTiwiInd: &oop.OnuTransmissionInterferenceWarning{IntfId: 1, OnuId: 3, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuTiwiInd{OnuTiwiInd: &oop.OnuTransmissionInterferenceWarning{IntfId: 1, OnuId: 3, Status: "on", Drift: 100}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuTiwiInd{OnuTiwiInd: &oop.OnuTransmissionInterferenceWarning{IntfId: 1, OnuId: 3}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuTiwiInd{OnuTiwiInd: &oop.OnuTransmissionInterferenceWarning{IntfId: 1, OnuId: 3}}}}},
+
+		// AlarmIndication_onuLossOfKeySyncInd
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuLossOfSyncFailInd{OnuLossOfSyncFailInd: &oop.OnuLossOfKeySyncFailureIndication{IntfId: 1, OnuId: 3, Status: "on"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+		{"ProcessEvents-", args{alarmInd: &oop.AlarmIndication{Data: &oop.AlarmIndication_OnuLossOfSyncFailInd{OnuLossOfSyncFailInd: &oop.OnuLossOfKeySyncFailureIndication{IntfId: 1, OnuId: 3, Status: "off"}}}, deviceID: "olt", raisedTs: time.Now().Unix()}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			em.ProcessEvents(tt.args.alarmInd, tt.args.deviceID, tt.args.raisedTs)
+		})
+	}
+}
+
+func TestOpenOltEventMgr_OnuDiscoveryIndication(t *testing.T) {
+	em := mockEventMgr()
+	type args struct {
+		onuDisc      *oop.OnuDiscIndication
+		deviceID     string
+		OnuID        uint32
+		serialNumber string
+		raisedTs     int64
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"OnuDiscoveryIndication", args{onuDisc: &oop.OnuDiscIndication{IntfId: 1, SerialNumber: &oop.SerialNumber{VendorId: []byte("TWSH"), VendorSpecific: []byte("1234")}}, deviceID: "olt", OnuID: 3, serialNumber: "1234", raisedTs: time.Now().Unix()}},
+		{"OnuDiscoveryIndication", args{onuDisc: &oop.OnuDiscIndication{}, raisedTs: time.Now().Unix()}},
+		{"OnuDiscoveryIndication", args{onuDisc: &oop.OnuDiscIndication{}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			em.OnuDiscoveryIndication(tt.args.onuDisc, tt.args.deviceID, tt.args.OnuID, tt.args.serialNumber, tt.args.raisedTs)
+		})
+	}
+}
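+
+// The example below is an illustrative sketch only: it shows the call shape for
+// feeding a raw alarm indication through ProcessEvents using the same mock wiring
+// as the tests above. It is compiled but not executed as a golden-output example.
+func ExampleOpenOltEventMgr_ProcessEvents() {
+	em := mockEventMgr()
+	// A LOS indication with status "on" publishes an OLT_LOSS_OF_SIGNAL_RAISE_EVENT.
+	_ = em.ProcessEvents(&oop.AlarmIndication{
+		Data: &oop.AlarmIndication_LosInd{LosInd: &oop.LosIndication{IntfId: 1, Status: "on"}},
+	}, "olt", time.Now().Unix())
+}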
diff --git a/internal/pkg/core/openolt_flowmgr.go b/internal/pkg/core/openolt_flowmgr.go
new file mode 100644
index 0000000..fe03a79
--- /dev/null
+++ b/internal/pkg/core/openolt_flowmgr.go
@@ -0,0 +1,3044 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"math/big"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v3/pkg/techprofile"
+	rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+	"github.com/opencord/voltha-protos/v3/go/common"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	openoltpb2 "github.com/opencord/voltha-protos/v3/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+
+	//deepcopy "github.com/getlantern/deepcopy"
+	"github.com/EagleChen/mapmutex"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Flow categories
+
+	//HsiaFlow flow category
+	HsiaFlow = "HSIA_FLOW"
+
+	//EapolFlow flow category
+	EapolFlow = "EAPOL_FLOW"
+
+	//DhcpFlow flow category
+	DhcpFlow = "DHCP_FLOW"
+
+	//MulticastFlow flow category
+	MulticastFlow = "MULTICAST_FLOW"
+
+	//IgmpFlow flow category
+	IgmpFlow = "IGMP_FLOW"
+
+	//IPProtoDhcp IP protocol value used to classify DHCP flows
+	IPProtoDhcp = 17
+
+	//IPProtoIgmp IP protocol value for IGMP
+	IPProtoIgmp = 2
+
+	//EapEthType EAPOL ethertype value
+	EapEthType = 0x888e
+	//LldpEthType lldp ethtype value
+	LldpEthType = 0x88cc
+	//IPv4EthType IPv4 ethernet type value
+	IPv4EthType = 0x800
+
+	//IgmpProto proto value
+	IgmpProto = 2
+
+	//FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py
+
+	//ReservedVlan Transparent Vlan
+	ReservedVlan = 4095
+
+	//DefaultMgmtVlan default vlan value
+	DefaultMgmtVlan = 4091
+
+	// Openolt Flow
+
+	//Upstream constant
+	Upstream = "upstream"
+	//Downstream constant
+	Downstream = "downstream"
+	//Multicast constant
+	Multicast = "multicast"
+	//PacketTagType constant
+	PacketTagType = "pkt_tag_type"
+	//Untagged constant
+	Untagged = "untagged"
+	//SingleTag constant
+	SingleTag = "single_tag"
+	//DoubleTag constant
+	DoubleTag = "double_tag"
+
+	// classifierInfo
+
+	//EthType constant
+	EthType = "eth_type"
+	//EthDst constant
+	EthDst = "eth_dst"
+	//TPID constant
+	TPID = "tpid"
+	//IPProto constant
+	IPProto = "ip_proto"
+	//InPort constant
+	InPort = "in_port"
+	//VlanVid constant
+	VlanVid = "vlan_vid"
+	//VlanPcp constant
+	VlanPcp = "vlan_pcp"
+
+	//UDPDst constant
+	UDPDst = "udp_dst"
+	//UDPSrc constant
+	UDPSrc = "udp_src"
+	//Ipv4Dst constant
+	Ipv4Dst = "ipv4_dst"
+	//Ipv4Src constant
+	Ipv4Src = "ipv4_src"
+	//Metadata constant
+	Metadata = "metadata"
+	//TunnelID constant
+	TunnelID = "tunnel_id"
+	//Output constant
+	Output = "output"
+	//GroupID constant
+	GroupID = "group_id"
+	// Actions
+
+	//PopVlan constant
+	PopVlan = "pop_vlan"
+	//PushVlan constant
+	PushVlan = "push_vlan"
+	//TrapToHost constant
+	TrapToHost = "trap_to_host"
+	//MaxMeterBand constant
+	MaxMeterBand = 2
+	//VlanPCPMask constant
+	VlanPCPMask = 0xFF
+	//VlanvIDMask constant
+	VlanvIDMask = 0xFFF
+	//IntfID constant
+	IntfID = "intfId"
+	//OnuID constant
+	OnuID = "onuId"
+	//UniID constant
+	UniID = "uniId"
+	//PortNo constant
+	PortNo = "portNo"
+	//AllocID constant
+	AllocID = "allocId"
+
+	//NoneOnuID constant
+	NoneOnuID = -1
+	//NoneUniID constant
+	NoneUniID = -1
+	//NoneGemPortID constant
+	NoneGemPortID = -1
+
+	// BinaryStringPrefix is binary string prefix
+	BinaryStringPrefix = "0b"
+	// BinaryBit1 is binary bit 1 expressed as a character
+	BinaryBit1 = '1'
+)
+
+type gemPortKey struct {
+	intfID  uint32
+	gemPort uint32
+}
+
+type pendingFlowDeleteKey struct {
+	intfID uint32
+	onuID  uint32
+	uniID  uint32
+}
+
+type tpLockKey struct {
+	intfID uint32
+	onuID  uint32
+	uniID  uint32
+}
+
+type schedQueue struct {
+	direction    tp_pb.Direction
+	intfID       uint32
+	onuID        uint32
+	uniID        uint32
+	tpID         uint32
+	uniPort      uint32
+	tpInst       *tp.TechProfile
+	meterID      uint32
+	flowMetadata *voltha.FlowMetadata
+}
+
+type queueInfoBrief struct {
+	gemPortID       uint32
+	servicePriority uint32
+}
+
+//OpenOltFlowMgr holds the data required to manage flows for an OLT device
+type OpenOltFlowMgr struct {
+	techprofile        map[uint32]tp.TechProfileIf
+	deviceHandler      *DeviceHandler
+	resourceMgr        *rsrcMgr.OpenOltResourceMgr
+	onuIdsLock         sync.RWMutex
+	flowsUsedByGemPort map[gemPortKey][]uint32            //gem port id to flow ids
+	packetInGemPort    map[rsrcMgr.PacketInInfoKey]uint32 //packet in gem port local cache
+	onuGemInfo         map[uint32][]rsrcMgr.OnuGemInfo    //onu, gem and uni info local cache
+	lockCache          sync.RWMutex
+	pendingFlowDelete  sync.Map
+	// The mapmutex.Mutex can be fine tuned to use mapmutex.NewCustomizedMapMutex
+	perUserFlowHandleLock    *mapmutex.Mutex
+	interfaceToMcastQueueMap map[uint32]*queueInfoBrief /*pon interface -> multicast queue map. Required to assign GEM to a bucket during group population*/
+}
+
+//NewFlowManager creates OpenOltFlowMgr object and initializes the parameters
+func NewFlowManager(ctx context.Context, dh *DeviceHandler, rMgr *rsrcMgr.OpenOltResourceMgr) *OpenOltFlowMgr {
+	log.Info("Initializing flow manager")
+	var flowMgr OpenOltFlowMgr
+	var err error
+	var idx uint32
+
+	flowMgr.deviceHandler = dh
+	flowMgr.resourceMgr = rMgr
+	flowMgr.techprofile = make(map[uint32]tp.TechProfileIf)
+	if err = flowMgr.populateTechProfilePerPonPort(); err != nil {
+		log.Error("Error while populating tech profile mgr")
+		return nil
+	}
+	flowMgr.onuIdsLock = sync.RWMutex{}
+	flowMgr.flowsUsedByGemPort = make(map[gemPortKey][]uint32)
+	flowMgr.packetInGemPort = make(map[rsrcMgr.PacketInInfoKey]uint32)
+	flowMgr.onuGemInfo = make(map[uint32][]rsrcMgr.OnuGemInfo)
+	ponPorts := rMgr.DevInfo.GetPonPorts()
+	//Load the onugem info cache from kv store on flowmanager start
+	for idx = 0; idx < ponPorts; idx++ {
+		if flowMgr.onuGemInfo[idx], err = rMgr.GetOnuGemInfo(ctx, idx); err != nil {
+			log.Error("Failed to load onu gem info cache")
+		}
+		//Load flowID list per gem map per interface from the kvstore.
+		flowMgr.loadFlowIDlistForGem(ctx, idx)
+	}
+	flowMgr.lockCache = sync.RWMutex{}
+	flowMgr.pendingFlowDelete = sync.Map{}
+	flowMgr.perUserFlowHandleLock = mapmutex.NewMapMutex()
+	flowMgr.interfaceToMcastQueueMap = make(map[uint32]*queueInfoBrief)
+	//load interface to multicast queue map from kv store
+	flowMgr.loadInterfaceToMulticastQueueMap(ctx)
+	log.Info("Initialization of flow manager successful")
+	return &flowMgr
+}
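+
+// Illustrative sketch (the caller wiring is an assumption): the device handler is
+// expected to create the flow manager once its resource manager is ready, e.g.
+//
+//	dh.flowMgr = NewFlowManager(ctx, dh, dh.resourceMgr)
+//
+// and must check for a nil result, since NewFlowManager returns nil when the
+// per-PON tech profile managers cannot be populated.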
+
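+// generateStoredFlowID encodes the flow direction into the stored 64-bit flow id:
+// upstream ids are OR'ed with 0x1<<15, multicast ids with 0x2<<15, and downstream
+// ids are stored unchanged. For example, an upstream flow id of 5 is stored as
+// 0x8005.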
+func (f *OpenOltFlowMgr) generateStoredFlowID(flowID uint32, direction string) (uint64, error) {
+	if direction == Upstream {
+		log.Debug("upstream flow, shifting id")
+		return 0x1<<15 | uint64(flowID), nil
+	} else if direction == Downstream {
+		log.Debug("downstream flow, not shifting id")
+		return uint64(flowID), nil
+	} else if direction == Multicast {
+		log.Debug("multicast flow, shifting id")
+		return 0x2<<15 | uint64(flowID), nil
+	} else {
+		return 0, NewErrInvalidValue(log.Fields{"direction": direction}, nil).Log()
+	}
+}
+
+func (f *OpenOltFlowMgr) registerFlow(ctx context.Context, flowFromCore *ofp.OfpFlowStats, deviceFlow *openoltpb2.Flow) {
+	log.Debugw("Registering Flow for Device", log.Fields{"flow": flowFromCore,
+		"device": f.deviceHandler.deviceID})
+	gemPK := gemPortKey{uint32(deviceFlow.AccessIntfId), uint32(deviceFlow.GemportId)}
+	flowIDList, ok := f.flowsUsedByGemPort[gemPK]
+	if !ok {
+		flowIDList = []uint32{deviceFlow.FlowId}
+	}
+	flowIDList = appendUnique(flowIDList, deviceFlow.FlowId)
+	f.flowsUsedByGemPort[gemPK] = flowIDList
+	// update the flowids for a gem to the KVstore
+	f.resourceMgr.UpdateFlowIDsForGem(ctx, uint32(deviceFlow.AccessIntfId), uint32(deviceFlow.GemportId), flowIDList)
+}
+
+func (f *OpenOltFlowMgr) divideAndAddFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32,
+	classifierInfo map[string]interface{}, actionInfo map[string]interface{}, flow *ofp.OfpFlowStats, TpID uint32,
+	UsMeterID uint32, DsMeterID uint32, flowMetadata *voltha.FlowMetadata) {
+	var allocID uint32
+	var gemPorts []uint32
+	var TpInst *tp.TechProfile
+
+	log.Infow("Dividing flow", log.Fields{"intfId": intfID, "onuId": onuID, "uniId": uniID, "portNo": portNo,
+		"classifier": classifierInfo, "action": actionInfo, "UsMeterID": UsMeterID, "DsMeterID": DsMeterID, "TpID": TpID})
+	// only create tcont/gemports if there is actually an onu id.  otherwise BAL throws an error.  Usually this
+	// is because the flow is an NNI flow and there would be no onu resources associated with it
+	// TODO: properly deal with NNI flows
+	if onuID <= 0 {
+		log.Errorw("No onu id for flow", log.Fields{"portNo": portNo, "classifier": classifierInfo, "action": actionInfo})
+		return
+	}
+
+	uni := getUniPortPath(intfID, int32(onuID), int32(uniID))
+	log.Debugw("Uni port name", log.Fields{"uni": uni})
+
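+	// Serialise tech profile / GEM port setup per (pon, onu, uni) key so that
+	// concurrent flow adds for the same subscriber do not race while creating
+	// TCONTs and GEM ports.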
+	tpLockMapKey := tpLockKey{intfID, onuID, uniID}
+	if f.perUserFlowHandleLock.TryLock(tpLockMapKey) {
+		allocID, gemPorts, TpInst = f.createTcontGemports(ctx, intfID, onuID, uniID, uni, portNo, TpID, UsMeterID, DsMeterID, flowMetadata)
+		if allocID == 0 || gemPorts == nil || TpInst == nil {
+			log.Error("alloc-id-gem-ports-tp-unavailable")
+			f.perUserFlowHandleLock.Unlock(tpLockMapKey)
+			return
+		}
+		args := make(map[string]uint32)
+		args[IntfID] = intfID
+		args[OnuID] = onuID
+		args[UniID] = uniID
+		args[PortNo] = portNo
+		args[AllocID] = allocID
+
+		/* Flows can be added specific to a gemport if p-bits are received.
+		 * If no p-bit is mentioned, flows are added for all gemports.
+		 */
+		f.checkAndAddFlow(ctx, args, classifierInfo, actionInfo, flow, TpInst, gemPorts, TpID, uni)
+		f.perUserFlowHandleLock.Unlock(tpLockMapKey)
+	} else {
+		log.Errorw("failed to acquire per user flow handle lock",
+			log.Fields{"intfId": intfID, "onuId": onuID, "uniId": uniID})
+		return
+	}
+}
+
+// CreateSchedulerQueues creates traffic schedulers on the device with the given scheduler configuration and traffic shaping info
+func (f *OpenOltFlowMgr) CreateSchedulerQueues(ctx context.Context, sq schedQueue) error {
+
+	log.Debugw("CreateSchedulerQueues", log.Fields{"Dir": sq.direction, "IntfID": sq.intfID,
+		"OnuID": sq.onuID, "UniID": sq.uniID, "TpID": sq.tpID, "MeterID": sq.meterID,
+		"TpInst": sq.tpInst, "flowMetadata": sq.flowMetadata})
+
+	Direction, err := verifyMeterIDAndGetDirection(sq.meterID, sq.direction)
+	if err != nil {
+		return err
+	}
+
+	/* Let's make a simple assumption that if the meter-id is present on the KV store,
+	 * then the scheduler and queues configuration is applied on the OLT device
+	 * in the given direction.
+	 */
+
+	var SchedCfg *tp_pb.SchedulerConfig
+	KvStoreMeter, err := f.resourceMgr.GetMeterIDForOnu(ctx, Direction, sq.intfID, sq.onuID, sq.uniID, sq.tpID)
+	if err != nil {
+		log.Errorf("Failed to get meter for intf %d, onuid %d, uniid %d", sq.intfID, sq.onuID, sq.uniID)
+		return err
+	}
+	if KvStoreMeter != nil {
+		if KvStoreMeter.MeterId == sq.meterID {
+			log.Debug("Scheduler already created for this direction")
+			return nil
+		}
+		return NewErrInvalidValue(log.Fields{
+			"unsupported":       "meter-id",
+			"kv-store-meter-id": KvStoreMeter.MeterId,
+			"meter-id-in-flow":  sq.meterID}, nil).Log()
+	}
+
+	log.Debugw("Meter-does-not-exist-Creating-new", log.Fields{"MeterID": sq.meterID, "Direction": Direction})
+
+	if sq.direction == tp_pb.Direction_UPSTREAM {
+		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst)
+	} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
+		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst)
+	}
+
+	if err != nil {
+		log.Errorw("Unable to get Scheduler config", log.Fields{"IntfID": sq.intfID, "Direction": sq.direction, "Error": err})
+		return err
+	}
+
+	var meterConfig *ofp.OfpMeterConfig
+	if sq.flowMetadata != nil {
+		for _, meter := range sq.flowMetadata.Meters {
+			if sq.meterID == meter.MeterId {
+				meterConfig = meter
+				log.Debugw("Found-meter-config-from-flowmetadata", log.Fields{"meterConfig": meterConfig})
+				break
+			}
+		}
+	} else {
+		log.Error("Flow-metadata-is-not-present-in-flow")
+	}
+	if meterConfig == nil {
+		return NewErrNotFound("meterbands", log.Fields{
+			"reason":        "Could-not-get-meterbands-from-flowMetadata",
+			"flow-metadata": sq.flowMetadata,
+			"meter-id":      sq.meterID}, nil).Log()
+	} else if len(meterConfig.Bands) < MaxMeterBand {
+		log.Errorw("Invalid-number-of-bands-in-meter", log.Fields{"Bands": meterConfig.Bands, "MeterID": sq.meterID})
+		return NewErrInvalidValue(log.Fields{
+			"reason":          "Invalid-number-of-bands-in-meter",
+			"meterband-count": len(meterConfig.Bands),
+			"meterbands":      meterConfig.Bands,
+			"meter-id":        sq.meterID}, nil).Log()
+	}
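+	// The two meter bands are treated as CIR/CBS (band 0) and EIR/EBS (band 1); the
+	// peak rate and burst pushed to the device are derived as PIR = CIR + EIR and
+	// PBS = CBS + EBS.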
+	cir := meterConfig.Bands[0].Rate
+	cbs := meterConfig.Bands[0].BurstSize
+	eir := meterConfig.Bands[1].Rate
+	ebs := meterConfig.Bands[1].BurstSize
+	pir := cir + eir
+	pbs := cbs + ebs
+	TrafficShaping := &tp_pb.TrafficShapingInfo{Cir: cir, Cbs: cbs, Pir: pir, Pbs: pbs}
+
+	TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst, SchedCfg, TrafficShaping)}
+
+	if err := f.pushSchedulerQueuesToDevice(ctx, sq, TrafficShaping, TrafficSched); err != nil {
+		log.Errorw("Failed to push traffic scheduler and queues to device", log.Fields{"intfID": sq.intfID, "direction": sq.direction})
+		return err
+	}
+
+	/* After we successfully applied the scheduler configuration on the OLT device,
+	 * store the meter id on the KV store, for further reference.
+	 */
+	if err := f.resourceMgr.UpdateMeterIDForOnu(ctx, Direction, sq.intfID, sq.onuID, sq.uniID, sq.tpID, meterConfig); err != nil {
+		log.Errorf("Failed to update meter id for onu %d, meterid %d", sq.onuID, sq.meterID)
+		return err
+	}
+	log.Debugw("updated-meter-info into KV store successfully", log.Fields{"Direction": Direction,
+		"Meter": meterConfig})
+	return nil
+}
+
+func (f *OpenOltFlowMgr) pushSchedulerQueuesToDevice(ctx context.Context, sq schedQueue, TrafficShaping *tp_pb.TrafficShapingInfo, TrafficSched []*tp_pb.TrafficScheduler) error {
+
+	trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst, sq.direction)
+
+	if err != nil {
+		log.Errorw("Unable to construct traffic queue configuration", log.Fields{"intfID": sq.intfID, "direction": sq.direction})
+		return err
+	}
+
+	log.Debugw("Sending Traffic scheduler create to device", log.Fields{"Direction": sq.direction, "TrafficScheds": TrafficSched})
+	if _, err := f.deviceHandler.Client.CreateTrafficSchedulers(ctx, &tp_pb.TrafficSchedulers{
+		IntfId: sq.intfID, OnuId: sq.onuID,
+		UniId: sq.uniID, PortNo: sq.uniPort,
+		TrafficScheds: TrafficSched}); err != nil {
+		log.Errorw("Failed to create traffic schedulers", log.Fields{"error": err})
+		return err
+	}
+
+	// On receiving the CreateTrafficQueues request, the driver should create corresponding
+	// downstream queues.
+	log.Debugw("Sending Traffic Queues create to device", log.Fields{"Direction": sq.direction, "TrafficQueues": trafficQueues})
+	if _, err := f.deviceHandler.Client.CreateTrafficQueues(ctx,
+		&tp_pb.TrafficQueues{IntfId: sq.intfID, OnuId: sq.onuID,
+			UniId: sq.uniID, PortNo: sq.uniPort,
+			TrafficQueues: trafficQueues}); err != nil {
+		log.Errorw("Failed to create traffic queues in device", log.Fields{"error": err})
+		return err
+	}
+
+	if sq.direction == tp_pb.Direction_DOWNSTREAM {
+		multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(sq.tpInst)
+		if len(multicastTrafficQueues) > 0 {
+			if _, present := f.interfaceToMcastQueueMap[sq.intfID]; !present {
+				// It is assumed that there is only one multicast queue per PON port.
+				// The default queue with multicastQueuePerPonPort.Priority on the PON interface is used for the multicast service;
+				// record it in interfaceToMcastQueueMap so it can be used when building group members.
+				multicastQueuePerPonPort := multicastTrafficQueues[0]
+				f.interfaceToMcastQueueMap[sq.intfID] = &queueInfoBrief{
+					gemPortID:       multicastQueuePerPonPort.GemportId,
+					servicePriority: multicastQueuePerPonPort.Priority,
+				}
+				//also store the queue info in kv store
+				f.resourceMgr.AddMcastQueueForIntf(ctx, sq.intfID,
+					multicastQueuePerPonPort.GemportId,
+					multicastQueuePerPonPort.Priority)
+			}
+		}
+	}
+	return nil
+}
+
+// RemoveSchedulerQueues removes the traffic schedulers from the device based on the given scheduler configuration and traffic shaping info
+func (f *OpenOltFlowMgr) RemoveSchedulerQueues(ctx context.Context, sq schedQueue) error {
+
+	var Direction string
+	var SchedCfg *tp_pb.SchedulerConfig
+	var err error
+	log.Debugw("Removing schedulers and Queues in OLT", log.Fields{"Direction": sq.direction, "IntfID": sq.intfID,
+		"OnuID": sq.onuID, "UniID": sq.uniID, "UniPort": sq.uniPort})
+	if sq.direction == tp_pb.Direction_UPSTREAM {
+		SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst)
+		Direction = "upstream"
+	} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
+		SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst)
+		Direction = "downstream"
+	}
+
+	if err != nil {
+		log.Errorw("Unable to get Scheduler config", log.Fields{"IntID": sq.intfID, "Direction": sq.direction, "Error": err})
+		return err
+	}
+
+	KVStoreMeter, err := f.resourceMgr.GetMeterIDForOnu(ctx, Direction, sq.intfID, sq.onuID, sq.uniID, sq.tpID)
+	if err != nil {
+		log.Errorf("Failed to get Meter for Onu %d", sq.onuID)
+		return err
+	}
+	if KVStoreMeter == nil {
+		log.Debugw("No-meter-has-been-installed-yet", log.Fields{"direction": Direction, "IntfID": sq.intfID, "OnuID": sq.onuID, "UniID": sq.uniID})
+		return nil
+	}
+	cir := KVStoreMeter.Bands[0].Rate
+	cbs := KVStoreMeter.Bands[0].BurstSize
+	eir := KVStoreMeter.Bands[1].Rate
+	ebs := KVStoreMeter.Bands[1].BurstSize
+	pir := cir + eir
+	pbs := cbs + ebs
+
+	TrafficShaping := &tp_pb.TrafficShapingInfo{Cir: cir, Cbs: cbs, Pir: pir, Pbs: pbs}
+
+	TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst, SchedCfg, TrafficShaping)}
+
+	TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst, sq.direction)
+	if err != nil {
+		log.Errorw("Unable to construct traffic queue configuration", log.Fields{"intfID": sq.intfID, "direction": sq.direction})
+		return err
+	}
+
+	if _, err = f.deviceHandler.Client.RemoveTrafficQueues(ctx,
+		&tp_pb.TrafficQueues{IntfId: sq.intfID, OnuId: sq.onuID,
+			UniId: sq.uniID, PortNo: sq.uniPort,
+			TrafficQueues: TrafficQueues}); err != nil {
+		log.Errorw("Failed to remove traffic queues", log.Fields{"error": err})
+		return err
+	}
+	log.Debug("Removed traffic queues successfully")
+	if _, err = f.deviceHandler.Client.RemoveTrafficSchedulers(ctx, &tp_pb.TrafficSchedulers{
+		IntfId: sq.intfID, OnuId: sq.onuID,
+		UniId: sq.uniID, PortNo: sq.uniPort,
+		TrafficScheds: TrafficSched}); err != nil {
+		log.Errorw("failed to remove traffic schedulers", log.Fields{"error": err})
+		return err
+	}
+
+	log.Debug("Removed traffic schedulers successfully")
+
+	/* After we successfully remove the scheduler configuration on the OLT device,
+	 * delete the meter id on the KV store.
+	 */
+	err = f.resourceMgr.RemoveMeterIDForOnu(ctx, Direction, sq.intfID, sq.onuID, sq.uniID, sq.tpID)
+	if err != nil {
+		log.Errorf("Failed to remove meter for onu %d, meter id %d", sq.onuID, KVStoreMeter.MeterId)
+		return err
+	}
+	log.Debugw("Removed-meter-from-KV-store successfully", log.Fields{"MeterId": KVStoreMeter.MeterId, "dir": Direction})
+	return err
+}
+
+// This function allocates tconts and GEM ports for an ONU
+func (f *OpenOltFlowMgr) createTcontGemports(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uni string, uniPort uint32, TpID uint32, UsMeterID uint32, DsMeterID uint32, flowMetadata *voltha.FlowMetadata) (uint32, []uint32, *tp.TechProfile) {
+	var allocIDs []uint32
+	var allgemPortIDs []uint32
+	var gemPortIDs []uint32
+	tpInstanceExists := false
+	var err error
+
+	allocIDs = f.resourceMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
+	allgemPortIDs = f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
+
+	tpPath := f.getTPpath(intfID, uni, TpID)
+
+	log.Infow("creating-new-tcont-and-gem", log.Fields{"pon": intfID, "onu": onuID, "uni": uniID})
+
+	// Check tech profile instance already exists for derived port name
+	techProfileInstance, _ := f.techprofile[intfID].GetTPInstanceFromKVStore(ctx, TpID, tpPath)
+	if techProfileInstance == nil {
+		log.Infow("tp-instance-not-found--creating-new", log.Fields{"path": tpPath})
+		techProfileInstance, err = f.techprofile[intfID].CreateTechProfInstance(ctx, TpID, uni, intfID)
+		if err != nil {
+			// This should not happen, something wrong in KV backend transaction
+			log.Errorw("tp-instance-create-failed", log.Fields{"error": err, "tpID": TpID})
+			return 0, nil, nil
+		}
+		f.resourceMgr.UpdateTechProfileIDForOnu(ctx, intfID, onuID, uniID, TpID)
+	} else {
+		log.Debugw("Tech-profile-instance-already-exist-for-given port-name", log.Fields{"uni": uni})
+		tpInstanceExists = true
+	}
+	if UsMeterID != 0 {
+		sq := schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
+			uniPort: uniPort, tpInst: techProfileInstance, meterID: UsMeterID, flowMetadata: flowMetadata}
+		if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
+			log.Errorw("CreateSchedulerQueues Failed-upstream", log.Fields{"error": err, "meterID": UsMeterID})
+			return 0, nil, nil
+		}
+	}
+	if DsMeterID != 0 {
+		sq := schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
+			uniPort: uniPort, tpInst: techProfileInstance, meterID: DsMeterID, flowMetadata: flowMetadata}
+		if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
+			log.Errorw("CreateSchedulerQueues Failed-downstream", log.Fields{"error": err, "meterID": DsMeterID})
+			return 0, nil, nil
+		}
+	}
+
+	allocID := techProfileInstance.UsScheduler.AllocID
+	for _, gem := range techProfileInstance.UpstreamGemPortAttributeList {
+		gemPortIDs = append(gemPortIDs, gem.GemportID)
+	}
+
+	if tpInstanceExists {
+		return allocID, gemPortIDs, techProfileInstance
+	}
+
+	allocIDs = appendUnique(allocIDs, allocID)
+	for _, gemPortID := range gemPortIDs {
+		allgemPortIDs = appendUnique(allgemPortIDs, gemPortID)
+	}
+
+	log.Debugw("Allocated Tcont and GEM ports", log.Fields{"allocIDs": allocIDs, "gemports": allgemPortIDs})
+	// Send Tconts and GEM ports to KV store
+	f.storeTcontsGEMPortsIntoKVStore(ctx, intfID, onuID, uniID, allocIDs, allgemPortIDs)
+	return allocID, gemPortIDs, techProfileInstance
+}
+
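+// storeTcontsGEMPortsIntoKVStore persists the allocated alloc IDs and GEM port IDs for the
+// given ONU/UNI in the KV store and updates the local GEM-port-to-ONU cache.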
+func (f *OpenOltFlowMgr) storeTcontsGEMPortsIntoKVStore(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID []uint32, gemPortIDs []uint32) {
+
+	log.Debugw("Storing allocated Tconts and GEM ports into KV store",
+		log.Fields{"intfId": intfID, "onuId": onuID, "uniId": uniID, "allocID": allocID, "gemPortIDs": gemPortIDs})
+	/* Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store  */
+	if err := f.resourceMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocID); err != nil {
+		log.Error("Errow while uploading allocID to KV store")
+	}
+	if err := f.resourceMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs); err != nil {
+		log.Error("Errow while uploading GEMports to KV store")
+	}
+	if err := f.resourceMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, gemPortIDs, intfID, onuID, uniID); err != nil {
+		log.Error("Errow while uploading gemtopon map to KV store")
+	}
+	log.Debug("Stored tconts and GEM into KV store successfully")
+	for _, gemPort := range gemPortIDs {
+		f.addGemPortToOnuInfoMap(ctx, intfID, onuID, gemPort)
+	}
+}
+
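+// populateTechProfilePerPonPort initializes the per-PON-port tech profile managers from the
+// resource manager and verifies that one exists for every PON port reported by the device.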
+func (f *OpenOltFlowMgr) populateTechProfilePerPonPort() error {
+	var tpCount int
+	for _, techRange := range f.resourceMgr.DevInfo.Ranges {
+		for _, intfID := range techRange.IntfIds {
+			f.techprofile[intfID] = f.resourceMgr.ResourceMgrs[uint32(intfID)].TechProfileMgr
+			tpCount++
+			log.Debugw("Init tech profile done", log.Fields{"intfID": intfID})
+		}
+	}
+	//Make sure we have as many tech_profiles as there are pon ports on the device
+	if tpCount != int(f.resourceMgr.DevInfo.GetPonPorts()) {
+		return NewErrInvalidValue(log.Fields{
+			"reason":             "TP count does not match number of PON ports",
+			"tech-profile-count": tpCount,
+			"pon-port-count":     f.resourceMgr.DevInfo.GetPonPorts()}, nil).Log()
+	}
+	log.Infow("Populated techprofile for ponports successfully",
+		log.Fields{"numofTech": tpCount, "numPonPorts": f.resourceMgr.DevInfo.GetPonPorts()})
+	return nil
+}
+
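+// addUpstreamDataFlow installs an upstream HSIA flow for the subscriber, marking the
+// classifier as single-tagged before handing it to addHSIAFlow.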
+func (f *OpenOltFlowMgr) addUpstreamDataFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32,
+	portNo uint32, uplinkClassifier map[string]interface{},
+	uplinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
+	allocID uint32, gemportID uint32) error {
+	uplinkClassifier[PacketTagType] = SingleTag
+	log.Debugw("Adding upstream data flow", log.Fields{"uplinkClassifier": uplinkClassifier, "uplinkAction": uplinkAction})
+	return f.addHSIAFlow(ctx, intfID, onuID, uniID, portNo, uplinkClassifier, uplinkAction,
+		Upstream, logicalFlow, allocID, gemportID)
+	/* TODO: Install Secondary EAP on the subscriber vlan */
+}
+
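+// addDownstreamDataFlow installs a downstream HSIA flow for the subscriber. Downstream trap
+// flows on the private VLAN given by the core are ignored, and the outer VLAN is popped towards the UNI.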
+func (f *OpenOltFlowMgr) addDownstreamDataFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32,
+	portNo uint32, downlinkClassifier map[string]interface{},
+	downlinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
+	allocID uint32, gemportID uint32) error {
+	downlinkClassifier[PacketTagType] = DoubleTag
+	log.Debugw("Adding downstream data flow", log.Fields{"downlinkClassifier": downlinkClassifier,
+		"downlinkAction": downlinkAction})
+	// Ignore the downlink trap flow given by the core; we cannot do anything with this flow.
+	if vlan, exists := downlinkClassifier[VlanVid]; exists {
+		if vlan.(uint32) == (uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4000) { //private VLAN given by core
+			if metadata, exists := downlinkClassifier[Metadata]; exists { // inport is filled in metadata by core
+				if uint32(metadata.(uint64)) == MkUniPortNum(intfID, onuID, uniID) {
+					log.Infow("Ignoring DL trap device flow from core", log.Fields{"flow": logicalFlow})
+					return nil
+				}
+			}
+		}
+	}
+
+	/* Is this info already available in the classifier? */
+	downlinkAction[PopVlan] = true
+	// vlan_vid is a uint32.  must be type asserted as such or conversion fails
+	dlClVid, ok := downlinkClassifier[VlanVid].(uint32)
+	if ok {
+		downlinkAction[VlanVid] = dlClVid & 0xfff
+	} else {
+		return NewErrInvalidValue(log.Fields{
+			"reason":  "failed to convert VLANID classifier",
+			"vlan-id": VlanVid}, nil).Log()
+	}
+
+	return f.addHSIAFlow(ctx, intfID, onuID, uniID, portNo, downlinkClassifier, downlinkAction,
+		Downstream, logicalFlow, allocID, gemportID)
+}
+
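+// addHSIAFlow builds and installs an HSIA (data) flow on the device for the given direction
+// and persists the resulting flow info in the KV store.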
+func (f *OpenOltFlowMgr) addHSIAFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{},
+	action map[string]interface{}, direction string, logicalFlow *ofp.OfpFlowStats,
+	allocID uint32, gemPortID uint32) error {
+	/* One of the OLT platforms (Broadcom BAL) requires symmetric
+	   flows to use the same flow_id across UL and DL.
+	   Since the HSIA flow is currently the only symmetric flow, we
+	   re-use the flow_id across both directions. The 'flow_category'
+	   takes priority over flow_cookie when finding any available HSIA_FLOW
+	   id for the ONU.
+	*/
+	log.Debugw("Adding HSIA flow", log.Fields{"intfId": intfID, "onuId": onuID, "uniId": uniID, "classifier": classifier,
+		"action": action, "direction": direction, "allocId": allocID, "gemPortId": gemPortID,
+		"logicalFlow": *logicalFlow})
+	var vlanPbit uint32 = 0xff // means no pbit
+	if _, ok := classifier[VlanPcp]; ok {
+		vlanPbit = classifier[VlanPcp].(uint32)
+		log.Debugw("Found pbit in the flow", log.Fields{"VlanPbit": vlanPbit})
+	} else {
+		log.Debugw("pbit-not-found-in-flow", log.Fields{"vlan-pcp": VlanPcp})
+	}
+	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("flow-already-exists")
+		return nil
+	}
+	flowID, err := f.resourceMgr.GetFlowID(ctx, intfID, int32(onuID), int32(uniID), gemPortID, flowStoreCookie, HsiaFlow, vlanPbit)
+	if err != nil {
+		return NewErrNotFound("hsia-flow-id", log.Fields{"direction": direction}, err).Log()
+	}
+	classifierProto, err := makeOpenOltClassifierField(classifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(action)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": action}, err).Log()
+	}
+	log.Debugw("Created action proto", log.Fields{"action": *actionProto})
+	networkIntfID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id",
+			log.Fields{
+				"classifier": classifier,
+				"action":     action,
+			}, err).Log()
+	}
+	flow := openoltpb2.Flow{AccessIntfId: int32(intfID),
+		OnuId:         int32(onuID),
+		UniId:         int32(uniID),
+		FlowId:        flowID,
+		FlowType:      direction,
+		AllocId:       int32(allocID),
+		NetworkIntfId: int32(networkIntfID),
+		GemportId:     int32(gemPortID),
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+	if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
+		return NewErrFlowOp("add", flowID, nil, err).Log()
+	}
+	log.Debug("HSIA flow added to device successfully", log.Fields{"direction": direction})
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &flow, flowStoreCookie, HsiaFlow, flowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, flow.AccessIntfId,
+		flow.OnuId,
+		flow.UniId,
+		flow.FlowId /*flowCategory,*/, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flowID, log.Fields{"flow": flow}, err).Log()
+	}
+	return nil
+}
+
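+// addDHCPTrapFlow installs an upstream DHCP trap-to-host flow for the subscriber, matching
+// UDP source port 68 and destination port 67 on a single-tagged packet.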
+func (f *OpenOltFlowMgr) addDHCPTrapFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32) error {
+
+	networkIntfID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id", log.Fields{
+			"classifier": classifier,
+			"action":     action},
+			err).Log()
+	}
+
+	// Clear the action map
+	for k := range action {
+		delete(action, k)
+	}
+
+	action[TrapToHost] = true
+	classifier[UDPSrc] = uint32(68)
+	classifier[UDPDst] = uint32(67)
+	classifier[PacketTagType] = SingleTag
+	delete(classifier, VlanVid)
+
+	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("Flow-exists--not-re-adding")
+		return nil
+	}
+
+	flowID, err := f.resourceMgr.GetFlowID(ctx, intfID, int32(onuID), int32(uniID), gemPortID, flowStoreCookie, DhcpFlow, 0 /*classifier[VLAN_PCP].(uint32)*/)
+
+	if err != nil {
+		return NewErrNotFound("flow", log.Fields{
+			"interface-id": intfID,
+			"gem-port":     gemPortID,
+			"cookie":       flowStoreCookie},
+			err).Log()
+	}
+
+	log.Debugw("Creating UL DHCP flow", log.Fields{"ul_classifier": classifier, "ul_action": action, "uplinkFlowId": flowID})
+
+	classifierProto, err := makeOpenOltClassifierField(classifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(action)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": action}, err).Log()
+	}
+
+	dhcpFlow := openoltpb2.Flow{AccessIntfId: int32(intfID),
+		OnuId:         int32(onuID),
+		UniId:         int32(uniID),
+		FlowId:        flowID,
+		FlowType:      Upstream,
+		AllocId:       int32(allocID),
+		NetworkIntfId: int32(networkIntfID),
+		GemportId:     int32(gemPortID),
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+
+	if err := f.addFlowToDevice(ctx, logicalFlow, &dhcpFlow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"dhcp-flow": dhcpFlow}, err).Log()
+	}
+	log.Debug("DHCP UL flow added to device successfully")
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &dhcpFlow, flowStoreCookie, "DHCP", flowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, dhcpFlow.AccessIntfId,
+		dhcpFlow.OnuId,
+		dhcpFlow.UniId,
+		dhcpFlow.FlowId, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", dhcpFlow.FlowId, log.Fields{"flow": dhcpFlow}, err).Log()
+	}
+
+	return nil
+}
+
+//addIGMPTrapFlow creates IGMP trap-to-host flow
+func (f *OpenOltFlowMgr) addIGMPTrapFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{},
+	action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32) error {
+	return f.addUpstreamTrapFlow(ctx, intfID, onuID, uniID, portNo, classifier, action, logicalFlow, allocID, gemPortID, IgmpFlow)
+}
+
+//addUpstreamTrapFlow creates a trap-to-host flow
+func (f *OpenOltFlowMgr) addUpstreamTrapFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{},
+	action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32, flowType string) error {
+
+	networkIntfID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id", log.Fields{
+			"classifier": classifier,
+			"action":     action},
+			err).Log()
+	}
+
+	// Clear the action map
+	for k := range action {
+		delete(action, k)
+	}
+
+	action[TrapToHost] = true
+	classifier[PacketTagType] = SingleTag
+	delete(classifier, VlanVid)
+
+	flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkIntfID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("Flow-exists-not-re-adding")
+		return nil
+	}
+
+	flowID, err := f.resourceMgr.GetFlowID(ctx, intfID, int32(onuID), int32(uniID), gemPortID, flowStoreCookie, flowType, 0, 0 /*classifier[VLAN_PCP].(uint32)*/)
+
+	if err != nil {
+		return NewErrNotFound("flow-id", log.Fields{
+			"interface-id": intfID,
+			"oni-id":       onuID,
+			"cookie":       flowStoreCookie,
+			"flow-type":    flowType},
+			err).Log()
+	}
+
+	log.Debugw("Creating upstream trap flow", log.Fields{"ul_classifier": classifier, "ul_action": action, "uplinkFlowId": flowID, "flowType": flowType})
+
+	classifierProto, err := makeOpenOltClassifierField(classifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(action)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": action}, err).Log()
+	}
+
+	flow := openoltpb2.Flow{AccessIntfId: int32(intfID),
+		OnuId:         int32(onuID),
+		UniId:         int32(uniID),
+		FlowId:        flowID,
+		FlowType:      Upstream,
+		AllocId:       int32(allocID),
+		NetworkIntfId: int32(networkIntfID),
+		GemportId:     int32(gemPortID),
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+
+	if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"flow": flow}, err).Log()
+	}
+	log.Debugf("%s UL flow added to device successfully", flowType)
+
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &flow, flowStoreCookie, flowType, flowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, flow.AccessIntfId,
+		flow.OnuId,
+		flow.UniId,
+		flow.FlowId, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flow.FlowId, log.Fields{"flow": flow}, err).Log()
+	}
+
+	return nil
+}
+
+// Add EAPOL flow to  device with mac, vlanId as classifier for upstream and downstream
+func (f *OpenOltFlowMgr) addEAPOLFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32, vlanID uint32) error {
+	log.Debugw("Adding EAPOL to device", log.Fields{"intfId": intfID, "onuId": onuID, "portNo": portNo, "allocId": allocID, "gemPortId": gemPortID, "vlanId": vlanID, "flow": logicalFlow})
+
+	uplinkClassifier := make(map[string]interface{})
+	uplinkAction := make(map[string]interface{})
+
+	// Fill classifier
+	uplinkClassifier[EthType] = uint32(EapEthType)
+	uplinkClassifier[PacketTagType] = SingleTag
+	uplinkClassifier[VlanVid] = vlanID
+	// Fill action
+	uplinkAction[TrapToHost] = true
+	flowStoreCookie := getFlowStoreCookie(uplinkClassifier, gemPortID)
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("Flow-exists-not-re-adding")
+		return nil
+	}
+	//Add Uplink EAPOL Flow
+	uplinkFlowID, err := f.resourceMgr.GetFlowID(ctx, intfID, int32(onuID), int32(uniID), gemPortID, flowStoreCookie, "", 0)
+	if err != nil {
+		return NewErrNotFound("flow-id", log.Fields{
+			"interface-id": intfID,
+			"onu-id":       onuID,
+			"coookie":      flowStoreCookie},
+			err).Log()
+	}
+	log.Debugw("Creating UL EAPOL flow", log.Fields{"ul_classifier": uplinkClassifier, "ul_action": uplinkAction, "uplinkFlowId": uplinkFlowID})
+
+	classifierProto, err := makeOpenOltClassifierField(uplinkClassifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": uplinkClassifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(uplinkAction)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": uplinkAction}, err).Log()
+	}
+	log.Debugw("Created action proto", log.Fields{"action": *actionProto})
+	networkIntfID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id", log.Fields{
+			"classifier": classifier,
+			"action":     action},
+			err).Log()
+	}
+
+	upstreamFlow := openoltpb2.Flow{AccessIntfId: int32(intfID),
+		OnuId:         int32(onuID),
+		UniId:         int32(uniID),
+		FlowId:        uplinkFlowID,
+		FlowType:      Upstream,
+		AllocId:       int32(allocID),
+		NetworkIntfId: int32(networkIntfID),
+		GemportId:     int32(gemPortID),
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+	if err := f.addFlowToDevice(ctx, logicalFlow, &upstreamFlow); err != nil {
+		return NewErrFlowOp("add", uplinkFlowID, log.Fields{"flow": upstreamFlow}, err).Log()
+	}
+	log.Debug("EAPOL UL flow added to device successfully")
+	flowCategory := "EAPOL"
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &upstreamFlow, flowStoreCookie, flowCategory, uplinkFlowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, upstreamFlow.AccessIntfId,
+		upstreamFlow.OnuId,
+		upstreamFlow.UniId,
+		upstreamFlow.FlowId,
+		/* lowCategory, */
+		flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", upstreamFlow.FlowId, log.Fields{"flow": upstreamFlow}, err).Log()
+	}
+
+	log.Debugw("Added EAPOL flows to device successfully", log.Fields{"flow": logicalFlow})
+	return nil
+}
+
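+// makeOpenOltClassifierField converts the parsed OpenFlow classifier fields into an openolt
+// Classifier proto; a missing VLAN PCP is signalled with VlanPCPMask (0xff).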
+func makeOpenOltClassifierField(classifierInfo map[string]interface{}) (*openoltpb2.Classifier, error) {
+	var classifier openoltpb2.Classifier
+
+	classifier.EthType, _ = classifierInfo[EthType].(uint32)
+	classifier.IpProto, _ = classifierInfo[IPProto].(uint32)
+	if vlanID, ok := classifierInfo[VlanVid].(uint32); ok {
+		vid := vlanID & VlanvIDMask
+		if vid != ReservedVlan {
+			classifier.OVid = vid
+		}
+	}
+	if metadata, ok := classifierInfo[Metadata].(uint64); ok {
+		vid := uint32(metadata)
+		if vid != ReservedVlan {
+			classifier.IVid = vid
+		}
+	}
+	// Use VlanPCPMask (0xff) to signify NO PCP. Else use valid PCP (0 to 7)
+	if vlanPcp, ok := classifierInfo[VlanPcp].(uint32); ok {
+		classifier.OPbits = vlanPcp
+	} else {
+		classifier.OPbits = VlanPCPMask
+	}
+	classifier.SrcPort, _ = classifierInfo[UDPSrc].(uint32)
+	classifier.DstPort, _ = classifierInfo[UDPDst].(uint32)
+	classifier.DstIp, _ = classifierInfo[Ipv4Dst].(uint32)
+	classifier.SrcIp, _ = classifierInfo[Ipv4Src].(uint32)
+	classifier.DstMac, _ = classifierInfo[EthDst].([]uint8)
+	if pktTagType, ok := classifierInfo[PacketTagType].(string); ok {
+		classifier.PktTagType = pktTagType
+
+		switch pktTagType {
+		case SingleTag:
+		case DoubleTag:
+		case Untagged:
+		default:
+			return nil, NewErrInvalidValue(log.Fields{"packet-tag-type": pktTagType}, nil).Log()
+		}
+	}
+	return &classifier, nil
+}
+
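+// makeOpenOltActionField converts the parsed OpenFlow action fields into an openolt Action
+// proto supporting pop-VLAN, push-VLAN and trap-to-host commands.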
+func makeOpenOltActionField(actionInfo map[string]interface{}) (*openoltpb2.Action, error) {
+	var actionCmd openoltpb2.ActionCmd
+	var action openoltpb2.Action
+	action.Cmd = &actionCmd
+	if _, ok := actionInfo[PopVlan]; ok {
+		action.OVid = actionInfo[VlanVid].(uint32)
+		action.Cmd.RemoveOuterTag = true
+	} else if _, ok := actionInfo[PushVlan]; ok {
+		action.OVid = actionInfo[VlanVid].(uint32)
+		action.Cmd.AddOuterTag = true
+	} else if _, ok := actionInfo[TrapToHost]; ok {
+		action.Cmd.TrapToHost = actionInfo[TrapToHost].(bool)
+	} else {
+		return nil, NewErrInvalidValue(log.Fields{"action-command": actionInfo}, nil).Log()
+	}
+	return &action, nil
+}
+
+func (f *OpenOltFlowMgr) getTPpath(intfID uint32, uni string, TpID uint32) string {
+	return f.techprofile[intfID].GetTechProfileInstanceKVPath(TpID, uni)
+}
+
+// DeleteTechProfileInstances removes the tech profile instances from persistent storage
+func (f *OpenOltFlowMgr) DeleteTechProfileInstances(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, sn string) error {
+	tpIDList := f.resourceMgr.GetTechProfileIDForOnu(ctx, intfID, onuID, uniID)
+	uniPortName := fmt.Sprintf("pon-{%d}/onu-{%d}/uni-{%d}", intfID, onuID, uniID)
+	for _, tpID := range tpIDList {
+		if err := f.DeleteTechProfileInstance(ctx, intfID, onuID, uniID, uniPortName, tpID); err != nil {
+			log.Debugw("Failed-to-delete-tp-instance-from-kv-store", log.Fields{"tp-id": tpID, "uni-port-name": uniPortName})
+			// return err
+			// We should continue to delete tech-profile instances for other TP IDs
+		}
+	}
+	return nil
+}
+
+// DeleteTechProfileInstance removes the tech profile instance from persistent storage
+func (f *OpenOltFlowMgr) DeleteTechProfileInstance(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uniPortName string, tpID uint32) error {
+	if uniPortName == "" {
+		uniPortName = fmt.Sprintf("pon-{%d}/onu-{%d}/uni-{%d}", intfID, onuID, uniID)
+	}
+	if err := f.techprofile[intfID].DeleteTechProfileInstance(ctx, tpID, uniPortName); err != nil {
+		log.Debugw("Failed-to-delete-tp-instance-from-kv-store", log.Fields{"tp-id": tpID, "uni-port-name": uniPortName})
+		return err
+	}
+	return nil
+}
+
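+// getFlowStoreCookie derives a 64-bit cookie for the flow by MD5-hashing the JSON-encoded
+// classifier (together with the GEM port, when non-zero); it is used to detect duplicate flows.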
+func getFlowStoreCookie(classifier map[string]interface{}, gemPortID uint32) uint64 {
+	if len(classifier) == 0 { // should never happen
+		log.Error("Invalid classfier object")
+		return 0
+	}
+	log.Debugw("generating flow store cookie", log.Fields{"classifier": classifier, "gemPortID": gemPortID})
+	var jsonData []byte
+	var flowString string
+	var err error
+	// TODO: Do we need to marshall ??
+	if jsonData, err = json.Marshal(classifier); err != nil {
+		log.Error("Failed to encode classifier")
+		return 0
+	}
+	flowString = string(jsonData)
+	if gemPortID != 0 {
+		flowString = fmt.Sprintf("%s%s", string(jsonData), string(gemPortID))
+	}
+	h := md5.New()
+	_, _ = h.Write([]byte(flowString))
+	hash := big.NewInt(0)
+	hash.SetBytes(h.Sum(nil))
+	generatedHash := hash.Uint64()
+	log.Debugw("hash generated", log.Fields{"hash": generatedHash})
+	return generatedHash
+}
+
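+// getUpdatedFlowInfo appends the new flow entry to any existing flow info already stored in
+// the KV store for the same flow ID, ONU and UNI.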
+func (f *OpenOltFlowMgr) getUpdatedFlowInfo(ctx context.Context, flow *openoltpb2.Flow, flowStoreCookie uint64, flowCategory string, deviceFlowID uint32, logicalFlowID uint64) *[]rsrcMgr.FlowInfo {
+	var flows = []rsrcMgr.FlowInfo{{Flow: flow, FlowCategory: flowCategory, FlowStoreCookie: flowStoreCookie, LogicalFlowID: logicalFlowID}}
+	var intfID uint32
+	/* For flows which trap out of the NNI, the AccessIntfId is invalid
+	   (set to -1). In such cases, we need to refer to the NetworkIntfId .
+	*/
+	if flow.AccessIntfId != -1 {
+		intfID = uint32(flow.AccessIntfId)
+	} else {
+		intfID = uint32(flow.NetworkIntfId)
+	}
+	// Get existing flows matching flowid for given subscriber from KV store
+	existingFlows := f.resourceMgr.GetFlowIDInfo(ctx, intfID, flow.OnuId, flow.UniId, flow.FlowId)
+	if existingFlows != nil {
+		log.Debugw("Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
+		flows = append(flows, *existingFlows...)
+	}
+	log.Debugw("Updated flows for given flowID and onuid", log.Fields{"updatedflow": flows, "flowid": flow.FlowId, "onu": flow.OnuId})
+	return &flows
+}
+
+func (f *OpenOltFlowMgr) updateFlowInfoToKVStore(ctx context.Context, intfID int32, onuID int32, uniID int32, flowID uint32, flows *[]rsrcMgr.FlowInfo) error {
+	log.Debugw("Storing flow(s) into KV store", log.Fields{"flows": *flows})
+	if err := f.resourceMgr.UpdateFlowIDInfo(ctx, intfID, onuID, uniID, flowID, flows); err != nil {
+		log.Debug("Error while Storing flow into KV store")
+		return err
+	}
+	log.Info("Stored flow(s) into KV store successfully!")
+	return nil
+}
+
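+// addFlowToDevice programs the flow on the OLT via gRPC; an AlreadyExists response from the
+// device is treated as success, while any other failure frees the allocated flow ID.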
+func (f *OpenOltFlowMgr) addFlowToDevice(ctx context.Context, logicalFlow *ofp.OfpFlowStats, deviceFlow *openoltpb2.Flow) error {
+
+	var intfID uint32
+	/* For flows which trap out of the NNI, the AccessIntfId is invalid
+	   (set to -1). In such cases, we need to refer to the NetworkIntfId .
+	*/
+	if deviceFlow.AccessIntfId != -1 {
+		intfID = uint32(deviceFlow.AccessIntfId)
+	} else {
+		// REVISIT: Why is the PON port given as the network port?
+		intfID = uint32(deviceFlow.NetworkIntfId)
+	}
+
+	log.Debugw("Sending flow to device via grpc", log.Fields{"flow": *deviceFlow})
+	_, err := f.deviceHandler.Client.FlowAdd(ctx, deviceFlow)
+
+	st, _ := status.FromError(err)
+	if st.Code() == codes.AlreadyExists {
+		log.Debug("Flow already exists", log.Fields{"err": err, "deviceFlow": deviceFlow})
+		return nil
+	}
+
+	if err != nil {
+		log.Errorw("Failed to Add flow to device", log.Fields{"err": err, "deviceFlow": deviceFlow})
+		f.resourceMgr.FreeFlowID(ctx, intfID, deviceFlow.OnuId, deviceFlow.UniId, deviceFlow.FlowId)
+		return err
+	}
+	if deviceFlow.GemportId != -1 {
+		// No need to register the flow if it is a trap on nni flow.
+		f.registerFlow(ctx, logicalFlow, deviceFlow)
+	}
+	log.Debugw("Flow added to device successfully ", log.Fields{"flow": *deviceFlow})
+	return nil
+}
+
+func (f *OpenOltFlowMgr) removeFlowFromDevice(deviceFlow *openoltpb2.Flow) error {
+	log.Debugw("Sending flow to device via grpc", log.Fields{"flow": *deviceFlow})
+	_, err := f.deviceHandler.Client.FlowRemove(context.Background(), deviceFlow)
+	if err != nil {
+		if f.deviceHandler.device.ConnectStatus == common.ConnectStatus_UNREACHABLE {
+			log.Warnw("Can not remove flow from device since it's unreachable", log.Fields{"err": err, "deviceFlow": deviceFlow})
+			//Assume the flow is removed
+			return nil
+		}
+		log.Errorw("Failed to Remove flow from device", log.Fields{"err": err, "deviceFlow": deviceFlow})
+		return err
+
+	}
+	log.Debugw("Flow removed from device successfully ", log.Fields{"flow": *deviceFlow})
+	return nil
+}
+
+/*func register_flow(deviceFlow *openolt_pb2.Flow, logicalFlow *ofp.OfpFlowStats){
+ //update core flows_proxy : flows_proxy.update('/', flows)
+}
+
+func generateStoredId(flowId uint32, direction string)uint32{
+
+	if direction == Upstream{
+		log.Debug("Upstream flow shifting flowid")
+		return ((0x1 << 15) | flowId)
+	}else if direction == Downstream{
+		log.Debug("Downstream flow not shifting flowid")
+		return flowId
+	}else{
+		log.Errorw("Unrecognized direction",log.Fields{"direction": direction})
+		return flowId
+	}
+}
+
+*/
+
+func (f *OpenOltFlowMgr) addLLDPFlow(ctx context.Context, flow *ofp.OfpFlowStats, portNo uint32) error {
+
+	classifierInfo := make(map[string]interface{})
+	actionInfo := make(map[string]interface{})
+
+	classifierInfo[EthType] = uint32(LldpEthType)
+	classifierInfo[PacketTagType] = Untagged
+	actionInfo[TrapToHost] = true
+
+	// LLDP flow is installed to trap LLDP packets on the NNI port.
+	// We manage flow_id resource pool on per PON port basis.
+	// Since this situation is tricky, as a hack, we pass the NNI port
+	// index (network_intf_id) as PON port Index for the flow_id resource
+	// pool. Also, there is no ONU Id available for trapping LLDP packets
+	// on NNI port, use onu_id as -1 (invalid)
+	// ****************** CAVEAT *******************
+	// This logic works if the NNI Port Id falls within the same valid
+	// range of PON Port Ids. If this doesn't work for some OLT Vendor
+	// we need to have a re-look at this.
+	// *********************************************
+
+	var onuID = -1
+	var uniID = -1
+	var gemPortID = -1
+
+	networkInterfaceID, err := IntfIDFromNniPortNum(portNo)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"nni-port-number": portNo}, err).Log()
+	}
+	var flowStoreCookie = getFlowStoreCookie(classifierInfo, uint32(0))
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("Flow-exists--not-re-adding")
+		return nil
+	}
+	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0)
+
+	if err != nil {
+		return NewErrNotFound("flow-id", log.Fields{
+			"interface-id": networkInterfaceID,
+			"onu-id":       onuID,
+			"uni-id":       uniID,
+			"gem-port-id":  gemPortID,
+			"cookie":       flowStoreCookie},
+			err).Log()
+	}
+	classifierProto, err := makeOpenOltClassifierField(classifierInfo)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifierInfo}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(actionInfo)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": actionInfo}, err).Log()
+	}
+	log.Debugw("Created action proto", log.Fields{"action": *actionProto})
+
+	downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
+		OnuId:         int32(onuID), // OnuId not required
+		UniId:         int32(uniID), // UniId not used
+		FlowId:        flowID,
+		FlowType:      Downstream,
+		NetworkIntfId: int32(networkInterfaceID),
+		GemportId:     int32(gemPortID),
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(flow.Priority),
+		Cookie:        flow.Cookie,
+		PortNo:        portNo}
+	if err := f.addFlowToDevice(ctx, flow, &downstreamflow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	log.Debug("LLDP trap on NNI flow added to device successfully")
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, flow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
+		int32(onuID),
+		int32(uniID),
+		flowID, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	return nil
+}
+
+func getUniPortPath(intfID uint32, onuID int32, uniID int32) string {
+	return fmt.Sprintf("pon-{%d}/onu-{%d}/uni-{%d}", intfID, onuID, uniID)
+}
+
+//getOnuChildDevice to fetch onu
+func (f *OpenOltFlowMgr) getOnuChildDevice(intfID uint32, onuID uint32) (*voltha.Device, error) {
+	log.Debugw("GetChildDevice", log.Fields{"pon port": intfID, "onuId": onuID})
+	parentPortNo := IntfIDToPortNo(intfID, voltha.Port_PON_OLT)
+	onuDevice, err := f.deviceHandler.GetChildDevice(parentPortNo, onuID)
+	if err != nil {
+		return nil, NewErrNotFound("onu", log.Fields{
+			"interface-id": parentPortNo,
+			"onu-id":       onuID},
+			err).Log()
+	}
+	log.Debugw("Successfully received child device from core", log.Fields{"child_device": *onuDevice})
+	return onuDevice, nil
+}
+
+func findNextFlow(flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
+	log.Info("unimplemented flow : %v", flow)
+	return nil
+}
+
+func (f *OpenOltFlowMgr) clearFlowsAndSchedulerForLogicalPort(childDevice *voltha.Device, logicalPort *voltha.LogicalPort) {
+	log.Info("unimplemented device %v, logicalport %v", childDevice, logicalPort)
+}
+
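+// decodeStoredID splits a stored flow ID back into the raw flow ID and its direction:
+// bit 15 set marks an upstream flow, e.g. 0x8005 decodes to (5, Upstream).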
+func (f *OpenOltFlowMgr) decodeStoredID(id uint64) (uint64, string) {
+	if id>>15 == 0x1 {
+		return id & 0x7fff, Upstream
+	}
+	return id, Downstream
+}
+
+func (f *OpenOltFlowMgr) sendDeleteGemPortToChild(intfID uint32, onuID uint32, uniID uint32, gemPortID uint32, tpPath string) error {
+	onuDevice, err := f.getOnuChildDevice(intfID, onuID)
+	if err != nil {
+		log.Errorw("error fetching child device from core", log.Fields{"onuId": onuID})
+		return err
+	}
+
+	delGemPortMsg := &ic.InterAdapterDeleteGemPortMessage{UniId: uniID, TpPath: tpPath, GemPortId: gemPortID}
+	log.Debugw("sending gem port delete to openonu adapter", log.Fields{"msg": *delGemPortMsg})
+	if sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(context.Background(),
+		delGemPortMsg,
+		ic.InterAdapterMessageType_DELETE_GEM_PORT_REQUEST,
+		f.deviceHandler.deviceType,
+		onuDevice.Type,
+		onuDevice.Id,
+		onuDevice.ProxyAddress.DeviceId, ""); sendErr != nil {
+		log.Errorw("failure sending del gem port to onu adapter", log.Fields{"fromAdapter": f.deviceHandler.deviceType,
+			"toAdapter": onuDevice.Type, "onuId": onuDevice.Id,
+			"proxyDeviceId": onuDevice.ProxyAddress.DeviceId})
+		return sendErr
+	}
+	log.Debugw("success sending del gem port to onu adapter", log.Fields{"msg": delGemPortMsg})
+	return nil
+}
+
+func (f *OpenOltFlowMgr) sendDeleteTcontToChild(intfID uint32, onuID uint32, uniID uint32, allocID uint32, tpPath string) error {
+	onuDevice, err := f.getOnuChildDevice(intfID, onuID)
+	if err != nil {
+		log.Errorw("error fetching child device from core", log.Fields{"onuId": onuID})
+		return err
+	}
+
+	delTcontMsg := &ic.InterAdapterDeleteTcontMessage{UniId: uniID, TpPath: tpPath, AllocId: allocID}
+	log.Debugw("sending tcont delete to openonu adapter", log.Fields{"msg": *delTcontMsg})
+	if sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(context.Background(),
+		delTcontMsg,
+		ic.InterAdapterMessageType_DELETE_TCONT_REQUEST,
+		f.deviceHandler.deviceType,
+		onuDevice.Type,
+		onuDevice.Id,
+		onuDevice.ProxyAddress.DeviceId, ""); sendErr != nil {
+		log.Errorw("failure sending del tcont to onu adapter", log.Fields{"fromAdapter": f.deviceHandler.deviceType,
+			"toAdapter": onuDevice.Type, "onuId": onuDevice.Id,
+			"proxyDeviceId": onuDevice.ProxyAddress.DeviceId})
+		return sendErr
+	}
+	log.Debugw("success sending del tcont to onu adapter", log.Fields{"msg": delTcontMsg})
+	return nil
+}
+
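+// deletePendingFlows decrements the pending-flow-delete reference count for the ONU/UNI and
+// removes the map entry once all pending deletes have been handled.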
+func (f *OpenOltFlowMgr) deletePendingFlows(Intf uint32, onuID int32, uniID int32) {
+	pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
+	if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); ok {
+		if val.(int) > 0 {
+			pnFlDels := val.(int) - 1
+			if pnFlDels > 0 {
+				log.Debugw("flow delete succeeded, more pending",
+					log.Fields{"intf": Intf, "onuID": onuID, "uniID": uniID, "currPendingFlowCnt": pnFlDels})
+				f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
+			} else {
+				log.Debugw("all pending flow deletes handled, removing entry from map",
+					log.Fields{"intf": Intf, "onuID": onuID, "uniID": uniID})
+				f.pendingFlowDelete.Delete(pnFlDelKey)
+			}
+		}
+	} else {
+		log.Debugw("no pending delete flows found",
+			log.Fields{"intf": Intf, "onuID": onuID, "uniID": uniID})
+
+	}
+
+}
+
+// Once the gemport is released for a given onu, it also has to be cleared from local cache
+// which was used for deriving the gemport->logicalPortNo during packet-in.
+// Otherwise stale info continues to exist after gemport is freed and wrong logicalPortNo
+// is conveyed to ONOS during packet-in OF message.
+func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(intfID uint32, onuID uint32, gemPortID uint32) {
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	onugem := f.onuGemInfo[intfID]
+	for i, onu := range onugem {
+		if onu.OnuID == onuID {
+			for j, gem := range onu.GemPorts {
+				// If the gemport is found, delete it from local cache.
+				if gem == gemPortID {
+					onu.GemPorts = append(onu.GemPorts[:j], onu.GemPorts[j+1:]...)
+					onugem[i] = onu
+					log.Debugw("removed gemport from local cache",
+						log.Fields{"intfID": intfID, "onuID": onuID, "deletedGemPortID": gemPortID, "gemPorts": onu.GemPorts})
+					break
+				}
+			}
+			break
+		}
+	}
+}
+
+//clearResources clears pon resources in kv store and the device
+func (f *OpenOltFlowMgr) clearResources(ctx context.Context, flow *ofp.OfpFlowStats, Intf uint32, onuID int32, uniID int32,
+	gemPortID int32, flowID uint32, flowDirection string,
+	portNum uint32, updatedFlows []rsrcMgr.FlowInfo) error {
+
+	tpID, err := getTpIDFromFlow(flow)
+	if err != nil {
+		log.Error("metadata-is-not-present-invalid-flow-to-process", log.Fields{"pon": Intf, "onuID": onuID, "uniID": uniID})
+		return err
+	}
+
+	if len(updatedFlows) >= 0 {
+		// There are still flows referencing the same flow_id.
+		// So the flow should not be freed yet.
+		// For ex: Case of HSIA where same flow is shared
+		// between DS and US.
+		f.updateFlowInfoToKVStore(ctx, int32(Intf), int32(onuID), int32(uniID), flowID, &updatedFlows)
+		if len(updatedFlows) == 0 {
+			// Do this for subscriber flows only (not trap from NNI flows)
+			if onuID != -1 && uniID != -1 {
+				pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
+				if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
+					log.Debugw("creating entry for pending flow delete",
+						log.Fields{"intf": Intf, "onuID": onuID, "uniID": uniID})
+					f.pendingFlowDelete.Store(pnFlDelKey, 1)
+				} else {
+					pnFlDels := val.(int) + 1
+					log.Debugw("updating flow delete entry",
+						log.Fields{"intf": Intf, "onuID": onuID, "uniID": uniID, "currPendingFlowCnt": pnFlDels})
+					f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
+				}
+
+				defer f.deletePendingFlows(Intf, onuID, uniID)
+			}
+
+			log.Debugw("Releasing flow Id to resource manager", log.Fields{"Intf": Intf, "onuId": onuID, "uniId": uniID, "flowId": flowID})
+			f.resourceMgr.FreeFlowID(ctx, Intf, int32(onuID), int32(uniID), flowID)
+
+			uni := getUniPortPath(Intf, onuID, uniID)
+			tpPath := f.getTPpath(Intf, uni, tpID)
+			log.Debugw("Getting-techprofile-instance-for-subscriber", log.Fields{"TP-PATH": tpPath})
+			techprofileInst, err := f.techprofile[Intf].GetTPInstanceFromKVStore(ctx, tpID, tpPath)
+			if err != nil { // This should not happen, something wrong in KV backend transaction
+				log.Errorw("Error in fetching tech profile instance from KV store", log.Fields{"tpID": 20, "path": tpPath})
+				return err
+			}
+			if techprofileInst == nil {
+				log.Errorw("Tech-profile-instance-does-not-exist-in-KV Store", log.Fields{"tpPath": tpPath})
+				return NewErrNotFound("tech-profile-instance", log.Fields{"tp-path": tpPath}, nil).Log()
+			}
+
+			gemPK := gemPortKey{Intf, uint32(gemPortID)}
+			if f.isGemPortUsedByAnotherFlow(gemPK) {
+				flowIDs := f.flowsUsedByGemPort[gemPK]
+				for i, flowIDinMap := range flowIDs {
+					if flowIDinMap == flowID {
+						flowIDs = append(flowIDs[:i], flowIDs[i+1:]...)
+						// every time the flowsUsedByGemPort cache is updated, the same should be updated
+						// in the KV store by calling UpdateFlowIDsForGem
+						f.flowsUsedByGemPort[gemPK] = flowIDs
+						f.resourceMgr.UpdateFlowIDsForGem(ctx, Intf, uint32(gemPortID), flowIDs)
+						break
+					}
+				}
+				log.Debugw("Gem port id is still used by other flows", log.Fields{"gemPortID": gemPortID, "usedByFlows": flowIDs})
+				return nil
+			}
+			log.Debugf("Gem port id %d is not used by another flow - releasing the gem port", gemPortID)
+			f.resourceMgr.RemoveGemPortIDForOnu(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
+			// TODO: The TrafficQueue corresponding to this gem-port also should be removed immediately.
+			// But it is anyway eventually  removed later when the TechProfile is freed, so not a big issue for now.
+			f.resourceMgr.RemoveGEMportPonportToOnuMapOnKVStore(ctx, uint32(gemPortID), Intf)
+			f.deleteGemPortFromLocalCache(Intf, uint32(onuID), uint32(gemPortID))
+			f.onuIdsLock.Lock()
+			// every time an entry is deleted from the flowsUsedByGemPort cache, the same should be updated
+			// in the KV store as well by calling DeleteFlowIDsForGem
+			delete(f.flowsUsedByGemPort, gemPK)
+			f.resourceMgr.DeleteFlowIDsForGem(ctx, Intf, uint32(gemPortID))
+			f.resourceMgr.FreeGemPortID(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
+			f.onuIdsLock.Unlock()
+			// Delete the gem port on the ONU.
+			if err := f.sendDeleteGemPortToChild(Intf, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
+				log.Errorw("error processing delete gem-port towards onu",
+					log.Fields{"err": err, "pon": Intf, "onuID": onuID, "uniID": uniID, "gemPortId": gemPortID})
+			}
+
+			ok, _ := f.isTechProfileUsedByAnotherGem(ctx, Intf, uint32(onuID), uint32(uniID), tpID, techprofileInst, uint32(gemPortID))
+			if !ok {
+				f.resourceMgr.RemoveTechProfileIDForOnu(ctx, Intf, uint32(onuID), uint32(uniID), tpID)
+				f.RemoveSchedulerQueues(ctx, schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), tpID: tpID, uniPort: portNum, tpInst: techprofileInst})
+				f.RemoveSchedulerQueues(ctx, schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), tpID: tpID, uniPort: portNum, tpInst: techprofileInst})
+				f.DeleteTechProfileInstance(ctx, Intf, uint32(onuID), uint32(uniID), "", tpID)
+				f.resourceMgr.FreeAllocID(ctx, Intf, uint32(onuID), uint32(uniID), techprofileInst.UsScheduler.AllocID)
+				// Delete the TCONT on the ONU.
+				if err := f.sendDeleteTcontToChild(Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.UsScheduler.AllocID), tpPath); err != nil {
+					log.Errorw("error processing delete tcont towards onu",
+						log.Fields{"pon": Intf, "onuID": onuID, "uniID": uniID, "allocId": techprofileInst.UsScheduler.AllocID})
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// nolint: gocyclo
+func (f *OpenOltFlowMgr) clearFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) {
+
+	log.Debugw("clearFlowFromResourceManager", log.Fields{"flowDirection": flowDirection, "flow": *flow})
+
+	if flowDirection == Multicast {
+		f.clearMulticastFlowFromResourceManager(ctx, flow)
+		return
+	}
+
+	var updatedFlows []rsrcMgr.FlowInfo
+	classifierInfo := make(map[string]interface{})
+
+	portNum, Intf, onu, uni, inPort, ethType, err := FlowExtractInfo(flow, flowDirection)
+	if err != nil {
+		log.Error(err)
+		return
+	}
+
+	onuID := int32(onu)
+	uniID := int32(uni)
+
+	for _, field := range flows.GetOfbFields(flow) {
+		if field.Type == flows.IP_PROTO {
+			classifierInfo[IPProto] = field.GetIpProto()
+			log.Debug("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+		}
+	}
+	log.Debugw("Extracted access info from flow to be deleted",
+		log.Fields{"ponIntf": Intf, "onuID": onuID, "uniID": uniID})
+
+	if ethType == LldpEthType || ((classifierInfo[IPProto] == IPProtoDhcp) && (flowDirection == "downstream")) {
+		onuID = -1
+		uniID = -1
+		log.Debug("Trap on nni flow set oni, uni to -1")
+		Intf, err = IntfIDFromNniPortNum(inPort)
+		if err != nil {
+			log.Errorw("invalid-in-port-number",
+				log.Fields{
+					"port-number": inPort,
+					"error":       err})
+			return
+		}
+	}
+	flowIds := f.resourceMgr.GetCurrentFlowIDsForOnu(ctx, Intf, onuID, uniID)
+	for _, flowID := range flowIds {
+		flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, Intf, onuID, uniID, flowID)
+		if flowInfo == nil {
+			log.Debugw("No FlowInfo found found in KV store",
+				log.Fields{"Intf": Intf, "onuID": onuID, "uniID": uniID, "flowID": flowID})
+			return
+		}
+		updatedFlows = nil
+		for _, flow := range *flowInfo {
+			updatedFlows = append(updatedFlows, flow)
+		}
+
+		for i, storedFlow := range updatedFlows {
+			if flow.Id == storedFlow.LogicalFlowID {
+				removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
+				log.Debugw("Flow to be deleted", log.Fields{"flow": storedFlow})
+				// DKB
+				if err = f.removeFlowFromDevice(&removeFlowMessage); err != nil {
+					log.Errorw("failed-to-remove-flow", log.Fields{"error": err})
+					return
+				}
+				log.Debug("Flow removed from device successfully")
+				//Remove the Flow from FlowInfo
+				updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
+				if err = f.clearResources(ctx, flow, Intf, onuID, uniID, storedFlow.Flow.GemportId,
+					flowID, flowDirection, portNum, updatedFlows); err != nil {
+					log.Error("Failed to clear resources for flow", log.Fields{"flow": storedFlow})
+					return
+				}
+			}
+		}
+	}
+}
+
+//clearMulticastFlowFromResourceManager  removes a multicast flow from the KV store and
+// clears resources reserved for this multicast flow
+func (f *OpenOltFlowMgr) clearMulticastFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats) {
+	classifierInfo := make(map[string]interface{})
+	formulateClassifierInfoFromFlow(classifierInfo, flow)
+	inPort, err := f.getInPortOfMulticastFlow(ctx, classifierInfo)
+
+	if err != nil {
+		log.Warnw("No inPort found. Cannot release resources of the multicast flow.", log.Fields{"flowId:": flow.Id})
+		return
+	}
+
+	networkInterfaceID, err := IntfIDFromNniPortNum(inPort)
+	if err != nil {
+		// DKB
+		log.Errorw("invalid-in-port-number",
+			log.Fields{
+				"port-number": inPort,
+				"error":       err})
+		return
+	}
+	var onuID = int32(NoneOnuID)
+	var uniID = int32(NoneUniID)
+	var flowID uint32
+	var updatedFlows []rsrcMgr.FlowInfo
+
+	flowIds := f.resourceMgr.GetCurrentFlowIDsForOnu(ctx, networkInterfaceID, onuID, uniID)
+
+	for _, flowID = range flowIds {
+		flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, networkInterfaceID, onuID, uniID, flowID)
+		if flowInfo == nil {
+			log.Debugw("No multicast FlowInfo found in the KV store",
+				log.Fields{"Intf": networkInterfaceID, "onuID": onuID, "uniID": uniID, "flowID": flowID})
+			continue
+		}
+		updatedFlows = nil
+		for _, flow := range *flowInfo {
+			updatedFlows = append(updatedFlows, flow)
+		}
+		for i, storedFlow := range updatedFlows {
+			if flow.Id == storedFlow.LogicalFlowID {
+				removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
+				log.Debugw("Multicast flow to be deleted", log.Fields{"flow": storedFlow})
+				//remove from device
+				if err := f.removeFlowFromDevice(&removeFlowMessage); err != nil {
+					// DKB
+					log.Errorw("failed-to-remove-multicast-flow",
+						log.Fields{
+							"flow-id": flow.Id,
+							"error":   err})
+					return
+				}
+				log.Debugw("Multicast flow removed from device successfully", log.Fields{"flowId": flow.Id})
+				//Remove the Flow from FlowInfo
+				updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
+				if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID), NoneOnuID, NoneUniID, flowID, &updatedFlows); err != nil {
+					log.Error("Failed to delete multicast flow from the KV store", log.Fields{"flow": storedFlow, "err": err})
+					return
+				}
+				//release flow id
+				log.Debugw("Releasing multicast flow id", log.Fields{"flowId": flowID, "interfaceID": networkInterfaceID})
+				f.resourceMgr.FreeFlowID(ctx, uint32(networkInterfaceID), NoneOnuID, NoneUniID, flowID)
+			}
+		}
+	}
+}
+
+//RemoveFlow removes the flow from the device
+func (f *OpenOltFlowMgr) RemoveFlow(ctx context.Context, flow *ofp.OfpFlowStats) {
+	log.Debugw("Removing Flow", log.Fields{"flow": flow})
+	var direction string
+	actionInfo := make(map[string]interface{})
+
+	for _, action := range flows.GetActions(flow) {
+		if action.Type == flows.OUTPUT {
+			if out := action.GetOutput(); out != nil {
+				actionInfo[Output] = out.GetPort()
+				log.Debugw("action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
+			} else {
+				log.Error("Invalid output port in action")
+				return
+			}
+		}
+	}
+
+	if flows.HasGroup(flow) {
+		direction = Multicast
+	} else if IsUpstream(actionInfo[Output].(uint32)) {
+		direction = Upstream
+	} else {
+		direction = Downstream
+	}
+	f.clearFlowFromResourceManager(ctx, flow, direction) //TODO: Take care of the limitations
+
+	return
+}
+
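+// waitForFlowDeletesToCompleteForOnu polls the pending-flow-delete map every 20ms and signals
+// on the channel once all pending flow deletes for the ONU/UNI have completed, or stops when
+// the context is cancelled.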
+func (f *OpenOltFlowMgr) waitForFlowDeletesToCompleteForOnu(ctx context.Context, intfID uint32, onuID uint32,
+	uniID uint32, ch chan bool) {
+	pnFlDelKey := pendingFlowDeleteKey{intfID, onuID, uniID}
+	for {
+		select {
+		case <-time.After(20 * time.Millisecond):
+			if flowDelRefCnt, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok || flowDelRefCnt == 0 {
+				log.Debug("pending flow deletes completed")
+				ch <- true
+				return
+			}
+		case <-ctx.Done():
+			log.Error("flow delete wait handler routine canceled")
+			return
+		}
+	}
+}
+
+//isIgmpTrapDownstreamFlow returns true if the flow is a downstream IGMP trap-to-host flow; false otherwise
+func isIgmpTrapDownstreamFlow(classifierInfo map[string]interface{}) bool {
+	if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_ETHERNET_NNI {
+		if ethType, ok := classifierInfo[EthType]; ok {
+			if ethType.(uint32) == IPv4EthType {
+				if ipProto, ok := classifierInfo[IPProto]; ok {
+					if ipProto.(uint32) == IgmpProto {
+						return true
+					}
+				}
+			}
+		}
+	}
+	return false
+}
+
+// AddFlow add flow to device
+// nolint: gocyclo
+func (f *OpenOltFlowMgr) AddFlow(ctx context.Context, flow *ofp.OfpFlowStats, flowMetadata *voltha.FlowMetadata) {
+	classifierInfo := make(map[string]interface{})
+	actionInfo := make(map[string]interface{})
+	var UsMeterID uint32
+	var DsMeterID uint32
+
+	log.Debugw("Adding Flow", log.Fields{"flow": flow, "flowMetadata": flowMetadata})
+	formulateClassifierInfoFromFlow(classifierInfo, flow)
+
+	err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flow)
+	if err != nil {
+		// Error logging is already done in the called function
+		// So just return in case of error
+		return
+	}
+
+	if flows.HasGroup(flow) {
+		// handle multicast flow
+		f.handleFlowWithGroup(ctx, actionInfo, classifierInfo, flow)
+		return
+	}
+
+	/* Controller bound trap flows */
+	err = formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo, flow)
+	if err != nil {
+		// error if any, already logged in the called function
+		return
+	}
+
+	log.Infow("Flow ports", log.Fields{"classifierInfo_inport": classifierInfo[InPort], "action_output": actionInfo[Output]})
+	portNo, intfID, onuID, uniID := ExtractAccessFromFlow(classifierInfo[InPort].(uint32), actionInfo[Output].(uint32))
+
+	if ethType, ok := classifierInfo[EthType]; ok {
+		if ethType.(uint32) == LldpEthType {
+			log.Info("Adding LLDP flow")
+			f.addLLDPFlow(ctx, flow, portNo)
+			return
+		}
+	}
+	if ipProto, ok := classifierInfo[IPProto]; ok {
+		if ipProto.(uint32) == IPProtoDhcp {
+			if udpSrc, ok := classifierInfo[UDPSrc]; ok {
+				if udpSrc.(uint32) == uint32(67) || udpSrc.(uint32) == uint32(546) {
+					log.Debug("trap-dhcp-from-nni-flow")
+					f.addDHCPTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
+					return
+				}
+			}
+		}
+	}
+	if isIgmpTrapDownstreamFlow(classifierInfo) {
+		log.Debug("trap-igmp-from-nni-flow")
+		f.addIgmpTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
+		return
+	}
+
+	f.deviceHandler.AddUniPortToOnu(intfID, onuID, portNo)
+	f.resourceMgr.AddUniPortToOnuInfo(ctx, intfID, onuID, portNo)
+
+	TpID, err := getTpIDFromFlow(flow)
+	if err != nil {
+		log.Errorw("metadata-is-not-present-invalid-flow-to-process", log.Fields{"pon": intfID, "onuID": onuID, "uniID": uniID})
+		return
+	}
+	log.Debugw("TPID for this subscriber", log.Fields{"TpId": TpID, "pon": intfID, "onuID": onuID, "uniID": uniID})
+	if IsUpstream(actionInfo[Output].(uint32)) {
+		UsMeterID = flows.GetMeterIdFromFlow(flow)
+		log.Debugw("Upstream-flow-meter-id", log.Fields{"UsMeterID": UsMeterID})
+	} else {
+		DsMeterID = flows.GetMeterIdFromFlow(flow)
+		log.Debugw("Downstream-flow-meter-id", log.Fields{"DsMeterID": DsMeterID})
+
+	}
+
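+	// pendingFlowDelete is keyed by (intfID, onuID, uniID); a live entry means flow deletes for this
+	// UNI are still in progress, so the install is deferred until they complete or a 10s timeout expires.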
+	pnFlDelKey := pendingFlowDeleteKey{intfID, onuID, uniID}
+	if _, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
+		log.Debugw("no pending flows found, going ahead with flow install", log.Fields{"pon": intfID, "onuID": onuID, "uniID": uniID})
+		f.divideAndAddFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, uint32(TpID), UsMeterID, DsMeterID, flowMetadata)
+	} else {
+		pendingFlowDelComplete := make(chan bool)
+		go f.waitForFlowDeletesToCompleteForOnu(ctx, intfID, onuID, uniID, pendingFlowDelComplete)
+		select {
+		case <-pendingFlowDelComplete:
+			log.Debugw("all pending flow deletes completed", log.Fields{"pon": intfID, "onuID": onuID, "uniID": uniID})
+			f.divideAndAddFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, uint32(TpID), UsMeterID, DsMeterID, flowMetadata)
+
+		case <-time.After(10 * time.Second):
+			log.Errorw("pending flow deletes not completed after timeout", log.Fields{"pon": intfID, "onuID": onuID, "uniID": uniID})
+		}
+	}
+}
+
+// handleFlowWithGroup adds multicast flow to the device.
+func (f *OpenOltFlowMgr) handleFlowWithGroup(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+	classifierInfo[PacketTagType] = DoubleTag
+	log.Debugw("add-multicast-flow", log.Fields{"classifierInfo": classifierInfo, "actionInfo": actionInfo})
+
+	inPort, err := f.getInPortOfMulticastFlow(ctx, classifierInfo)
+	if err != nil {
+		return NewErrNotFound("multicast-in-port", log.Fields{"classifier": classifierInfo}, err).Log()
+	}
+	//replace ipDst with ethDst
+	if ipv4Dst, ok := classifierInfo[Ipv4Dst]; ok &&
+		flows.IsMulticastIp(ipv4Dst.(uint32)) {
+		// replace ipv4_dst classifier with eth_dst
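+		// Illustration (assuming the standard IPv4-to-multicast-MAC mapping, i.e. the low 23 bits of
+		// the group address appended to 01:00:5e): 224.0.0.22 would become 01:00:5e:00:00:16.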
+		multicastMac := flows.ConvertToMulticastMacBytes(ipv4Dst.(uint32))
+		delete(classifierInfo, Ipv4Dst)
+		delete(classifierInfo, EthType)
+		classifierInfo[EthDst] = multicastMac
+		log.Debugw("multicast-ip-to-mac-conversion-success", log.Fields{"ip:": ipv4Dst.(uint32), "mac:": multicastMac})
+	}
+
+	onuID := NoneOnuID
+	uniID := NoneUniID
+	gemPortID := NoneGemPortID
+
+	networkInterfaceID, err := IntfIDFromNniPortNum(inPort)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"nni-in-port-number": inPort}, err).Log()
+	}
+
+	flowStoreCookie := getFlowStoreCookie(classifierInfo, uint32(0))
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debugw("multicast-flow-exists-not-re-adding", log.Fields{"classifierInfo": classifierInfo})
+		return nil
+	}
+	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
+	if err != nil {
+		return NewErrNotFound("multicast-flow-id", log.Fields{
+			"interface-id": networkInterfaceID,
+			"onu-id":       onuID,
+			"uni-id":       uniID,
+			"gem-port-id":  gemPortID,
+			"cookie":       flowStoreCookie},
+			err).Log()
+	}
+	classifierProto, err := makeOpenOltClassifierField(classifierInfo)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifierInfo}, err).Log()
+	}
+	groupID := actionInfo[GroupID].(uint32)
+	multicastFlow := openoltpb2.Flow{
+		FlowId:        flowID,
+		FlowType:      Multicast,
+		NetworkIntfId: int32(networkInterfaceID),
+		GroupId:       groupID,
+		Classifier:    classifierProto,
+		Priority:      int32(flow.Priority),
+		Cookie:        flow.Cookie}
+
+	if err = f.addFlowToDevice(ctx, flow, &multicastFlow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"flow": multicastFlow}, err).Log()
+	}
+	log.Debug("multicast flow added to device successfully")
+	//get cached group
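+	// AddGroup stored the group in the KV store (cached=true) because its members are pushed only
+	// after a multicast flow references the group; now that the flow exists, push the members and
+	// drop the cached copy.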
+	group, _, err := f.GetFlowGroupFromKVStore(ctx, groupID, true)
+	if err == nil {
+		//calling groupAdd to set group members after multicast flow creation
+		if f.ModifyGroup(ctx, group) {
+			//cached group can be removed now
+			f.resourceMgr.RemoveFlowGroupFromKVStore(ctx, groupID, true)
+		}
+	}
+
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &multicastFlow, flowStoreCookie, MulticastFlow, flowID, flow.Id)
+	if err = f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
+		int32(onuID),
+		int32(uniID),
+		flowID, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flowID, log.Fields{"flow": multicastFlow}, err).Log()
+	}
+	return nil
+}
+
+//getInPortOfMulticastFlow returns the inPort criterion if it exists; otherwise it returns the first NNI interface of the device
+func (f *OpenOltFlowMgr) getInPortOfMulticastFlow(ctx context.Context, classifierInfo map[string]interface{}) (uint32, error) {
+	if _, ok := classifierInfo[InPort]; ok {
+		return classifierInfo[InPort].(uint32), nil
+	}
+	// find first NNI port of the device
+	nniPorts, e := f.resourceMgr.GetNNIFromKVStore(ctx)
+	if e == nil && len(nniPorts) > 0 {
+		return nniPorts[0], nil
+	}
+	return 0, NewErrNotFound("nni-port", nil, e).Log()
+}
+
+// AddGroup add or update the group
+func (f *OpenOltFlowMgr) AddGroup(ctx context.Context, group *ofp.OfpGroupEntry) {
+	log.Infow("add-group", log.Fields{"group": group})
+	if group == nil {
+		log.Warn("skipping nil group")
+		return
+	}
+
+	groupToOlt := openoltpb2.Group{
+		GroupId: group.Desc.GroupId,
+		Command: openoltpb2.Group_SET_MEMBERS,
+		Action:  f.buildGroupAction(),
+	}
+
+	log.Debugw("Sending group to device", log.Fields{"groupToOlt": groupToOlt})
+	_, err := f.deviceHandler.Client.PerformGroupOperation(ctx, &groupToOlt)
+	if err != nil {
+		log.Errorw("add-group operation failed", log.Fields{"err": err, "groupToOlt": groupToOlt})
+		return
+	}
+	// group members not created yet. So let's store the group
+	if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, true); err != nil {
+		log.Errorw("Group cannot be stored in KV store", log.Fields{"groupId": group.Desc.GroupId, "err": err})
+	} else {
+		log.Debugw("add-group operation performed on the device successfully", log.Fields{"groupToOlt": groupToOlt})
+	}
+}
+
+//buildGroupAction creates and returns a group action that pops the outer VLAN tag on the members' egress
+func (f *OpenOltFlowMgr) buildGroupAction() *openoltpb2.Action {
+	var actionCmd openoltpb2.ActionCmd
+	var action openoltpb2.Action
+	action.Cmd = &actionCmd
+	//pop outer vlan
+	action.Cmd.RemoveOuterTag = true
+	return &action
+}
+
+// ModifyGroup updates the group
+func (f *OpenOltFlowMgr) ModifyGroup(ctx context.Context, group *ofp.OfpGroupEntry) bool {
+	log.Infow("modify-group", log.Fields{"group": group})
+	if group == nil || group.Desc == nil {
+		log.Warn("cannot modify group; group is nil")
+		return false
+	}
+
+	new := f.buildGroup(group.Desc.GroupId, group.Desc.Buckets)
+	//get existing members of the group
+	val, groupExists, err := f.GetFlowGroupFromKVStore(ctx, group.Desc.GroupId, false)
+
+	if err != nil {
+		log.Errorw("Failed to retrieve the group from the store. Cannot modify group.",
+			log.Fields{"groupId": group.Desc.GroupId, "err": err})
+		return false
+	}
+
+	var current *openoltpb2.Group // represents the group on the device
+	if groupExists {
+		// group already exists
+		current = f.buildGroup(group.Desc.GroupId, val.Desc.GetBuckets())
+		log.Debugw("modify-group: group exists.", log.Fields{"group on the device": val, "new": group})
+	} else {
+		current = f.buildGroup(group.Desc.GroupId, nil)
+	}
+
+	log.Debugw("modify-group: comparing current and new.", log.Fields{"group on the device": current, "new": new})
+	// get members to be added
+	membersToBeAdded := f.findDiff(current, new)
+	// get members to be removed
+	membersToBeRemoved := f.findDiff(new, current)
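+	// e.g. if the stored group has members on PON 0 and PON 1 while the new group only has a member
+	// on PON 1, membersToBeAdded is empty and membersToBeRemoved holds the PON 0 member
+	// (members are compared by their InterfaceId only).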
+
+	log.Infow("modify-group -> differences found", log.Fields{"membersToBeAdded": membersToBeAdded,
+		"membersToBeRemoved": membersToBeRemoved, "groupId": group.Desc.GroupId})
+
+	groupToOlt := openoltpb2.Group{
+		GroupId: group.Desc.GroupId,
+	}
+	var added, removed = true, true
+	if len(membersToBeAdded) > 0 {
+		groupToOlt.Command = openoltpb2.Group_ADD_MEMBERS
+		groupToOlt.Members = membersToBeAdded
+		//execute addMembers
+		added = f.callGroupAddRemove(&groupToOlt)
+	}
+	if len(membersToBeRemoved) > 0 {
+		groupToOlt.Command = openoltpb2.Group_REMOVE_MEMBERS
+		groupToOlt.Members = membersToBeRemoved
+		//execute removeMembers
+		removed = f.callGroupAddRemove(&groupToOlt)
+	}
+
+	//save the modified group
+	if added && removed {
+		if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, false); err != nil {
+			log.Errorw("Failed to save the group into kv store", log.Fields{"groupId": group.Desc.GroupId, "err": err})
+		}
+		log.Debugw("modify-group succeeded, storing the group", log.Fields{"group": group, "existingGroup": current})
+	} else {
+		log.Warnw("One of the group add/remove operations has failed. Cannot save group modifications",
+			log.Fields{"group": group})
+	}
+	return added && removed
+}
+
+//callGroupAddRemove performs add/remove buckets operation for the indicated group
+func (f *OpenOltFlowMgr) callGroupAddRemove(group *openoltpb2.Group) bool {
+	if err := f.performGroupOperation(group); err != nil {
+		st, _ := status.FromError(err)
+		//ignore already exists error code
+		if st.Code() != codes.AlreadyExists {
+			return false
+		}
+	}
+	return true
+}
+
+//findDiff compares group members and returns the members that exist only in group2
+func (f *OpenOltFlowMgr) findDiff(group1 *openoltpb2.Group, group2 *openoltpb2.Group) []*openoltpb2.GroupMember {
+	var members []*openoltpb2.GroupMember
+	for _, bucket := range group2.Members {
+		if !f.contains(group1.Members, bucket) {
+			// bucket does not exist and must be added
+			members = append(members, bucket)
+		}
+	}
+	return members
+}
+
+//contains returns true if the members list already has a member for the same interface (members are compared by InterfaceId only); false otherwise
+func (f *OpenOltFlowMgr) contains(members []*openoltpb2.GroupMember, member *openoltpb2.GroupMember) bool {
+	for _, groupMember := range members {
+		if groupMember.InterfaceId == member.InterfaceId {
+			return true
+		}
+	}
+	return false
+}
+
+//performGroupOperation invokes the PerformGroupOperation RPC of the openolt proto
+func (f *OpenOltFlowMgr) performGroupOperation(group *openoltpb2.Group) error {
+	log.Debugw("Sending group to device", log.Fields{"groupToOlt": group, "command": group.Command})
+	_, err := f.deviceHandler.Client.PerformGroupOperation(context.Background(), group)
+	if err != nil {
+		log.Errorw("group operation failed", log.Fields{"err": err, "groupToOlt": group})
+	}
+	return err
+}
+
+//buildGroup builds an openoltpb2.Group from the given group id and bucket list
+func (f *OpenOltFlowMgr) buildGroup(groupID uint32, buckets []*ofp.OfpBucket) *openoltpb2.Group {
+	group := openoltpb2.Group{
+		GroupId: groupID}
+	// create members of the group (ranging over a nil bucket list is simply a no-op)
+	for _, ofBucket := range buckets {
+		member := f.buildMember(ofBucket)
+		if member != nil && !f.contains(group.Members, member) {
+			group.Members = append(group.Members, member)
+		}
+	}
+	return &group
+}
+
+//buildMember builds openoltpb2.GroupMember from an OpenFlow bucket
+func (f *OpenOltFlowMgr) buildMember(ofBucket *ofp.OfpBucket) *openoltpb2.GroupMember {
+	var outPort uint32
+	outPortFound := false
+	for _, ofAction := range ofBucket.Actions {
+		if ofAction.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+			outPort = ofAction.GetOutput().Port
+			outPortFound = true
+		}
+	}
+
+	if !outPortFound {
+		log.Debugw("bucket skipped since no out port found in it",
+			log.Fields{"ofBucket": ofBucket})
+		return nil
+	}
+	interfaceID := IntfIDFromUniPortNum(outPort)
+	log.Debugw("got associated interface id of the port", log.Fields{"portNumber:": outPort, "interfaceId:": interfaceID})
+	if groupInfo, ok := f.interfaceToMcastQueueMap[interfaceID]; ok {
+		member := openoltpb2.GroupMember{
+			InterfaceId:   interfaceID,
+			InterfaceType: openoltpb2.GroupMember_PON,
+			GemPortId:     groupInfo.gemPortID,
+			Priority:      groupInfo.servicePriority,
+		}
+		//add member to the group
+		return &member
+	}
+	log.Warnw("bucket skipped since interface-2-gem mapping cannot be found",
+		log.Fields{"ofBucket": ofBucket})
+	return nil
+}
+
+//sendTPDownloadMsgToChild sends a tech-profile download request to the child (ONU) adapter
+func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
+
+	onuDevice, err := f.getOnuChildDevice(intfID, onuID)
+	if err != nil {
+		log.Errorw("Error while fetching Child device from core", log.Fields{"onuId": onuID})
+		return err
+	}
+	log.Debugw("Got child device from OLT device handler", log.Fields{"device": *onuDevice})
+
+	tpPath := f.getTPpath(intfID, uni, TpID)
+	tpDownloadMsg := &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID, Path: tpPath}
+	log.Infow("Sending Load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"msg": *tpDownloadMsg})
+	sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(context.Background(),
+		tpDownloadMsg,
+		ic.InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST,
+		f.deviceHandler.deviceType,
+		onuDevice.Type,
+		onuDevice.Id,
+		onuDevice.ProxyAddress.DeviceId, "")
+	if sendErr != nil {
+		log.Errorw("send techprofile-download request error", log.Fields{"fromAdapter": f.deviceHandler.deviceType,
+			"toAdapter": onuDevice.Type, "onuId": onuDevice.Id,
+			"proxyDeviceId": onuDevice.ProxyAddress.DeviceId})
+		return sendErr
+	}
+	log.Debugw("Successfully sent Load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"msg": tpDownloadMsg})
+	return nil
+}
+
+//UpdateOnuInfo function adds onu info to cache and kvstore
+func (f *OpenOltFlowMgr) UpdateOnuInfo(ctx context.Context, intfID uint32, onuID uint32, serialNum string) {
+
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	onu := rsrcMgr.OnuGemInfo{OnuID: onuID, SerialNumber: serialNum, IntfID: intfID}
+	f.onuGemInfo[intfID] = append(f.onuGemInfo[intfID], onu)
+	if err := f.resourceMgr.AddOnuGemInfo(ctx, intfID, onu); err != nil {
+		// TODO: VOL-2638
+		log.Errorw("failed to add onu info", log.Fields{"onu": onu})
+		return
+	}
+	log.Debugw("Updated onuinfo", log.Fields{"intfID": intfID, "onuID": onuID, "serialNum": serialNum})
+}
+
+//addGemPortToOnuInfoMap function adds GEMport to ONU map
+func (f *OpenOltFlowMgr) addGemPortToOnuInfoMap(ctx context.Context, intfID uint32, onuID uint32, gemPort uint32) {
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	onugem := f.onuGemInfo[intfID]
+	// update the gem in the local cache as well as in the kv store
+	for idx, onu := range onugem {
+		if onu.OnuID == onuID {
+			// check if the gem already exists; if not, update the cache and the kv store
+			for _, gem := range onu.GemPorts {
+				if gem == gemPort {
+					log.Debugw("Gem already in cache, no need to update cache and kv store",
+						log.Fields{"gem": gemPort})
+					return
+				}
+			}
+			onugem[idx].GemPorts = append(onugem[idx].GemPorts, gemPort)
+			f.onuGemInfo[intfID] = onugem
+		}
+	}
+	err := f.resourceMgr.AddGemToOnuGemInfo(ctx, intfID, onuID, gemPort)
+	if err != nil {
+		log.Errorw("Failed to add gem to onu", log.Fields{"intfId": intfID, "onuId": onuID, "gemPort": gemPort})
+		return
+	}
+}
+
+//getOnuIDfromGemPortMap looks up the ONU ID by serialNumber or by (intfID, gemPort).
+//It returns (onuID, nil) if found, or (0, error) if no ONU ID exists for the given key.
+func (f *OpenOltFlowMgr) getOnuIDfromGemPortMap(serialNumber string, intfID uint32, gemPortID uint32) (uint32, error) {
+
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+
+	log.Debugw("Getting ONU ID from GEM port and PON port", log.Fields{"serialNumber": serialNumber, "intfId": intfID, "gemPortId": gemPortID})
+	// get onuid from the onugem info cache
+	onugem := f.onuGemInfo[intfID]
+	for _, onu := range onugem {
+		for _, gem := range onu.GemPorts {
+			if gem == gemPortID {
+				return onu.OnuID, nil
+			}
+		}
+	}
+	return uint32(0), NewErrNotFound("onu-id", log.Fields{
+		"serial-number": serialNumber,
+		"interface-id":  intfID,
+		"gem-port-id":   gemPortID},
+		nil).Log()
+}
+
+//GetLogicalPortFromPacketIn computes the logical (UNI/NNI) port number from a packet-in indication and returns it
+func (f *OpenOltFlowMgr) GetLogicalPortFromPacketIn(ctx context.Context, packetIn *openoltpb2.PacketIndication) (uint32, error) {
+	var logicalPortNum uint32
+	var onuID uint32
+	var err error
+
+	if packetIn.IntfType == "pon" {
+		// the packet indication does not carry the serial number, so pass an empty string
+		if onuID, err = f.getOnuIDfromGemPortMap("", packetIn.IntfId, packetIn.GemportId); err != nil {
+			log.Errorw("Unable to get ONU ID from GEM/PON port", log.Fields{"pon port": packetIn.IntfId, "gemport": packetIn.GemportId})
+			return logicalPortNum, err
+		}
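+		// Prefer the UNI port number carried in the indication; otherwise derive the logical port
+		// from (intfID, onuID, uniID). Multi-UNI ONUs are not handled yet (uniID fixed to 0 below).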
+		if packetIn.PortNo != 0 {
+			logicalPortNum = packetIn.PortNo
+		} else {
+			uniID := uint32(0) //  FIXME - multi-uni support
+			logicalPortNum = MkUniPortNum(packetIn.IntfId, onuID, uniID)
+		}
+		// Store the gem port through which the packet_in came. Use the same gem port for packet_out
+		f.UpdateGemPortForPktIn(ctx, packetIn.IntfId, onuID, logicalPortNum, packetIn.GemportId)
+	} else if packetIn.IntfType == "nni" {
+		logicalPortNum = IntfIDToPortNo(packetIn.IntfId, voltha.Port_ETHERNET_NNI)
+	}
+	log.Debugw("Retrieved logicalport from packet-in", log.Fields{
+		"logicalPortNum": logicalPortNum,
+		"IntfType":       packetIn.IntfType,
+		"packet":         hex.EncodeToString(packetIn.Pkt),
+	})
+	return logicalPortNum, nil
+}
+
+//GetPacketOutGemPortID returns gemPortId
+func (f *OpenOltFlowMgr) GetPacketOutGemPortID(ctx context.Context, intfID uint32, onuID uint32, portNum uint32) (uint32, error) {
+	var gemPortID uint32
+	var err error
+
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	pktInkey := rsrcMgr.PacketInInfoKey{IntfID: intfID, OnuID: onuID, LogicalPort: portNum}
+
+	gemPortID, ok := f.packetInGemPort[pktInkey]
+	if ok {
+		log.Debugw("Found gemport for pktin key", log.Fields{"pktinkey": pktInkey, "gem": gemPortID})
+		return gemPortID, err
+	}
+	//If gem is not found in cache try to get it from kv store, if found in kv store, update the cache and return.
+	gemPortID, err = f.resourceMgr.GetGemPortFromOnuPktIn(ctx, intfID, onuID, portNum)
+	if err == nil {
+		if gemPortID != 0 {
+			f.packetInGemPort[pktInkey] = gemPortID
+			log.Debugw("Found gem port from kv store and updating cache with gemport",
+				log.Fields{"pktinkey": pktInkey, "gem": gemPortID})
+			return gemPortID, nil
+		}
+	}
+	log.Errorw("Failed to get gemport", log.Fields{"pktinkey": pktInkey, "gem": gemPortID})
+	return uint32(0), err
+}
+
+func installFlowOnAllGemports(ctx context.Context,
+	f1 func(ctx context.Context, intfId uint32, onuId uint32, uniId uint32,
+		portNo uint32, classifier map[string]interface{}, action map[string]interface{},
+		logicalFlow *ofp.OfpFlowStats, allocId uint32, gemPortId uint32) error,
+	f2 func(ctx context.Context, intfId uint32, onuId uint32, uniId uint32, portNo uint32,
+		classifier map[string]interface{}, action map[string]interface{},
+		logicalFlow *ofp.OfpFlowStats, allocId uint32, gemPortId uint32, vlanId uint32,
+	) error,
+	args map[string]uint32,
+	classifier map[string]interface{}, action map[string]interface{},
+	logicalFlow *ofp.OfpFlowStats,
+	gemPorts []uint32,
+	TpInst *tp.TechProfile,
+	FlowType string,
+	vlanID ...uint32) {
+	log.Debugw("Installing flow on all GEM ports", log.Fields{"FlowType": FlowType, "gemPorts": gemPorts, "vlan": vlanID})
+
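+	// f1 installs HSIA/DHCP/IGMP flows; f2 installs EAPOL flows, which additionally take a VLAN id.
+	// Exactly one of the two callbacks is expected to be non-nil for a given FlowType.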
+	for _, gemPortAttribute := range TpInst.UpstreamGemPortAttributeList {
+		var gemPortID uint32
+		// The bit mapping for a gemport is expressed in tech-profile as a binary string. For example, 0b00000001
+		// We need to trim prefix "0b", before further processing
+		// Once the "0b" prefix is trimmed, we iterate each character in the string to identify which index
+		// in the string is set to binary bit 1 (expressed as char '1' in the binary string).
+		for pos, pbitSet := range strings.TrimPrefix(gemPortAttribute.PbitMap, BinaryStringPrefix) {
+			// If a particular character in the string is set to '1', identify the index of this character from
+			// the LSB position which marks the PCP bit consumed by the given gem port.
+			// This PCP bit now becomes a classifier in the flow.
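+			// Worked example (illustrative only): a PbitMap of "0b00100000" trims to "00100000";
+			// the single '1' is at string index 2, so the PCP classifier becomes 8-1-2 = 5.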
+			if pbitSet == BinaryBit1 {
+				classifier[VlanPcp] = uint32(len(strings.TrimPrefix(gemPortAttribute.PbitMap, BinaryStringPrefix))) - 1 - uint32(pos)
+				gemPortID = gemPortAttribute.GemportID
+				if FlowType == HsiaFlow || FlowType == DhcpFlow || FlowType == IgmpFlow {
+					f1(ctx, args["intfId"], args["onuId"], args["uniId"], args["portNo"], classifier, action, logicalFlow, args["allocId"], gemPortID)
+				} else if FlowType == EapolFlow {
+					f2(ctx, args["intfId"], args["onuId"], args["uniId"], args["portNo"], classifier, action, logicalFlow, args["allocId"], gemPortID, vlanID[0])
+				} else {
+					log.Errorw("Unrecognized Flow Type", log.Fields{"FlowType": FlowType})
+					return
+				}
+			}
+		}
+	}
+}
+
+func (f *OpenOltFlowMgr) addDHCPTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
+	log.Debug("Adding trap-dhcp-of-nni-flow")
+	action := make(map[string]interface{})
+	classifier[PacketTagType] = DoubleTag
+	action[TrapToHost] = true
+	/* We manage flowId resource pool on per PON port basis.
+	   Since this situation is tricky, as a hack, we pass the NNI port
+	   index (network_intf_id) as PON port Index for the flowId resource
+	   pool. Also, there is no ONU Id available for trapping DHCP packets
+	   on NNI port, use onu_id as -1 (invalid)
+	   ****************** CAVEAT *******************
+	   This logic works if the NNI Port Id falls within the same valid
+	   range of PON Port Ids. If this doesn't work for some OLT Vendor
+	   we need to have a re-look at this.
+	   *********************************************
+	*/
+	onuID := -1
+	uniID := -1
+	gemPortID := -1
+	allocID := -1
+	networkInterfaceID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id", log.Fields{
+			"classifier": classifier,
+			"action":     action},
+			err).Log()
+	}
+
+	flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("Flow-exists-not-re-adding")
+		return nil
+	}
+	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0)
+	if err != nil {
+		return NewErrNotFound("dhcp-trap-nni-flow-id", log.Fields{
+			"interface-id": networkInterfaceID,
+			"onu-id":       onuID,
+			"uni-id":       uniID,
+			"gem-port-id":  gemPortID,
+			"cookie":       flowStoreCookie},
+			err).Log()
+	}
+	classifierProto, err := makeOpenOltClassifierField(classifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(action)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": action}, err).Log()
+	}
+	log.Debugw("Created action proto", log.Fields{"action": *actionProto})
+	downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
+		OnuId:         int32(onuID), // OnuId not required
+		UniId:         int32(uniID), // UniId not used
+		FlowId:        flowID,
+		FlowType:      Downstream,
+		AllocId:       int32(allocID), // AllocId not used
+		NetworkIntfId: int32(networkInterfaceID),
+		GemportId:     int32(gemPortID), // GemportId not used
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+	if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	log.Debug("DHCP trap on NNI flow added to device successfully")
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
+		int32(onuID),
+		int32(uniID),
+		flowID, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	return nil
+}
+
+//getPacketTypeFromClassifiers finds and returns packet type of a flow by checking flow classifiers
+func getPacketTypeFromClassifiers(classifierInfo map[string]interface{}) string {
+	var packetType string
+	ovid, ivid := false, false
+	if vlanID, ok := classifierInfo[VlanVid].(uint32); ok {
+		vid := vlanID & VlanvIDMask
+		if vid != ReservedVlan {
+			ovid = true
+		}
+	}
+	if metadata, ok := classifierInfo[Metadata].(uint64); ok {
+		vid := uint32(metadata)
+		if vid != ReservedVlan {
+			ivid = true
+		}
+	}
+	if ovid && ivid {
+		packetType = DoubleTag
+	} else if !ovid && !ivid {
+		packetType = Untagged
+	} else {
+		packetType = SingleTag
+	}
+	return packetType
+}
+
+//addIgmpTrapFlowOnNNI adds a trap-to-host flow on NNI
+func (f *OpenOltFlowMgr) addIgmpTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
+	log.Debugw("Adding igmp-trap-of-nni-flow", log.Fields{"classifierInfo": classifier})
+	action := make(map[string]interface{})
+	classifier[PacketTagType] = getPacketTypeFromClassifiers(classifier)
+	action[TrapToHost] = true
+	/* We manage flowId resource pool on per PON port basis.
+	   Since this situation is tricky, as a hack, we pass the NNI port
+	   index (network_intf_id) as PON port Index for the flowId resource
+	   pool. Also, there is no ONU Id available for trapping packets
+	   on NNI port, use onu_id as -1 (invalid)
+	   ****************** CAVEAT *******************
+	   This logic works if the NNI Port Id falls within the same valid
+	   range of PON Port Ids. If this doesn't work for some OLT Vendor
+	   we need to have a re-look at this.
+	   *********************************************
+	*/
+	onuID := -1
+	uniID := -1
+	gemPortID := -1
+	allocID := -1
+	networkInterfaceID, err := getNniIntfID(classifier, action)
+	if err != nil {
+		return NewErrNotFound("nni-interface-id", log.Fields{
+			"classifier": classifier,
+			"action":     action},
+			err).Log()
+	}
+	flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+	if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
+		log.Debug("igmp-flow-exists-not-re-adding")
+		return nil
+	}
+	flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
+	if err != nil {
+		return NewErrNotFound("igmp-flow-id", log.Fields{
+			"interface-id": networkInterfaceID,
+			"onu-id":       onuID,
+			"uni-id":       uniID,
+			"gem-port-id":  gemPortID,
+			"cookie":       flowStoreCookie},
+			err).Log()
+	}
+	classifierProto, err := makeOpenOltClassifierField(classifier)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
+	}
+	log.Debugw("Created classifier proto for the IGMP flow", log.Fields{"classifier": *classifierProto})
+	actionProto, err := makeOpenOltActionField(action)
+	if err != nil {
+		return NewErrInvalidValue(log.Fields{"action": action}, err).Log()
+	}
+	log.Debugw("Created action proto for the IGMP flow", log.Fields{"action": *actionProto})
+	downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
+		OnuId:         int32(onuID), // OnuId not required
+		UniId:         int32(uniID), // UniId not used
+		FlowId:        flowID,
+		FlowType:      Downstream,
+		AllocId:       int32(allocID), // AllocId not used
+		NetworkIntfId: int32(networkInterfaceID),
+		GemportId:     int32(gemPortID), // GemportId not used
+		Classifier:    classifierProto,
+		Action:        actionProto,
+		Priority:      int32(logicalFlow.Priority),
+		Cookie:        logicalFlow.Cookie,
+		PortNo:        portNo}
+	if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
+		return NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	log.Debug("IGMP Trap on NNI flow added to device successfully")
+	flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
+	if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
+		int32(onuID),
+		int32(uniID),
+		flowID, flowsToKVStore); err != nil {
+		return NewErrPersistence("update", "flow", flowID, log.Fields{"flow": downstreamflow}, err).Log()
+	}
+	return nil
+}
+
+func verifyMeterIDAndGetDirection(MeterID uint32, Dir tp_pb.Direction) (string, error) {
+	if MeterID == 0 { // This should never happen
+		return "", NewErrInvalidValue(log.Fields{"meter-id": MeterID}, nil).Log()
+	}
+	if Dir == tp_pb.Direction_UPSTREAM {
+		return "upstream", nil
+	} else if Dir == tp_pb.Direction_DOWNSTREAM {
+		return "downstream", nil
+	}
+	return "", nil
+}
+
+func (f *OpenOltFlowMgr) checkAndAddFlow(ctx context.Context, args map[string]uint32, classifierInfo map[string]interface{},
+	actionInfo map[string]interface{}, flow *ofp.OfpFlowStats, TpInst *tp.TechProfile, gemPorts []uint32,
+	TpID uint32, uni string) {
+	var gemPort uint32
+	intfID := args[IntfID]
+	onuID := args[OnuID]
+	uniID := args[UniID]
+	portNo := args[PortNo]
+	allocID := TpInst.UsScheduler.AllocID
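+	// Dispatch on the flow contents: DHCP and IGMP trap flows are recognised by IP protocol,
+	// EAPOL by eth-type, upstream HSIA by a push-vlan action and downstream HSIA by a pop-vlan action.
+	// When no PCP classifier is present, the flow is replicated on every GEM port of the tech profile.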
+	if ipProto, ok := classifierInfo[IPProto]; ok {
+		if ipProto.(uint32) == IPProtoDhcp {
+			log.Info("Adding DHCP flow")
+			if pcp, ok := classifierInfo[VlanPcp]; ok {
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+					tp_pb.Direction_UPSTREAM,
+					pcp.(uint32))
+				//Adding DHCP upstream flow
+				f.addDHCPTrapFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort)
+			} else {
+				//Adding DHCP upstream flow to all gemports
+				installFlowOnAllGemports(ctx, f.addDHCPTrapFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, DhcpFlow)
+			}
+
+		} else if ipProto.(uint32) == IgmpProto {
+			log.Infow("Adding Us IGMP flow", log.Fields{"intfID": intfID, "onuID": onuID, "uniID": uniID, "classifierInfo:": classifierInfo})
+			if pcp, ok := classifierInfo[VlanPcp]; ok {
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+					tp_pb.Direction_UPSTREAM,
+					pcp.(uint32))
+				f.addIGMPTrapFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort)
+			} else {
+				//Adding IGMP upstream flow to all gem ports
+				installFlowOnAllGemports(ctx, f.addIGMPTrapFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, IgmpFlow)
+			}
+		} else {
+			log.Errorw("Invalid-Classifier-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo})
+			return
+		}
+	} else if ethType, ok := classifierInfo[EthType]; ok {
+		if ethType.(uint32) == EapEthType {
+			log.Info("Adding EAPOL flow")
+			var vlanID uint32
+			if val, ok := classifierInfo[VlanVid]; ok {
+				vlanID = (val.(uint32)) & VlanvIDMask
+			} else {
+				vlanID = DefaultMgmtVlan
+			}
+			if pcp, ok := classifierInfo[VlanPcp]; ok {
+				gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+					tp_pb.Direction_UPSTREAM,
+					pcp.(uint32))
+
+				f.addEAPOLFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort, vlanID)
+			} else {
+				installFlowOnAllGemports(ctx, nil, f.addEAPOLFlow, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, EapolFlow, vlanID)
+			}
+		}
+	} else if _, ok := actionInfo[PushVlan]; ok {
+		log.Info("Adding upstream data rule")
+		if pcp, ok := classifierInfo[VlanPcp]; ok {
+			gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+				tp_pb.Direction_UPSTREAM,
+				pcp.(uint32))
+			//Adding HSIA upstream flow
+			f.addUpstreamDataFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort)
+		} else {
+			//Adding HSIA upstream flow to all gemports
+			installFlowOnAllGemports(ctx, f.addUpstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow)
+		}
+	} else if _, ok := actionInfo[PopVlan]; ok {
+		log.Info("Adding Downstream data rule")
+		if pcp, ok := classifierInfo[VlanPcp]; ok {
+			gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+				tp_pb.Direction_DOWNSTREAM,
+				pcp.(uint32))
+			//Adding HSIA downstream flow
+			f.addDownstreamDataFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort)
+		} else {
+			//Adding HSIA downstream flow to all gemports
+			installFlowOnAllGemports(ctx, f.addDownstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow)
+		}
+	} else {
+		log.Errorw("Invalid-flow-type-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo, "flow": flow})
+		return
+	}
+	// Send Techprofile download event to child device in go routine as it takes time
+	go f.sendTPDownloadMsgToChild(intfID, onuID, uniID, uni, TpID)
+}
+
+func (f *OpenOltFlowMgr) isGemPortUsedByAnotherFlow(gemPK gemPortKey) bool {
+	flowIDList := f.flowsUsedByGemPort[gemPK]
+	return len(flowIDList) > 1
+}
+
+func (f *OpenOltFlowMgr) isTechProfileUsedByAnotherGem(ctx context.Context, ponIntf uint32, onuID uint32, uniID uint32, tpID uint32, tpInst *tp.TechProfile, gemPortID uint32) (bool, uint32) {
+	currentGemPorts := f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, ponIntf, onuID, uniID)
+	tpGemPorts := tpInst.UpstreamGemPortAttributeList
+	for _, currentGemPort := range currentGemPorts {
+		for _, tpGemPort := range tpGemPorts {
+			if (currentGemPort == tpGemPort.GemportID) && (currentGemPort != gemPortID) {
+				return true, currentGemPort
+			}
+		}
+	}
+	if tpInst.InstanceCtrl.Onu == "single-instance" {
+		// The TP information for the given TP ID, PON ID, ONU ID, UNI ID should be removed.
+		f.resourceMgr.RemoveTechProfileIDForOnu(ctx, ponIntf, uint32(onuID), uint32(uniID), tpID)
+		f.DeleteTechProfileInstance(ctx, ponIntf, uint32(onuID), uint32(uniID), "", tpID)
+
+		// Although we cleaned up TP Instance for the given (PON ID, ONU ID, UNI ID), the TP might
+		// still be used on other uni ports.
+		// So, we need to check and make sure that no other gem port is referring to the given TP ID
+		// on any other uni port.
+		tpInstances := f.techprofile[ponIntf].FindAllTpInstances(ctx, tpID, ponIntf, onuID)
+		log.Debugw("got single instance tp instances", log.Fields{"tpInstances": tpInstances})
+		for i := 0; i < len(tpInstances); i++ {
+			tpI := tpInstances[i]
+			tpGemPorts := tpI.UpstreamGemPortAttributeList
+			for _, tpGemPort := range tpGemPorts {
+				if tpGemPort.GemportID != gemPortID {
+					log.Debugw("single instance tp is in use by gem", log.Fields{"gemPort": tpGemPort.GemportID})
+					return true, tpGemPort.GemportID
+				}
+			}
+		}
+	}
+	log.Debug("tech profile is not in use by any gem")
+	return false, 0
+}
+
+func formulateClassifierInfoFromFlow(classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) {
+	for _, field := range flows.GetOfbFields(flow) {
+		if field.Type == flows.ETH_TYPE {
+			classifierInfo[EthType] = field.GetEthType()
+			log.Debugw("field-type-eth-type", log.Fields{"classifierInfo[ETH_TYPE]": classifierInfo[EthType].(uint32)})
+		} else if field.Type == flows.ETH_DST {
+			classifierInfo[EthDst] = field.GetEthDst()
+			log.Debugw("field-type-eth-dst", log.Fields{"classifierInfo[ETH_DST]": classifierInfo[EthDst].([]uint8)})
+		} else if field.Type == flows.IP_PROTO {
+			classifierInfo[IPProto] = field.GetIpProto()
+			log.Debugw("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+		} else if field.Type == flows.IN_PORT {
+			classifierInfo[InPort] = field.GetPort()
+			log.Debugw("field-type-in-port", log.Fields{"classifierInfo[IN_PORT]": classifierInfo[InPort].(uint32)})
+		} else if field.Type == flows.VLAN_VID {
+			classifierInfo[VlanVid] = field.GetVlanVid() & 0xfff
+			log.Debugw("field-type-vlan-vid", log.Fields{"classifierInfo[VLAN_VID]": classifierInfo[VlanVid].(uint32)})
+		} else if field.Type == flows.VLAN_PCP {
+			classifierInfo[VlanPcp] = field.GetVlanPcp()
+			log.Debugw("field-type-vlan-pcp", log.Fields{"classifierInfo[VLAN_PCP]": classifierInfo[VlanPcp].(uint32)})
+		} else if field.Type == flows.UDP_DST {
+			classifierInfo[UDPDst] = field.GetUdpDst()
+			log.Debugw("field-type-udp-dst", log.Fields{"classifierInfo[UDP_DST]": classifierInfo[UDPDst].(uint32)})
+		} else if field.Type == flows.UDP_SRC {
+			classifierInfo[UDPSrc] = field.GetUdpSrc()
+			log.Debugw("field-type-udp-src", log.Fields{"classifierInfo[UDP_SRC]": classifierInfo[UDPSrc].(uint32)})
+		} else if field.Type == flows.IPV4_DST {
+			classifierInfo[Ipv4Dst] = field.GetIpv4Dst()
+			log.Debugw("field-type-ipv4-dst", log.Fields{"classifierInfo[IPV4_DST]": classifierInfo[Ipv4Dst].(uint32)})
+		} else if field.Type == flows.IPV4_SRC {
+			classifierInfo[Ipv4Src] = field.GetIpv4Src()
+			log.Debugw("field-type-ipv4-src", log.Fields{"classifierInfo[IPV4_SRC]": classifierInfo[Ipv4Src].(uint32)})
+		} else if field.Type == flows.METADATA {
+			classifierInfo[Metadata] = field.GetTableMetadata()
+			log.Debugw("field-type-metadata", log.Fields{"classifierInfo[Metadata]": classifierInfo[Metadata].(uint64)})
+		} else if field.Type == flows.TUNNEL_ID {
+			classifierInfo[TunnelID] = field.GetTunnelId()
+			log.Debugw("field-type-tunnelId", log.Fields{"classifierInfo[TUNNEL_ID]": classifierInfo[TunnelID].(uint64)})
+		} else {
+			log.Errorw("Unsupported field type", log.Fields{"type": field.Type})
+			return
+		}
+	}
+}
+
+func formulateActionInfoFromFlow(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+	for _, action := range flows.GetActions(flow) {
+		if action.Type == flows.OUTPUT {
+			if out := action.GetOutput(); out != nil {
+				actionInfo[Output] = out.GetPort()
+				log.Debugw("action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
+			} else {
+				return NewErrInvalidValue(log.Fields{"output-port": nil}, nil).Log()
+			}
+		} else if action.Type == flows.POP_VLAN {
+			actionInfo[PopVlan] = true
+			log.Debugw("action-type-pop-vlan", log.Fields{"in_port": classifierInfo[InPort].(uint32)})
+		} else if action.Type == flows.PUSH_VLAN {
+			if out := action.GetPush(); out != nil {
+				if tpid := out.GetEthertype(); tpid != 0x8100 {
+					log.Errorw("Invalid ethertype in push action", log.Fields{"ethertype": tpid})
+				} else {
+					actionInfo[PushVlan] = true
+					actionInfo[TPID] = tpid
+					log.Debugw("action-type-push-vlan",
+						log.Fields{"push_tpid": actionInfo[TPID].(uint32), "in_port": classifierInfo[InPort].(uint32)})
+				}
+			}
+		} else if action.Type == flows.SET_FIELD {
+			if out := action.GetSetField(); out != nil {
+				if field := out.GetField(); field != nil {
+					if ofClass := field.GetOxmClass(); ofClass != ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+						return NewErrInvalidValue(log.Fields{"openflow-class": ofClass}, nil).Log()
+					}
+					/*log.Debugw("action-type-set-field",log.Fields{"field": field, "in_port": classifierInfo[IN_PORT].(uint32)})*/
+					formulateSetFieldActionInfoFromFlow(field, actionInfo)
+				}
+			}
+		} else if action.Type == flows.GROUP {
+			formulateGroupActionInfoFromFlow(action, actionInfo)
+		} else {
+			return NewErrInvalidValue(log.Fields{"action-type": action.Type}, nil).Log()
+		}
+	}
+	return nil
+}
+
+func formulateSetFieldActionInfoFromFlow(field *ofp.OfpOxmField, actionInfo map[string]interface{}) {
+	if ofbField := field.GetOfbField(); ofbField != nil {
+		if fieldtype := ofbField.GetType(); fieldtype == ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID {
+			if vlan := ofbField.GetVlanVid(); vlan != 0 {
+				actionInfo[VlanVid] = vlan & 0xfff
+				log.Debugw("action-set-vlan-vid", log.Fields{"actionInfo[VLAN_VID]": actionInfo[VlanVid].(uint32)})
+			} else {
+				log.Error("Invalid vlan id in set vlan-vid action")
+			}
+		} else {
+			log.Errorw("unsupported-action-set-field-type", log.Fields{"type": fieldtype})
+		}
+	}
+}
+
+func formulateGroupActionInfoFromFlow(action *ofp.OfpAction, actionInfo map[string]interface{}) {
+	if action.GetGroup() == nil {
+		log.Warn("No group entry found in the group action")
+	} else {
+		actionInfo[GroupID] = action.GetGroup().GroupId
+		log.Debugw("action-group-id", log.Fields{"actionInfo[GroupID]": actionInfo[GroupID].(uint32)})
+	}
+}
+
+func formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+	if isControllerFlow := IsControllerBoundFlow(actionInfo[Output].(uint32)); isControllerFlow {
+		log.Debug("Controller bound trap flows, getting inport from tunnelid")
+		/* Get UNI port/ IN Port from tunnel ID field for upstream controller bound flows  */
+		if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
+			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
+				classifierInfo[InPort] = uniPort
+				log.Debugw("upstream pon-to-controller-flow,inport-in-tunnelid", log.Fields{"newInPort": classifierInfo[InPort].(uint32), "outPort": actionInfo[Output].(uint32)})
+			} else {
+				return NewErrNotFound("child-in-port", log.Fields{
+					"reason": "upstream pon-to-controller-flow, NO-inport-in-tunnelid",
+					"flow":   flow}, nil).Log()
+			}
+		}
+	} else {
+		log.Debug("Non-Controller flows, getting uniport from tunnelid")
+		// Downstream flow from NNI to PON port , Use tunnel ID as new OUT port / UNI port
+		if portType := IntfIDToPortTypeName(actionInfo[Output].(uint32)); portType == voltha.Port_PON_OLT {
+			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
+				actionInfo[Output] = uniPort
+				log.Debugw("downstream-nni-to-pon-port-flow, outport-in-tunnelid", log.Fields{"newOutPort": actionInfo[Output].(uint32), "outPort": actionInfo[Output].(uint32)})
+			} else {
+				return NewErrNotFound("out-port", log.Fields{
+					"reason": "downstream-nni-to-pon-port-flow, no-outport-in-tunnelid",
+					"flow":   flow}, nil).Log()
+			}
+			// Upstream flow from PON to NNI port , Use tunnel ID as new IN port / UNI port
+		} else if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
+			if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
+				classifierInfo[InPort] = uniPort
+				log.Debugw("upstream-pon-to-nni-port-flow, inport-in-tunnelid", log.Fields{"newInPort": classifierInfo[InPort].(uint32),
+					"outport": actionInfo[Output].(uint32)})
+			} else {
+				return NewErrNotFound("nni-port", log.Fields{
+					"reason":   "upstream-pon-to-nni-port-flow, no-inport-in-tunnelid",
+					"in-port":  classifierInfo[InPort].(uint32),
+					"out-port": actionInfo[Output].(uint32),
+					"flow":     flow}, nil).Log()
+			}
+		}
+	}
+	return nil
+}
+
+func getTpIDFromFlow(flow *ofp.OfpFlowStats) (uint32, error) {
+	/*     Metadata 8 bytes:
+		   Most Significant 2 Bytes = Inner VLAN
+		   Next 2 Bytes = Tech Profile ID(TPID)
+		   Least Significant 4 Bytes = Port ID
+	       Flow Metadata carries Tech-Profile (TP) ID and is mandatory in all
+	       subscriber related flows.
+	*/
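+	// Illustrative example: metadata 0x0001004000000080 carries inner VLAN 0x0001 (1),
+	// TP ID 0x0040 (64) and port ID 0x00000080 (128).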
+	metadata := flows.GetMetadataFromWriteMetadataAction(flow)
+	if metadata == 0 {
+		return 0, NewErrNotFound("metadata", log.Fields{"flow": flow}, nil).Log()
+	}
+	TpID := flows.GetTechProfileIDFromWriteMetaData(metadata)
+	return uint32(TpID), nil
+}
+
+func appendUnique(slice []uint32, item uint32) []uint32 {
+	for _, sliceElement := range slice {
+		if sliceElement == item {
+			return slice
+		}
+	}
+	return append(slice, item)
+}
+
+// getNniIntfID gets nni intf id from the flow classifier/action
+func getNniIntfID(classifier map[string]interface{}, action map[string]interface{}) (uint32, error) {
+
+	portType := IntfIDToPortTypeName(classifier[InPort].(uint32))
+	if portType == voltha.Port_PON_OLT {
+		intfID, err := IntfIDFromNniPortNum(action[Output].(uint32))
+		if err != nil {
+			log.Debugw("invalid-action-port-number",
+				log.Fields{
+					"port-number": action[Output].(uint32),
+					"error":       err})
+			return uint32(0), err
+		}
+		log.Debugw("output Nni IntfID is", log.Fields{"intfid": intfID})
+		return intfID, nil
+	} else if portType == voltha.Port_ETHERNET_NNI {
+		intfID, err := IntfIDFromNniPortNum(classifier[InPort].(uint32))
+		if err != nil {
+			log.Debugw("invalid-classifier-port-number",
+				log.Fields{
+					"port-number": classifier[InPort].(uint32),
+					"error":       err})
+			return uint32(0), err
+		}
+		log.Debugw("input Nni IntfID is", log.Fields{"intfid": intfID})
+		return intfID, nil
+	}
+	return uint32(0), nil
+}
+
+// UpdateGemPortForPktIn updates gemport for packet-in in to the cache and to the kv store as well.
+func (f *OpenOltFlowMgr) UpdateGemPortForPktIn(ctx context.Context, intfID uint32, onuID uint32, logicalPort uint32, gemPort uint32) {
+	pktInkey := rsrcMgr.PacketInInfoKey{IntfID: intfID, OnuID: onuID, LogicalPort: logicalPort}
+
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	lookupGemPort, ok := f.packetInGemPort[pktInkey]
+	if ok {
+		if lookupGemPort == gemPort {
+			log.Debugw("pktin key/value found in cache, no need to update kv as we assume both are in sync",
+				log.Fields{"pktinkey": pktInkey, "gem": gemPort})
+			return
+		}
+	}
+	f.packetInGemPort[pktInkey] = gemPort
+
+	f.resourceMgr.UpdateGemPortForPktIn(ctx, pktInkey, gemPort)
+	log.Debugw("pktin key not found in local cache or value is different. updating cache and kv store", log.Fields{"pktinkey": pktInkey, "gem": gemPort})
+	return
+}
+
+// AddUniPortToOnuInfo adds uni port to the onugem info both in cache and kvstore.
+func (f *OpenOltFlowMgr) AddUniPortToOnuInfo(ctx context.Context, intfID uint32, onuID uint32, portNum uint32) {
+
+	f.lockCache.Lock()
+	defer f.lockCache.Unlock()
+	onugem := f.onuGemInfo[intfID]
+	for idx, onu := range onugem {
+		if onu.OnuID == onuID {
+			for _, uni := range onu.UniPorts {
+				if uni == portNum {
+					log.Debugw("uni already in cache, no need to update cache and kv store",
+						log.Fields{"uni": portNum})
+					return
+				}
+			}
+			onugem[idx].UniPorts = append(onugem[idx].UniPorts, portNum)
+			f.onuGemInfo[intfID] = onugem
+		}
+	}
+	f.resourceMgr.AddUniPortToOnuInfo(ctx, intfID, onuID, portNum)
+}
+
+func (f *OpenOltFlowMgr) loadFlowIDlistForGem(ctx context.Context, intf uint32) {
+	flowIDsList, err := f.resourceMgr.GetFlowIDsGemMapForInterface(ctx, intf)
+	if err != nil {
+		log.Errorw("Failed to get flowid list per gem", log.Fields{"intf": intf, "err": err})
+		return
+	}
+	for gem, FlowIDs := range flowIDsList {
+		gemPK := gemPortKey{intf, uint32(gem)}
+		f.flowsUsedByGemPort[gemPK] = FlowIDs
+	}
+	return
+}
+
+//loadInterfaceToMulticastQueueMap reads multicast queues per interface from the KV store
+//and put them into interfaceToMcastQueueMap.
+func (f *OpenOltFlowMgr) loadInterfaceToMulticastQueueMap(ctx context.Context) {
+	storedMulticastQueueMap, err := f.resourceMgr.GetMcastQueuePerInterfaceMap(ctx)
+	if err != nil {
+		log.Errorw("Failed to get pon interface to multicast queue map", log.Fields{"err": err})
+		return
+	}
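+	// each stored entry is a two-element slice: index 0 holds the GEM port ID and index 1 the service priority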
+	for intf, queueInfo := range storedMulticastQueueMap {
+		q := queueInfoBrief{
+			gemPortID:       queueInfo[0],
+			servicePriority: queueInfo[1],
+		}
+		f.interfaceToMcastQueueMap[intf] = &q
+	}
+}
+
+//GetFlowGroupFromKVStore fetches and returns flow group from the KV store. Returns (nil, false, error) if any problem occurs during
+//fetching the data. Returns (group, true, nil) if the group is fetched and returned successfully.
+//Returns (nil, false, nil) if the group does not exist in the KV store.
+func (f *OpenOltFlowMgr) GetFlowGroupFromKVStore(ctx context.Context, groupID uint32, cached bool) (*ofp.OfpGroupEntry, bool, error) {
+	exists, groupInfo, err := f.resourceMgr.GetFlowGroupFromKVStore(ctx, groupID, cached)
+	if err != nil {
+		return nil, false, NewErrNotFound("flow-group", log.Fields{"group-id": groupID}, err).Log()
+	}
+	if exists {
+		return newGroup(groupInfo.GroupID, groupInfo.OutPorts), exists, nil
+	}
+	return nil, exists, nil
+}
+
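+// newGroup builds an OpenFlow group entry of type ALL with one output-action bucket per given out port.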
+func newGroup(groupID uint32, outPorts []uint32) *ofp.OfpGroupEntry {
+	groupDesc := ofp.OfpGroupDesc{
+		Type:    ofp.OfpGroupType_OFPGT_ALL,
+		GroupId: groupID,
+	}
+	groupEntry := ofp.OfpGroupEntry{
+		Desc: &groupDesc,
+	}
+	for i := 0; i < len(outPorts); i++ {
+		var acts []*ofp.OfpAction
+		acts = append(acts, flows.Output(outPorts[i]))
+		bucket := ofp.OfpBucket{
+			Actions: acts,
+		}
+		groupDesc.Buckets = append(groupDesc.Buckets, &bucket)
+	}
+	return &groupEntry
+}
diff --git a/internal/pkg/core/openolt_flowmgr_test.go b/internal/pkg/core/openolt_flowmgr_test.go
new file mode 100644
index 0000000..2890b19
--- /dev/null
+++ b/internal/pkg/core/openolt_flowmgr_test.go
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v3/pkg/techprofile"
+	"github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+	rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+	"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/openolt"
+	openoltpb2 "github.com/opencord/voltha-protos/v3/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+)
+
+var flowMgr *OpenOltFlowMgr
+
+func init() {
+	log.SetDefaultLogger(log.JSON, log.DebugLevel, nil)
+	flowMgr = newMockFlowmgr()
+}
+func newMockResourceMgr() *resourcemanager.OpenOltResourceMgr {
+	ranges := []*openolt.DeviceInfo_DeviceResourceRanges{
+		{IntfIds: []uint32{0, 1, 2}}}
+
+	deviceinfo := &openolt.DeviceInfo{Vendor: "openolt", Model: "openolt", HardwareVersion: "1.0", FirmwareVersion: "1.0",
+		DeviceId: "olt", DeviceSerialNumber: "openolt", PonPorts: 3, Technology: "Default",
+		OnuIdStart: 1, OnuIdEnd: 1, AllocIdStart: 1, AllocIdEnd: 1,
+		GemportIdStart: 1, GemportIdEnd: 1, FlowIdStart: 1, FlowIdEnd: 1,
+		Ranges: ranges,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	rsrMgr := resourcemanager.NewResourceMgr(ctx, "olt", "127.0.0.1:2379", "etcd", "olt", deviceinfo)
+	for key := range rsrMgr.ResourceMgrs {
+		rsrMgr.ResourceMgrs[key].KVStore = &db.Backend{}
+		rsrMgr.ResourceMgrs[key].KVStore.Client = &mocks.MockKVClient{}
+		rsrMgr.ResourceMgrs[key].TechProfileMgr = mocks.MockTechProfile{TpID: key}
+	}
+	return rsrMgr
+}
+
+func newMockFlowmgr() *OpenOltFlowMgr {
+	rMgr := newMockResourceMgr()
+	dh := newMockDeviceHandler()
+
+	rMgr.KVStore = &db.Backend{}
+	rMgr.KVStore.Client = &mocks.MockKVClient{}
+
+	dh.resourceMgr = rMgr
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	flwMgr := NewFlowManager(ctx, dh, rMgr)
+
+	onuGemInfo1 := make([]rsrcMgr.OnuGemInfo, 2)
+	onuGemInfo2 := make([]rsrcMgr.OnuGemInfo, 2)
+	onuGemInfo1[0] = rsrcMgr.OnuGemInfo{OnuID: 1, SerialNumber: "1", IntfID: 1, GemPorts: []uint32{1}}
+	onuGemInfo2[1] = rsrcMgr.OnuGemInfo{OnuID: 2, SerialNumber: "2", IntfID: 2, GemPorts: []uint32{2}}
+	flwMgr.onuGemInfo[1] = onuGemInfo1
+	flwMgr.onuGemInfo[2] = onuGemInfo2
+
+	packetInGemPort := make(map[rsrcMgr.PacketInInfoKey]uint32)
+	packetInGemPort[rsrcMgr.PacketInInfoKey{IntfID: 1, OnuID: 1, LogicalPort: 1}] = 1
+	packetInGemPort[rsrcMgr.PacketInInfoKey{IntfID: 2, OnuID: 2, LogicalPort: 2}] = 2
+
+	flwMgr.packetInGemPort = packetInGemPort
+	tps := make(map[uint32]tp.TechProfileIf)
+	for key := range rMgr.ResourceMgrs {
+		tps[key] = mocks.MockTechProfile{TpID: key}
+	}
+	flwMgr.techprofile = tps
+
+	interface2mcastQueueMap := make(map[uint32]*queueInfoBrief)
+	interface2mcastQueueMap[0] = &queueInfoBrief{
+		gemPortID:       4000,
+		servicePriority: 3,
+	}
+	flwMgr.interfaceToMcastQueueMap = interface2mcastQueueMap
+	return flwMgr
+}
+
+func TestOpenOltFlowMgr_CreateSchedulerQueues(t *testing.T) {
+	// flowMgr := newMockFlowmgr()
+
+	tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
+		ProfileType: "pt1", NumGemPorts: 1, Version: 1,
+		InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
+	}
+	tprofile.UsScheduler.Direction = "UPSTREAM"
+	tprofile.UsScheduler.AdditionalBw = "AdditionalBW_None"
+	tprofile.UsScheduler.QSchedPolicy = "WRR"
+
+	tprofile2 := tprofile
+	tprofile2.DsScheduler.Direction = "DOWNSTREAM"
+	tprofile2.DsScheduler.AdditionalBw = "AdditionalBW_None"
+	tprofile2.DsScheduler.QSchedPolicy = "WRR"
+	bands := make([]*ofp.OfpMeterBandHeader, 2)
+	bands[0] = &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 1000, BurstSize: 5000, Data: &ofp.OfpMeterBandHeader_Drop{}}
+	bands[1] = &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 2000, BurstSize: 5000, Data: &ofp.OfpMeterBandHeader_Drop{}}
+	ofpMeterConfig := &ofp.OfpMeterConfig{Flags: 1, MeterId: 1, Bands: bands}
+	flowmetadata := &voltha.FlowMetadata{
+		Meters: []*ofp.OfpMeterConfig{ofpMeterConfig},
+	}
+	type args struct {
+		Dir          tp_pb.Direction
+		IntfID       uint32
+		OnuID        uint32
+		UniID        uint32
+		UniPort      uint32
+		TpInst       *tp.TechProfile
+		MeterID      uint32
+		flowMetadata *voltha.FlowMetadata
+	}
+	tests := []struct {
+		name       string
+		schedQueue schedQueue
+		wantErr    bool
+	}{
+		// TODO: Add test cases.
+		{"CreateSchedulerQueues-1", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 1, flowmetadata}, false},
+		{"CreateSchedulerQueues-2", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 1, flowmetadata}, false},
+		{"CreateSchedulerQueues-3", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 2, flowmetadata}, true},
+		{"CreateSchedulerQueues-4", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 2, flowmetadata}, true},
+		{"CreateSchedulerQueues-5", schedQueue{tp_pb.Direction_UPSTREAM, 2, 2, 2, 64, 2, tprofile, 2, flowmetadata}, true},
+		{"CreateSchedulerQueues-6", schedQueue{tp_pb.Direction_DOWNSTREAM, 2, 2, 2, 65, 2, tprofile2, 2, flowmetadata}, true},
+		{"CreateSchedulerQueues-13", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 1, flowmetadata}, false},
+		//Negative testcases
+		{"CreateSchedulerQueues-7", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 1, &voltha.FlowMetadata{}}, true},
+		{"CreateSchedulerQueues-8", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 0, &voltha.FlowMetadata{}}, true},
+		{"CreateSchedulerQueues-9", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 1, &voltha.FlowMetadata{}}, true},
+		{"CreateSchedulerQueues-10", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 2, &voltha.FlowMetadata{}}, true},
+		{"CreateSchedulerQueues-11", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 2, &voltha.FlowMetadata{}}, true},
+		{"CreateSchedulerQueues-12", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 2, nil}, true},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := flowMgr.CreateSchedulerQueues(ctx, tt.schedQueue); (err != nil) != tt.wantErr {
+				t.Errorf("OpenOltFlowMgr.CreateSchedulerQueues() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_RemoveSchedulerQueues(t *testing.T) {
+
+	tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
+		ProfileType: "pt1", NumGemPorts: 1, Version: 1,
+		InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
+	}
+	tprofile.UsScheduler.Direction = "UPSTREAM"
+	tprofile.UsScheduler.AdditionalBw = "AdditionalBW_None"
+	tprofile.UsScheduler.QSchedPolicy = "WRR"
+
+	tprofile2 := tprofile
+	tprofile2.DsScheduler.Direction = "DOWNSTREAM"
+	tprofile2.DsScheduler.AdditionalBw = "AdditionalBW_None"
+	tprofile2.DsScheduler.QSchedPolicy = "WRR"
+	type args struct {
+		Dir     tp_pb.Direction
+		IntfID  uint32
+		OnuID   uint32
+		UniID   uint32
+		UniPort uint32
+		TpInst  *tp.TechProfile
+	}
+	tests := []struct {
+		name       string
+		schedQueue schedQueue
+		wantErr    bool
+	}{
+		// TODO: Add test cases.
+		{"RemoveSchedulerQueues", schedQueue{tp_pb.Direction_UPSTREAM, 1, 1, 1, 64, 1, tprofile, 0, nil}, false},
+		{"RemoveSchedulerQueues", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 0, nil}, false},
+		// negative test cases
+		{"RemoveSchedulerQueues", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 0, nil}, false},
+		{"RemoveSchedulerQueues", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 1, 1, 65, 1, tprofile2, 0, nil}, false},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := flowMgr.RemoveSchedulerQueues(ctx, tt.schedQueue); (err != nil) != tt.wantErr {
+				t.Errorf("OpenOltFlowMgr.RemoveSchedulerQueues() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+
+}
+
+func TestOpenOltFlowMgr_RemoveFlow(t *testing.T) {
+	log.Debug("Starting RemoveFlow() test")
+	fa := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(2),
+			fu.Metadata_ofp(2),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+			fu.Output(1),
+		},
+	}
+	ofpstats := fu.MkFlowStat(fa)
+	ofpstats.Cookie = ofpstats.Id
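+	// LLDP trap-to-controller flow (EthType 0x88CC)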
+	lldpFa := &fu.FlowArgs{
+		KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1),
+			fu.EthType(0x88CC),
+			fu.TunnelId(536870912),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+		},
+	}
+	lldpofpstats := fu.MkFlowStat(lldpFa)
+	//lldpofpstats.Cookie = lldpofpstats.Id
+
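+	// DHCP trap-to-controller flow (UDP source port 67)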
+	dhcpFa := &fu.FlowArgs{
+		KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1),
+			fu.UdpSrc(67),
+			//fu.TunnelId(536870912),
+			fu.IpProto(17),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+		},
+	}
+	dhcpofpstats := fu.MkFlowStat(dhcpFa)
+	//dhcpofpstats.Cookie = dhcpofpstats.Id
+
+	//multicast flow
+	multicastFa := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65536),
+			fu.VlanVid(660),             //vlan
+			fu.Metadata_ofp(uint64(66)), //inner vlan
+			fu.EthType(0x800),           //ipv4
+			fu.Ipv4Dst(3809869825),      //227.22.0.1
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Group(1),
+		},
+	}
+	multicastOfpStats := fu.MkFlowStat(multicastFa)
+	multicastOfpStats.Id = 1
+
+	type args struct {
+		flow *ofp.OfpFlowStats
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"RemoveFlow", args{flow: ofpstats}},
+		{"RemoveFlow", args{flow: lldpofpstats}},
+		{"RemoveFlow", args{flow: dhcpofpstats}},
+		{"RemoveFlow", args{flow: multicastOfpStats}},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			flowMgr.RemoveFlow(ctx, tt.args.flow)
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_AddFlow(t *testing.T) {
+	kw := make(map[string]uint64)
+	kw["table_id"] = 1
+	kw["meter_id"] = 1
+	kw["write_metadata"] = 0x4000000000 // Tech-Profile-ID 64
+
+	// Upstream flow
+	fa := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65536),
+			fu.PushVlan(0x8100),
+		},
+		KV: kw,
+	}
+
+	// Downstream flow
+	fa3 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65536),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			//fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+			fu.PopVlan(),
+			fu.Output(536870912),
+		},
+		KV: kw,
+	}
+
+	fa2 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1000),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+			fu.Output(65533),
+		},
+		KV: kw,
+	}
+
+	// Flows for negative scenarios
+	// Failure in formulateActionInfoFromFlow()
+	fa4 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1000),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Experimenter(257, []byte{1, 2, 3, 4}),
+		},
+		KV: kw,
+	}
+
+	// Invalid Output
+	fa5 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1000),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(0),
+		},
+		KV: kw,
+	}
+
+	// Tech-Profile-ID update (not supported)
+	kw6 := make(map[string]uint64)
+	kw6["table_id"] = 1
+	kw6["meter_id"] = 1
+	kw6["write_metadata"] = 0x4100000000 // TpID Other than the stored one
+	fa6 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.TunnelId(16),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65535),
+		},
+		KV: kw6,
+	}
+
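+	// LLDP trap flow (EthType 0x88CC)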
+	lldpFa := &fu.FlowArgs{
+		KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1),
+			fu.EthType(0x88CC),
+			fu.TunnelId(536870912),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+		},
+	}
+
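+	// DHCP trap flow (UDP source port 67)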
+	dhcpFa := &fu.FlowArgs{
+		KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1),
+			fu.UdpSrc(67),
+			//fu.TunnelId(536870912),
+			fu.IpProto(17),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+		},
+	}
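+	// IGMP trap flow (IP protocol 2)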
+	igmpFa := &fu.FlowArgs{
+		KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(1),
+			fu.UdpSrc(67),
+			//fu.TunnelId(536870912),
+			fu.IpProto(2),
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+		},
+	}
+
+	fa9 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.TunnelId(16),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+			fu.VlanPcp(1000),
+			fu.UdpDst(65535),
+			fu.UdpSrc(536870912),
+			fu.Ipv4Dst(65535),
+			fu.Ipv4Src(536870912),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65535),
+		},
+		KV: kw6,
+	}
+
+	fa10 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65533),
+			//	fu.TunnelId(16),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+			fu.VlanPcp(1000),
+			fu.UdpDst(65535),
+			fu.UdpSrc(536870912),
+			fu.Ipv4Dst(65535),
+			fu.Ipv4Src(536870912),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65535),
+		},
+		KV: kw6,
+	}
+	//multicast flow
+	fa11 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65536),
+			fu.VlanVid(660),             //vlan
+			fu.Metadata_ofp(uint64(66)), //inner vlan
+			fu.EthType(0x800),           //ipv4
+			fu.Ipv4Dst(3809869825),      //227.22.0.1
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Group(1),
+		},
+		KV: kw6,
+	}
+	ofpstats := fu.MkFlowStat(fa)
+	ofpstats2 := fu.MkFlowStat(fa2)
+	ofpstats3 := fu.MkFlowStat(fa3)
+	ofpstats4 := fu.MkFlowStat(fa4)
+	ofpstats5 := fu.MkFlowStat(fa5)
+	ofpstats6 := fu.MkFlowStat(fa6)
+	ofpstats7 := fu.MkFlowStat(lldpFa)
+	ofpstats8 := fu.MkFlowStat(dhcpFa)
+	ofpstats9 := fu.MkFlowStat(fa9)
+	ofpstats10 := fu.MkFlowStat(fa10)
+	igmpstats := fu.MkFlowStat(igmpFa)
+	ofpstats11 := fu.MkFlowStat(fa11)
+
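+	// Keep the flows whose test cases are commented out below referenced so the compiler does not flag them as unused.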
+	fmt.Println(ofpstats6, ofpstats9, ofpstats10)
+
+	ofpMeterConfig := &ofp.OfpMeterConfig{Flags: 1, MeterId: 1}
+	flowMetadata := &voltha.FlowMetadata{
+		Meters: []*ofp.OfpMeterConfig{ofpMeterConfig},
+	}
+	type args struct {
+		flow         *ofp.OfpFlowStats
+		flowMetadata *voltha.FlowMetadata
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"AddFlow", args{flow: ofpstats, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats2, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats3, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats4, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats5, flowMetadata: flowMetadata}},
+		//{"AddFlow", args{flow: ofpstats6, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats7, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: ofpstats8, flowMetadata: flowMetadata}},
+		//{"AddFlow", args{flow: ofpstats9, flowMetadata: flowMetadata}},
+		{"AddFlow", args{flow: igmpstats, flowMetadata: flowMetadata}},
+		//{"AddFlow", args{flow: ofpstats10, flowMetadata: flowMetadata}},
+		//ofpstats10
+		{"AddFlow", args{flow: ofpstats11, flowMetadata: flowMetadata}},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			flowMgr.AddFlow(ctx, tt.args.flow, tt.args.flowMetadata)
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_UpdateOnuInfo(t *testing.T) {
+	type args struct {
+		intfID    uint32
+		onuID     uint32
+		serialNum string
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"UpdateOnuInfo", args{1, 1, "onu1"}},
+		{"UpdateOnuInfo", args{2, 3, "onu1"}},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			flowMgr.UpdateOnuInfo(ctx, tt.args.intfID, tt.args.onuID, tt.args.serialNum)
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_deleteGemPortFromLocalCache(t *testing.T) {
+	type args struct {
+		intfID                uint32
+		onuID                 uint32
+		gemPortIDs            []uint32
+		gemPortIDsToBeDeleted []uint32
+		serialNum             string
+		finalLength           int
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// Add/Delete single gem port
+		{"DeleteGemPortFromLocalCache1", args{0, 1, []uint32{1}, []uint32{1}, "onu1", 0}},
+		// Delete all gemports
+		{"DeleteGemPortFromLocalCache2", args{0, 1, []uint32{1, 2, 3, 4}, []uint32{1, 2, 3, 4}, "onu1", 0}},
+		// Try to delete when there is no gem port
+		{"DeleteGemPortFromLocalCache3", args{0, 1, []uint32{}, []uint32{1, 2}, "onu1", 0}},
+		// Try to delete non-existent gem port
+		{"DeleteGemPortFromLocalCache4", args{0, 1, []uint32{1}, []uint32{2}, "onu1", 1}},
+		// Try to delete two of the gem ports
+		{"DeleteGemPortFromLocalCache5", args{0, 1, []uint32{1, 2, 3, 4}, []uint32{2, 4}, "onu1", 2}},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			flowMgr.UpdateOnuInfo(ctx, tt.args.intfID, tt.args.onuID, tt.args.serialNum)
+			for _, gemPort := range tt.args.gemPortIDs {
+				flowMgr.addGemPortToOnuInfoMap(ctx, tt.args.intfID, tt.args.onuID, gemPort)
+			}
+			for _, gemPortDeleted := range tt.args.gemPortIDsToBeDeleted {
+				flowMgr.deleteGemPortFromLocalCache(tt.args.intfID, tt.args.onuID, gemPortDeleted)
+			}
+			lenofGemPorts := len(flowMgr.onuGemInfo[tt.args.intfID][0].GemPorts)
+			if lenofGemPorts != tt.args.finalLength {
+				t.Errorf("GemPorts length is not as expected len = %d, want %d", lenofGemPorts, tt.args.finalLength)
+			}
+
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_GetLogicalPortFromPacketIn(t *testing.T) {
+	type args struct {
+		packetIn *openoltpb2.PacketIndication
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    uint32
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 1, GemportId: 1, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1, false},
+		{"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "nni", IntfId: 1, GemportId: 1, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1048577, false},
+		// Negative Test cases.
+		{"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 2, GemportId: 1, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 0, true},
+		{"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 1, GemportId: 1, FlowId: 100, PortNo: 0, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 4112, false},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			got, err := flowMgr.GetLogicalPortFromPacketIn(ctx, tt.args.packetIn)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("OpenOltFlowMgr.GetLogicalPortFromPacketIn() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("OpenOltFlowMgr.GetLogicalPortFromPacketIn() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_GetPacketOutGemPortID(t *testing.T) {
+
+	type args struct {
+		intfID  uint32
+		onuID   uint32
+		portNum uint32
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    uint32
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"GetPacketOutGemPortID", args{intfID: 1, onuID: 1, portNum: 1}, 1, false},
+		{"GetPacketOutGemPortID", args{intfID: 2, onuID: 2, portNum: 2}, 2, false},
+		{"GetPacketOutGemPortID", args{intfID: 1, onuID: 2, portNum: 2}, 0, true},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			got, err := flowMgr.GetPacketOutGemPortID(ctx, tt.args.intfID, tt.args.onuID, tt.args.portNum)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("OpenOltFlowMgr.GetPacketOutGemPortID() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("OpenOltFlowMgr.GetPacketOutGemPortID() = %v, want %v", got, tt.want)
+			}
+
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_DeleteTechProfileInstance(t *testing.T) {
+	type args struct {
+		intfID uint32
+		onuID  uint32
+		uniID  uint32
+		sn     string
+		tpID   uint32
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"DeleteTechProfileInstance", args{intfID: 0, onuID: 1, uniID: 1, sn: "", tpID: 64}, false},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := flowMgr.DeleteTechProfileInstance(ctx, tt.args.intfID, tt.args.onuID, tt.args.uniID, tt.args.sn, tt.args.tpID); (err != nil) != tt.wantErr {
+				t.Errorf("OpenOltFlowMgr.DeleteTechProfileInstance() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_checkAndAddFlow(t *testing.T) {
+	kw := make(map[string]uint64)
+	kw["table_id"] = 1
+	kw["meter_id"] = 1
+	kw["write_metadata"] = 0x4000000000 // Tech-Profile-ID 64
+
+	// Upstream flow
+	fa := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.Metadata_ofp(1),
+			fu.IpProto(17), // dhcp
+			fu.VlanPcp(0),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65536),
+			fu.PushVlan(0x8100),
+		},
+		KV: kw,
+	}
+
+	// EAPOL
+	fa2 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.Metadata_ofp(1),
+			fu.EthType(0x888E),
+			fu.VlanPcp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+			fu.Output(65536),
+			fu.PushVlan(0x8100),
+		},
+		KV: kw,
+	}
+
+	// HSIA
+	fa3 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(536870912),
+			fu.Metadata_ofp(1),
+			//fu.EthType(0x8100),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0)),
+			fu.Output(65536),
+			fu.PushVlan(0x8100),
+		},
+		KV: kw,
+	}
+
+	fa4 := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65535),
+			fu.Metadata_ofp(1),
+			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+			fu.VlanPcp(1),
+		},
+		Actions: []*ofp.OfpAction{
+			//fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0)),
+			fu.Output(536870912),
+			fu.PopVlan(),
+		},
+		KV: kw,
+	}
+
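+	// Build classifier and action maps from the flows above using the flow manager's own formulate helpers, mirroring how a flow is decomposed before it is programmed.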
+	classifierInfo := make(map[string]interface{})
+	actionInfo := make(map[string]interface{})
+	classifierInfo2 := make(map[string]interface{})
+	actionInfo2 := make(map[string]interface{})
+	classifierInfo3 := make(map[string]interface{})
+	actionInfo3 := make(map[string]interface{})
+	classifierInfo4 := make(map[string]interface{})
+	actionInfo4 := make(map[string]interface{})
+	flowState := fu.MkFlowStat(fa)
+	flowState2 := fu.MkFlowStat(fa2)
+	flowState3 := fu.MkFlowStat(fa3)
+	flowState4 := fu.MkFlowStat(fa4)
+	formulateClassifierInfoFromFlow(classifierInfo, flowState)
+	formulateClassifierInfoFromFlow(classifierInfo2, flowState2)
+	formulateClassifierInfoFromFlow(classifierInfo3, flowState3)
+	formulateClassifierInfoFromFlow(classifierInfo4, flowState4)
+
+	err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flowState)
+	if err != nil {
+		// Error logging is already done in the called function
+		// So just return in case of error
+		return
+	}
+
+	err = formulateActionInfoFromFlow(actionInfo2, classifierInfo2, flowState2)
+	if err != nil {
+		// Error logging is already done in the called function
+		// So just return in case of error
+		return
+	}
+
+	err = formulateActionInfoFromFlow(actionInfo3, classifierInfo3, flowState3)
+	if err != nil {
+		// Error logging is already done in the called function
+		// So just return in case of error
+		return
+	}
+
+	err = formulateActionInfoFromFlow(actionInfo4, classifierInfo4, flowState4)
+	if err != nil {
+		// Error logging is already done in the called function
+		// So just return in case of error
+		return
+	}
+
+	TpInst := &tp.TechProfile{
+		Name:                 "Test-Tech-Profile",
+		SubscriberIdentifier: "257",
+		ProfileType:          "Mock",
+		Version:              1,
+		NumGemPorts:          4,
+		InstanceCtrl: tp.InstanceControl{
+			Onu: "1",
+			Uni: "16",
+		},
+	}
+
+	type fields struct {
+		techprofile   []tp.TechProfileIf
+		deviceHandler *DeviceHandler
+		resourceMgr   *rsrcMgr.OpenOltResourceMgr
+	}
+	type args struct {
+		args           map[string]uint32
+		classifierInfo map[string]interface{}
+		actionInfo     map[string]interface{}
+		flow           *ofp.OfpFlowStats
+		gemPort        uint32
+		intfID         uint32
+		onuID          uint32
+		uniID          uint32
+		portNo         uint32
+		TpInst         *tp.TechProfile
+		allocID        []uint32
+		gemPorts       []uint32
+		TpID           uint32
+		uni            string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+	}{
+		{
+			name: "checkAndAddFlow-1",
+			args: args{
+				args:           nil,
+				classifierInfo: classifierInfo,
+				actionInfo:     actionInfo,
+				flow:           flowState,
+				gemPort:        1,
+				intfID:         1,
+				onuID:          1,
+				uniID:          16,
+				portNo:         1,
+				TpInst:         TpInst,
+				allocID:        []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+				gemPorts:       []uint32{1, 2, 3, 4},
+				TpID:           64,
+				uni:            "16",
+			},
+		},
+		{
+			name: "checkAndAddFlow-2",
+			args: args{
+				args:           nil,
+				classifierInfo: classifierInfo2,
+				actionInfo:     actionInfo2,
+				flow:           flowState2,
+				gemPort:        1,
+				intfID:         1,
+				onuID:          1,
+				uniID:          16,
+				portNo:         1,
+				TpInst:         TpInst,
+				allocID:        []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+				gemPorts:       []uint32{1, 2, 3, 4},
+				TpID:           64,
+				uni:            "16",
+			},
+		},
+		{
+			name: "checkAndAddFlow-3",
+			args: args{
+				args:           nil,
+				classifierInfo: classifierInfo3,
+				actionInfo:     actionInfo3,
+				flow:           flowState3,
+				gemPort:        1,
+				intfID:         1,
+				onuID:          1,
+				uniID:          16,
+				portNo:         1,
+				TpInst:         TpInst,
+				allocID:        []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+				gemPorts:       []uint32{1, 2, 3, 4},
+				TpID:           64,
+				uni:            "16",
+			},
+		},
+		{
+			name: "checkAndAddFlow-4",
+			args: args{
+				args:           nil,
+				classifierInfo: classifierInfo4,
+				actionInfo:     actionInfo4,
+				flow:           flowState4,
+				gemPort:        1,
+				intfID:         1,
+				onuID:          1,
+				uniID:          16,
+				portNo:         1,
+				TpInst:         TpInst,
+				allocID:        []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+				gemPorts:       []uint32{1, 2, 3, 4},
+				TpID:           64,
+				uni:            "16",
+			},
+		},
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			flowMgr.checkAndAddFlow(ctx, tt.args.args, tt.args.classifierInfo, tt.args.actionInfo, tt.args.flow,
+				tt.args.TpInst, tt.args.gemPorts, tt.args.TpID, tt.args.uni)
+		})
+	}
+}
+
+func TestOpenOltFlowMgr_TestMulticastFlow(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	//create group
+	group := newGroup(2, []uint32{1})
+	flowMgr.AddGroup(ctx, group)
+
+	//create multicast flow
+	multicastFlowArgs := &fu.FlowArgs{
+		MatchFields: []*ofp.OfpOxmOfbField{
+			fu.InPort(65536),
+			fu.VlanVid(660),             //vlan
+			fu.Metadata_ofp(uint64(66)), //inner vlan
+			fu.EthType(0x800),           //ipv4
+			fu.Ipv4Dst(3809869825),      //227.22.0.1
+		},
+		Actions: []*ofp.OfpAction{
+			fu.Group(1),
+		},
+	}
+	ofpStats := fu.MkFlowStat(multicastFlowArgs)
+	flowMgr.AddFlow(ctx, ofpStats, &voltha.FlowMetadata{})
+
+	//add bucket to the group
+	group = newGroup(2, []uint32{1, 2})
+
+	flowMgr.ModifyGroup(ctx, group)
+}
diff --git a/internal/pkg/core/openolt_test.go b/internal/pkg/core/openolt_test.go
new file mode 100644
index 0000000..5944db2
--- /dev/null
+++ b/internal/pkg/core/openolt_test.go
@@ -0,0 +1,985 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+This file contains unit test cases for functions in the file openolt.go.
+This file also implements the fields struct, which mocks the OpenOLT type, and a few utility functions.
+*/
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"errors"
+	com "github.com/opencord/voltha-lib-go/v3/pkg/adapters/common"
+	fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"reflect"
+	"sync"
+	"testing"
+)
+
+// fields mocks the OpenOLT struct.
+type fields struct {
+	deviceHandlers        map[string]*DeviceHandler
+	coreProxy             *com.CoreProxy
+	adapterProxy          *com.AdapterProxy
+	eventProxy            *com.EventProxy
+	kafkaICProxy          kafka.InterContainerProxy
+	numOnus               int
+	KVStoreHost           string
+	KVStorePort           int
+	KVStoreType           string
+	exitChannel           chan int
+	lockDeviceHandlersMap sync.RWMutex
+	ctx                   context.Context
+}
+
+// mockOlt returns a fields instance pre-populated with a mock device handler.
+func mockOlt() *fields {
+	dh := newMockDeviceHandler()
+	newOlt := &fields{}
+	newOlt.deviceHandlers = map[string]*DeviceHandler{}
+	newOlt.deviceHandlers[dh.device.Id] = dh
+	return newOlt
+}
+
+// testOltObject maps the fields struct to an OpenOLT instance.
+func testOltObject(testOlt *fields) *OpenOLT {
+	return &OpenOLT{
+		deviceHandlers: testOlt.deviceHandlers,
+		coreProxy:      testOlt.coreProxy,
+		adapterProxy:   testOlt.adapterProxy,
+		eventProxy:     testOlt.eventProxy,
+		kafkaICProxy:   testOlt.kafkaICProxy,
+		numOnus:        testOlt.numOnus,
+		KVStoreHost:    testOlt.KVStoreHost,
+		KVStorePort:    testOlt.KVStorePort,
+		KVStoreType:    testOlt.KVStoreType,
+		exitChannel:    testOlt.exitChannel,
+	}
+}
+
+// mockDevice mocks Device.
+func mockDevice() *voltha.Device {
+	device := &voltha.Device{
+		Id:       "olt",
+		Root:     true,
+		ParentId: "logical_device",
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon"},
+			{PortNo: 2, Label: "nni"},
+		},
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "olt",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+	}
+	return device
+}
+
+func TestNewOpenOLT(t *testing.T) {
+	tests := []struct {
+		name        string
+		fields      *fields
+		configFlags *config.AdapterFlags
+		want        *OpenOLT
+	}{
+		{"newopenolt-1", &fields{}, &config.AdapterFlags{OnuNumber: 1, KVStorePort: 1, KVStoreType: "consul", KVStoreHost: "1.1.1.1"},
+			&OpenOLT{numOnus: 1, KVStorePort: 1, KVStoreType: "consul", KVStoreHost: "1.1.1.1"}},
+		{"newopenolt-2", &fields{}, &config.AdapterFlags{OnuNumber: 2, KVStorePort: 2, KVStoreType: "etcd", KVStoreHost: "2.2.2.2"},
+			&OpenOLT{numOnus: 2, KVStorePort: 2, KVStoreType: "etcd", KVStoreHost: "2.2.2.2"}},
+		{"newopenolt-3", &fields{}, &config.AdapterFlags{OnuNumber: 3, KVStorePort: 3, KVStoreType: "consul", KVStoreHost: "3.3.3.3"},
+			&OpenOLT{numOnus: 3, KVStorePort: 3, KVStoreType: "consul", KVStoreHost: "3.3.3.3"}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NewOpenOLT(tt.fields.ctx, tt.fields.kafkaICProxy, tt.fields.coreProxy, tt.fields.adapterProxy,
+				tt.fields.eventProxy, tt.configFlags); reflect.TypeOf(got) != reflect.TypeOf(tt.want) && got != nil {
+				t.Errorf("NewOpenOLT() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Abandon_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"abandon_device-1", &fields{}, args{}, ErrNotImplemented},
+		{"abandon_device-2", &fields{}, args{}, ErrNotImplemented},
+		{"abandon_device-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Abandon_device(tt.args.device); err != tt.wantErr {
+				t.Errorf("Abandon_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Activate_image_update(t *testing.T) {
+	type args struct {
+		device  *voltha.Device
+		request *voltha.ImageDownload
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *voltha.ImageDownload
+		wantErr error
+	}{
+		{"activate_image_update-1", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123XYZ"},
+			ErrNotImplemented},
+		{"activate_image_update-2", &fields{}, args{}, &voltha.ImageDownload{Id: "Image2-ABC123CDE"},
+			ErrNotImplemented},
+		{"activate_image_update-3", &fields{}, args{}, &voltha.ImageDownload{Id: "Image3-ABC123EFG"},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Activate_image_update(tt.args.device, tt.args.request)
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Activate_image_update() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Adapter_descriptor(t *testing.T) {
+	tests := []struct {
+		name    string
+		fields  *fields
+		wantErr error
+	}{
+		{"adapter_descriptor-1", &fields{}, ErrNotImplemented},
+		{"adapter_descriptor-2", &fields{}, ErrNotImplemented},
+		{"adapter_descriptor-3", &fields{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Adapter_descriptor(); err != tt.wantErr {
+				t.Errorf("Adapter_descriptor() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Adopt_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	var device = mockDevice()
+	device.Id = "olt"
+	nilDevice := NewErrInvalidValue(log.Fields{"device": nil}, nil)
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"adopt_device-1", mockOlt(), args{}, nilDevice},
+		{"adopt_device-2", mockOlt(), args{device}, nilDevice},
+		{"adopt_device-3", mockOlt(), args{mockDevice()}, nil},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			err := oo.Adopt_device(tt.args.device)
+			if (err != nil) && (reflect.TypeOf(err) !=
+				reflect.TypeOf(tt.wantErr)) && (tt.args.device == nil) {
+				t.Errorf("Adopt_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+			if err == nil {
+				t.Log("returned nil")
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Cancel_image_download(t *testing.T) {
+	type args struct {
+		device  *voltha.Device
+		request *voltha.ImageDownload
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *voltha.ImageDownload
+		wantErr error
+	}{
+		{"cancel_image_download-1", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123XYZ"},
+			ErrNotImplemented},
+		{"cancel_image_download-2", &fields{}, args{}, &voltha.ImageDownload{Id: "Image2-ABC123IJK"},
+			ErrNotImplemented},
+		{"cancel_image_download-3", &fields{}, args{}, &voltha.ImageDownload{Id: "Image3-ABC123KLM"},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Cancel_image_download(tt.args.device, tt.args.request)
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Cancel_image_download() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Delete_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"delete_device-1", &fields{}, args{mockDevice()},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Delete_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Delete_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Device_types(t *testing.T) {
+	tests := []struct {
+		name    string
+		fields  *fields
+		want    *voltha.DeviceTypes
+		wantErr error
+	}{
+		{"device_types-1", &fields{}, &voltha.DeviceTypes{},
+			ErrNotImplemented},
+		{"device_types-2", &fields{}, &voltha.DeviceTypes{},
+			ErrNotImplemented},
+		{"device_types-3", &fields{}, &voltha.DeviceTypes{},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Device_types()
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Device_types() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Disable_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"disable_device-1", mockOlt(), args{mockDevice()}, nil},
+		{"disable_device-2", &fields{}, args{mockDevice()},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Disable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Disable_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Download_image(t *testing.T) {
+	type args struct {
+		device  *voltha.Device
+		request *voltha.ImageDownload
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *voltha.ImageDownload
+		wantErr error
+	}{
+		{"download_image-1", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123XYZ"},
+			ErrNotImplemented},
+		{"download_image-2", &fields{}, args{}, &voltha.ImageDownload{Id: "Image2-ABC123LKJ"},
+			ErrNotImplemented},
+		{"download_image-3", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123RTY"},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Download_image(tt.args.device, tt.args.request)
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Download_image() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Get_device_details(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"get_device_details-1", &fields{}, args{}, ErrNotImplemented},
+		{"get_device_details-2", &fields{}, args{}, ErrNotImplemented},
+		{"get_device_details-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Get_device_details(tt.args.device); err != tt.wantErr {
+				t.Errorf("Get_device_details() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Get_image_download_status(t *testing.T) {
+	type args struct {
+		device  *voltha.Device
+		request *voltha.ImageDownload
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *voltha.ImageDownload
+		wantErr error
+	}{
+		{"get_image_download_status-1", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123XYZ"},
+			ErrNotImplemented},
+		{"get_image_download_status-2", &fields{}, args{}, &voltha.ImageDownload{Id: "Image2-ABC123LKJ"},
+			ErrNotImplemented},
+		{"get_image_download_status-3", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123DFG"},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Get_image_download_status(tt.args.device, tt.args.request)
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Get_image_download_status() got = %v want = %v error = %v, wantErr %v",
+					got, tt.want, err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Get_ofp_device_info(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *ic.SwitchCapability
+		wantErr error
+	}{
+		{"get_ofp_device_info-1", mockOlt(), args{mockDevice()}, &ic.SwitchCapability{
+			Desc: &openflow_13.OfpDesc{
+				MfrDesc: "VOLTHA Project",
+				HwDesc:  "open_pon",
+				SwDesc:  "open_pon",
+			},
+			SwitchFeatures: &openflow_13.OfpSwitchFeatures{
+				NBuffers:     uint32(256),
+				NTables:      uint32(2),
+				Capabilities: uint32(15),
+			},
+		}, nil},
+		{"get_ofp_device_info-2", &fields{}, args{mockDevice()}, nil,
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Get_ofp_device_info(tt.args.device)
+			if !reflect.DeepEqual(err, tt.wantErr) || !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("Get_ofp_device_info() got = %v want = %v error = %v, wantErr = %v",
+					got, tt.want, err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Get_ofp_port_info(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+		portNo int64
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *ic.PortCapability
+		wantErr error
+	}{
+		{"get_ofp_port_info-1", mockOlt(), args{mockDevice(), 1}, &ic.PortCapability{
+			Port: &voltha.LogicalPort{
+				DeviceId:     "olt",
+				DevicePortNo: uint32(1),
+				OfpPort: &openflow_13.OfpPort{
+					HwAddr:     []uint32{1, 2, 3, 4, 5, 6},
+					State:      uint32(4),
+					Curr:       uint32(4128),
+					Advertised: uint32(4128),
+					Peer:       uint32(4128),
+					CurrSpeed:  uint32(32),
+					MaxSpeed:   uint32(32),
+				},
+			},
+		}, nil},
+		{"get_ofp_port_info-2", &fields{}, args{mockDevice(), 1}, nil,
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Get_ofp_port_info(tt.args.device, tt.args.portNo)
+			if !reflect.DeepEqual(err, tt.wantErr) || !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("Get_ofp_port_info() got = %v want = %v error = %v, wantErr = %v",
+					got, tt.want, err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Health(t *testing.T) {
+	tests := []struct {
+		name    string
+		fields  *fields
+		want    *voltha.HealthStatus
+		wantErr error
+	}{
+		{"health-1", &fields{}, &voltha.HealthStatus{}, ErrNotImplemented},
+		{"health-2", &fields{}, &voltha.HealthStatus{}, ErrNotImplemented},
+		{"health-3", &fields{}, &voltha.HealthStatus{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Health()
+			if err != tt.wantErr && got == nil {
+				t.Errorf("Health() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Process_inter_adapter_message(t *testing.T) {
+	type args struct {
+		msg *ic.InterAdapterMessage
+	}
+	var message1 = args{
+		msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:            "olt",
+				ProxyDeviceId: "",
+				ToDeviceId:    "onu1",
+			},
+		},
+	}
+	var message2 = args{
+		msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:            "olt",
+				ProxyDeviceId: "olt",
+				ToDeviceId:    "olt",
+				Type:          ic.InterAdapterMessageType_OMCI_REQUEST,
+			},
+		},
+	}
+	var message3 = args{
+		msg: &ic.InterAdapterMessage{
+			Header: &ic.InterAdapterHeader{
+				Id:            "olt",
+				ProxyDeviceId: "olt",
+				ToDeviceId:    "olt",
+				Type:          ic.InterAdapterMessageType_FLOW_REQUEST,
+			},
+		},
+	}
+	tests := []struct {
+		name        string
+		fields      *fields
+		args        args
+		wantErrType reflect.Type
+	}{
+		{"process_inter_adapter_message-1", mockOlt(), message1,
+			reflect.TypeOf(&ErrNotFound{})},
+		{"process_inter_adapter_message-2", mockOlt(), message2,
+			reflect.TypeOf(errors.New("message is nil"))},
+		{"process_inter_adapter_message-3", mockOlt(), message3,
+			reflect.TypeOf(&ErrInvalidValue{})},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Process_inter_adapter_message(tt.args.msg); reflect.TypeOf(err) != tt.wantErrType {
+				t.Errorf("Process_inter_adapter_message() error = %v, wantErr %v",
+					reflect.TypeOf(err), tt.wantErrType)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Reboot_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"reboot_device-1", mockOlt(), args{mockDevice()}, nil},
+		{"reboot_device-2", &fields{}, args{mockDevice()},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Reboot_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Reboot_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Receive_packet_out(t *testing.T) {
+	acts := []*ofp.OfpAction{
+		fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA))),
+		fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+		fu.Output(1),
+	}
+	type args struct {
+		deviceID     string
+		egressPortNo int
+		packet       *openflow_13.OfpPacketOut
+	}
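+	// pktout carries a canned, opaque packet payload purely to exercise the packet-out path.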
+	pktout := &ofp.OfpPacketOut{BufferId: 0, InPort: 1, Actions: acts, Data: []byte("AYDCAAAOAODsSE5TiMwCBwQA4OxITlIEBQUwLzUx" +
+		"BgIAFAgEMC81MQoJbG9jYWxob3N0EBwFAawbqqACAAAAoRAxLjMuNi4xLjQuMS40NDEz/gYAgMILAgD+GQCAwgkDAAAAAGQAAAAAAAAAAgICAgICAgL+" +
+		"GQCAwgoDAAAAAGQAAAAAAAAAAgICAgICAgIAAA==")}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"receive_packet_out-1", mockOlt(), args{mockDevice().Id, 1, pktout}, nil},
+		{"receive_packet_out-2", mockOlt(), args{"1234", 1, pktout},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "1234"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Receive_packet_out(tt.args.deviceID, tt.args.egressPortNo, tt.args.packet); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Receive_packet_out() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Reconcile_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	expectedError := NewErrInvalidValue(log.Fields{"device": nil}, nil)
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"reconcile_device-1", &fields{}, args{}, expectedError},
+		{"reconcile_device-2", &fields{}, args{}, expectedError},
+		{"reconcile_device-3", &fields{}, args{}, expectedError},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Reconcile_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Reconcile_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Reenable_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"reenable_device-1", mockOlt(), args{mockDevice()}, nil},
+		{"reenable_device-2", &fields{}, args{mockDevice()},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Reenable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Reenable_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Revert_image_update(t *testing.T) {
+	type args struct {
+		device  *voltha.Device
+		request *voltha.ImageDownload
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		want    *voltha.ImageDownload
+		wantErr error
+	}{
+		{"revert_image_update-1", &fields{}, args{}, &voltha.ImageDownload{Id: "Image1-ABC123XYZ"},
+			ErrNotImplemented},
+		{"revert_image_update-2", &fields{}, args{}, &voltha.ImageDownload{Id: "Image2-ABC123TYU"},
+			ErrNotImplemented},
+		{"revert_image_update-3", &fields{}, args{}, &voltha.ImageDownload{Id: "Image3-ABC123GTH"},
+			ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			got, err := oo.Revert_image_update(tt.args.device, tt.args.request)
+			if err != tt.wantErr && got == nil {
+				t.Log("error :", err)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Self_test_device(t *testing.T) {
+	type args struct {
+		device *voltha.Device
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"self_test_device-1", &fields{}, args{}, ErrNotImplemented},
+		{"self_test_device-2", &fields{}, args{}, ErrNotImplemented},
+		{"self_test_device-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Self_test_device(tt.args.device); err != tt.wantErr {
+				t.Errorf("Self_test_device() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Start(t *testing.T) {
+	type args struct {
+		ctx context.Context
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"start-1", &fields{}, args{}, errors.New("start error")},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Start(tt.args.ctx); err != nil {
+				t.Errorf("Start() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Stop(t *testing.T) {
+	type args struct {
+		ctx context.Context
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"stop-1", &fields{exitChannel: make(chan int, 1)}, args{}, errors.New("stop error")},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			oo.Start(tt.args.ctx)
+			if err := oo.Stop(tt.args.ctx); err != nil {
+				t.Errorf("Stop() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Suppress_event(t *testing.T) {
+	type args struct {
+		filter *voltha.EventFilter
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"suppress_event-1", &fields{}, args{}, ErrNotImplemented},
+		{"suppress_event-2", &fields{}, args{}, ErrNotImplemented},
+		{"suppress_event-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Suppress_event(tt.args.filter); err != tt.wantErr {
+				t.Errorf("Suppress_event() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Unsuppress_event(t *testing.T) {
+	type args struct {
+		filter *voltha.EventFilter
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"unsuppress_event-1", &fields{}, args{}, ErrNotImplemented},
+		{"unsuppress_event-2", &fields{}, args{}, ErrNotImplemented},
+		{"unsuppress_event-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Unsuppress_event(tt.args.filter); err != tt.wantErr {
+				t.Errorf("Unsuppress_event() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Update_flows_bulk(t *testing.T) {
+	type args struct {
+		device       *voltha.Device
+		flows        *voltha.Flows
+		groups       *voltha.FlowGroups
+		flowMetadata *voltha.FlowMetadata
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"update_flows_bulk-1", &fields{}, args{}, ErrNotImplemented},
+		{"update_flows_bulk-2", &fields{}, args{}, ErrNotImplemented},
+		{"update_flows_bulk-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Update_flows_bulk(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); err != tt.wantErr {
+				t.Errorf("Update_flows_bulk() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Update_flows_incrementally(t *testing.T) {
+	type args struct {
+		device       *voltha.Device
+		flows        *openflow_13.FlowChanges
+		groups       *openflow_13.FlowGroupChanges
+		flowMetadata *voltha.FlowMetadata
+	}
+
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"update_flows_incrementally-1", &fields{}, args{device: mockDevice()},
+			NewErrNotFound("device-handler", log.Fields{"device-id": "olt"}, nil)},
+		{"update_flows_incrementally-2", mockOlt(), args{device: mockDevice()}, nil},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Update_flows_incrementally(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); !reflect.DeepEqual(err, tt.wantErr) {
+				t.Errorf("Update_flows_incrementally() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Update_pm_config(t *testing.T) {
+	type args struct {
+		device    *voltha.Device
+		pmConfigs *voltha.PmConfigs
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr error
+	}{
+		{"update_pm_config-1", &fields{}, args{}, ErrNotImplemented},
+		{"update_pm_config-2", &fields{}, args{}, ErrNotImplemented},
+		{"update_pm_config-3", &fields{}, args{}, ErrNotImplemented},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Update_pm_config(tt.args.device, tt.args.pmConfigs); err != tt.wantErr {
+				t.Errorf("Update_pm_config() error = %v, wantErr %v", err, tt.wantErr)
+			}
+
+		})
+	}
+}
+
+func TestOpenOLT_deleteDeviceHandlerToMap(t *testing.T) {
+	type args struct {
+		agent *DeviceHandler
+	}
+	tests := []struct {
+		name   string
+		fields *fields
+		args   args
+	}{
+		{"delete_device_handler_map-1", mockOlt(), args{newMockDeviceHandler()}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			oo.deleteDeviceHandlerToMap(tt.args.agent)
+			if len(oo.deviceHandlers) > 0 {
+				t.Errorf("delete device handler failed")
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Enable_port(t *testing.T) {
+	type args struct {
+		deviceID string
+		port     *voltha.Port
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"Enable_port-1", mockOlt(), args{deviceID: "olt", port: &voltha.Port{Type: voltha.Port_PON_OLT, PortNo: 1}}, false},
+		{"Enable_port-2", mockOlt(), args{deviceID: "olt", port: &voltha.Port{Type: voltha.Port_ETHERNET_NNI, PortNo: 1}}, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Enable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+				t.Errorf("OpenOLT.Enable_port() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestOpenOLT_Disable_port(t *testing.T) {
+	type args struct {
+		deviceID string
+		port     *voltha.Port
+	}
+	tests := []struct {
+		name    string
+		fields  *fields
+		args    args
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+		{"Disable_port-1", mockOlt(), args{deviceID: "olt", port: &voltha.Port{Type: voltha.Port_PON_OLT, PortNo: 1}}, false},
+		{"Disable_port-2", mockOlt(), args{deviceID: "olt", port: &voltha.Port{Type: voltha.Port_ETHERNET_NNI, PortNo: 1}}, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			oo := testOltObject(tt.fields)
+			if err := oo.Disable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+				t.Errorf("OpenOLT.Disable_port() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/core/statsmanager.go b/internal/pkg/core/statsmanager.go
new file mode 100755
index 0000000..3133bce
--- /dev/null
+++ b/internal/pkg/core/statsmanager.go
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/openolt"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"sync"
+	"time"
+)
+
+var mutex = &sync.Mutex{}
+
+// PonPort representation
+type PonPort struct {
+	/*
+	   This is a highly reduced version taken from the adtran pon_port.
+	   TODO: Extend for use in the openolt adapter set.
+	*/
+	/*    MAX_ONUS_SUPPORTED = 256
+	      DEFAULT_ENABLED = False
+	      MAX_DEPLOYMENT_RANGE = 25000  # Meters (OLT-PB maximum)
+
+	      _MCAST_ONU_ID = 253
+	      _MCAST_ALLOC_BASE = 0x500
+
+	      _SUPPORTED_ACTIVATION_METHODS = ['autodiscovery']  # , 'autoactivate']
+	      _SUPPORTED_AUTHENTICATION_METHODS = ['serial-number']
+	*/
+	PONID    uint32
+	DeviceID string
+	IntfID   uint32
+	PortNum  uint32
+	PortID   uint32
+	Label    string
+	ONUs     map[uint32]interface{}
+	ONUsByID map[uint32]interface{}
+
+	RxBytes        uint64
+	RxPackets      uint64
+	RxUcastPackets uint64
+	RxMcastPackets uint64
+	RxBcastPackets uint64
+	RxErrorPackets uint64
+	TxBytes        uint64
+	TxPackets      uint64
+	TxUcastPackets uint64
+	TxMcastPackets uint64
+	TxBcastPackets uint64
+	TxErrorPackets uint64
+	RxCrcErrors    uint64
+	BipErrors      uint64
+}
+
+// NewPONPort returns a new instance of PonPort initialized with the given PONID, DeviceID, IntfID and PortNum
+func NewPONPort(PONID uint32, DeviceID string, IntfID uint32, PortNum uint32) *PonPort {
+
+	var PON PonPort
+
+	PON.PONID = PONID
+	PON.DeviceID = DeviceID
+	PON.IntfID = IntfID
+	PON.PortNum = PortNum
+	PON.PortID = 0
+	PON.Label = fmt.Sprintf("%s%d", "pon-", PONID)
+
+	PON.ONUs = make(map[uint32]interface{})
+	PON.ONUsByID = make(map[uint32]interface{})
+
+	/*
+	   Statistics  taken from nni_port
+	   self.intf_id = 0  #handled by getter
+	   self.port_no = 0  #handled by getter
+	   self.port_id = 0  #handled by getter
+
+	   Note:  In the current implementation of the kpis coming from the BAL the stats are the
+	   same model for NNI and PON.
+
+	   TODO:   Integrate additional kpis for the PON and other southbound port objects.
+
+	*/
+
+	PON.RxBytes = 0
+	PON.RxPackets = 0
+	PON.RxUcastPackets = 0
+	PON.RxMcastPackets = 0
+	PON.RxBcastPackets = 0
+	PON.RxErrorPackets = 0
+	PON.TxBytes = 0
+	PON.TxPackets = 0
+	PON.TxUcastPackets = 0
+	PON.TxMcastPackets = 0
+	PON.TxBcastPackets = 0
+	PON.TxErrorPackets = 0
+	PON.RxCrcErrors = 0
+	PON.BipErrors = 0
+
+	/*    def __str__(self):
+	      return "PonPort-{}: Admin: {}, Oper: {}, OLT: {}".format(self._label,
+	                                                               self._admin_state,
+	                                                               self._oper_status,
+	                                                               self.olt)
+	*/
+	return &PON
+}
+
+// NniPort representation
+type NniPort struct {
+	/*
+	   Northbound network port, often Ethernet-based
+
+	   This is a highly reduced version taken from the adtran nni_port code set
+	   TODO:   add functions to allow for port specific values and operations
+	*/
+	PortNum     uint32
+	Name        string
+	LogicalPort uint32
+	IntfID      uint32
+
+	RxBytes        uint64
+	RxPackets      uint64
+	RxUcastPackets uint64
+	RxMcastPackets uint64
+	RxBcastPackets uint64
+	RxErrorPackets uint64
+	TxBytes        uint64
+	TxPackets      uint64
+	TxUcastPackets uint64
+	TxMcastPackets uint64
+	TxBcastPackets uint64
+	TxErrorPackets uint64
+	RxCrcErrors    uint64
+	BipErrors      uint64
+}
+
+// NewNniPort returns a new instance of NniPort initialized with the given PortNum and IntfID
+func NewNniPort(PortNum uint32, IntfID uint32) *NniPort {
+
+	var NNI NniPort
+
+	NNI.PortNum = PortNum
+	NNI.Name = fmt.Sprintf("%s%d", "nni-", PortNum)
+	NNI.IntfID = IntfID
+
+	NNI.RxBytes = 0
+	NNI.RxPackets = 0
+	NNI.RxUcastPackets = 0
+	NNI.RxMcastPackets = 0
+	NNI.RxBcastPackets = 0
+	NNI.RxErrorPackets = 0
+	NNI.TxBytes = 0
+	NNI.TxPackets = 0
+	NNI.TxUcastPackets = 0
+	NNI.TxMcastPackets = 0
+	NNI.TxBcastPackets = 0
+	NNI.TxErrorPackets = 0
+	NNI.RxCrcErrors = 0
+	NNI.BipErrors = 0
+
+	return &NNI
+}
+
+// OpenOltStatisticsMgr structure
+type OpenOltStatisticsMgr struct {
+	Device         *DeviceHandler
+	NorthBoundPort map[uint32]*NniPort
+	SouthBoundPort map[uint32]*PonPort
+	// TODO  PMMetrics Metrics
+}
+
+// NewOpenOltStatsMgr returns a new instance of the OpenOltStatisticsMgr
+func NewOpenOltStatsMgr(Dev *DeviceHandler) *OpenOltStatisticsMgr {
+
+	var StatMgr OpenOltStatisticsMgr
+
+	StatMgr.Device = Dev
+	// TODO call metric PMMetric =
+	// Northbound and Southbound ports
+	// added to initialize the pm_metrics
+	var Ports interface{}
+	Ports, _ = InitPorts("nni", Dev.deviceID, 1)
+	StatMgr.NorthBoundPort, _ = Ports.(map[uint32]*NniPort)
+	NumPonPorts := Dev.resourceMgr.DevInfo.GetPonPorts()
+	Ports, _ = InitPorts("pon", Dev.deviceID, NumPonPorts)
+	StatMgr.SouthBoundPort, _ = Ports.(map[uint32]*PonPort)
+	return &StatMgr
+}
+
+// InitPorts collects the port objects (nni and pon) that are updated with the current data from the OLT
+func InitPorts(Intftype string, DeviceID string, numOfPorts uint32) (interface{}, error) {
+	/*
+	     This method collects the port objects:  nni and pon that are updated with the
+	     current data from the OLT
+
+	     Both the northbound (nni) and southbound ports are indexed by the interface id (intf_id)
+	     and NOT the port number. When the port object is instantiated it will contain the intf_id and
+	     port_no values
+
+	   :param type:
+	   :return:
+	*/
+	var i uint32
+	if Intftype == "nni" {
+		NniPorts := make(map[uint32]*NniPort)
+		for i = 0; i < numOfPorts; i++ {
+			Port := BuildPortObject(i, "nni", DeviceID).(*NniPort)
+			NniPorts[Port.IntfID] = Port
+		}
+		return NniPorts, nil
+	} else if Intftype == "pon" {
+		PONPorts := make(map[uint32]*PonPort)
+		for i = 0; i < numOfPorts; i++ {
+			PONPort := BuildPortObject(i, "pon", DeviceID).(*PonPort)
+			PONPorts[PortNoToIntfID(PONPort.IntfID, voltha.Port_PON_OLT)] = PONPort
+		}
+		return PONPorts, nil
+	} else {
+		log.Errorf("Invalid type of interface %s", Intftype)
+		return nil, NewErrInvalidValue(log.Fields{"interface-type": Intftype}, nil)
+	}
+}
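+
+// Example (illustrative sketch, mirroring how NewOpenOltStatsMgr consumes InitPorts above;
+// deviceID and numPonPorts are placeholder names):
+//
+//	ports, err := InitPorts("pon", deviceID, numPonPorts)
+//	if err == nil {
+//		ponPorts, _ := ports.(map[uint32]*PonPort)
+//		// ponPorts is indexed by interface id (intf_id), not by port number
+//		_ = ponPorts
+//	}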
+
+// BuildPortObject allows for updating north and southbound ports, newly discovered ports, and devices
+func BuildPortObject(PortNum uint32, IntfType string, DeviceID string) interface{} {
+	/*
+	   Separate method to allow for updating north and southbound ports
+	   newly discovered ports and devices
+
+	   :param port_num:
+	   :param type:
+	   :return:
+	*/
+
+	//This builds a port object which is added to the
+	//appropriate northbound or southbound values
+	if IntfType == "nni" {
+		IntfID := IntfIDToPortNo(PortNum, voltha.Port_ETHERNET_NNI)
+		nniID := PortNoToIntfID(IntfID, voltha.Port_ETHERNET_NNI)
+		log.Debugf("NniID %v", nniID)
+		return NewNniPort(PortNum, nniID)
+	} else if IntfType == "pon" {
+		// PON ports require a different configuration
+		//  intf_id and pon_id are currently equal.
+		IntfID := IntfIDToPortNo(PortNum, voltha.Port_PON_OLT)
+		PONID := PortNoToIntfID(IntfID, voltha.Port_PON_OLT)
+		log.Debugf("PonID %v", PONID)
+		return NewPONPort(PONID, DeviceID, IntfID, PortNum)
+	} else {
+		log.Errorf("Invalid type of interface %s", IntfType)
+		return nil
+	}
+}
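+
+// Note: InitPorts type-asserts the value returned here directly, e.g.
+// BuildPortObject(i, "nni", DeviceID).(*NniPort), so BuildPortObject is only expected to be
+// called with the "nni" or "pon" interface types; any other value returns nil and would make
+// that assertion panic at the call site.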
+
+// collectNNIMetrics will collect the nni port metrics
+func (StatMgr *OpenOltStatisticsMgr) collectNNIMetrics(nniID uint32) map[string]float32 {
+
+	nnival := make(map[string]float32)
+	mutex.Lock()
+	cm := StatMgr.Device.portStats.NorthBoundPort[nniID]
+	mutex.Unlock()
+	metricName := StatMgr.Device.metrics.GetSubscriberMetrics()
+
+	if metricName != nil && len(metricName) > 0 {
+		for mName := range metricName {
+			switch mName {
+			case "rx_bytes":
+				nnival["RxBytes"] = float32(cm.RxBytes)
+			case "rx_packets":
+				nnival["RxPackets"] = float32(cm.RxPackets)
+			case "rx_ucast_packets":
+				nnival["RxUcastPackets"] = float32(cm.RxUcastPackets)
+			case "rx_mcast_packets":
+				nnival["RxMcastPackets"] = float32(cm.RxMcastPackets)
+			case "rx_bcast_packets":
+				nnival["RxBcastPackets"] = float32(cm.RxBcastPackets)
+			case "tx_bytes":
+				nnival["TxBytes"] = float32(cm.TxBytes)
+			case "tx_packets":
+				nnival["TxPackets"] = float32(cm.TxPackets)
+			case "tx_mcast_packets":
+				nnival["TxMcastPackets"] = float32(cm.TxMcastPackets)
+			case "tx_bcast_packets":
+				nnival["TxBcastPackets"] = float32(cm.TxBcastPackets)
+			}
+		}
+	}
+	return nnival
+}
+
+// collectPONMetrics will collect the pon port metrics
+func (StatMgr *OpenOltStatisticsMgr) collectPONMetrics(pID uint32) map[string]float32 {
+
+	ponval := make(map[string]float32)
+	mutex.Lock()
+	cm := StatMgr.Device.portStats.SouthBoundPort[pID]
+	mutex.Unlock()
+	metricName := StatMgr.Device.metrics.GetSubscriberMetrics()
+
+	if metricName != nil && len(metricName) > 0 {
+		for mName := range metricName {
+			switch mName {
+			case "rx_bytes":
+				ponval["RxBytes"] = float32(cm.RxBytes)
+			case "rx_packets":
+				ponval["RxPackets"] = float32(cm.RxPackets)
+			// these are not supported in OpenOlt Agent now
+			// will return zero until supported
+			case "rx_ucast_packets":
+				ponval["RxUcastPackets"] = float32(cm.RxUcastPackets)
+			case "rx_mcast_packets":
+				ponval["RxMcastPackets"] = float32(cm.RxMcastPackets)
+			case "rx_bcast_packets":
+				ponval["RxBcastPackets"] = float32(cm.RxBcastPackets)
+			// End will return zero until supported
+			case "tx_bytes":
+				ponval["TxBytes"] = float32(cm.TxBytes)
+			case "tx_packets":
+				ponval["TxPackets"] = float32(cm.TxPackets)
+			// these are not supported in OpenOlt Agent now
+			// will return zero until supported
+			case "tx_ucast_packets":
+				ponval["TxUcastPackets"] = float32(cm.TxUcastPackets)
+			case "tx_mcast_packets":
+				ponval["TxMcastPackets"] = float32(cm.TxMcastPackets)
+			case "tx_bcast_packets":
+				ponval["TxBcastPackets"] = float32(cm.TxBcastPackets)
+			}
+		}
+	}
+	return ponval
+}
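+
+// Example (illustrative, following the statsmanager unit tests): the map returned by the
+// collectors is what publishMetrics consumes, keyed by the exported counter names
+// (RxBytes, RxPackets, ...); ponIntfID, ctx and devID below are placeholders:
+//
+//	val := StatMgr.collectPONMetrics(ponIntfID)
+//	StatMgr.publishMetrics("PONStats", val, ponIntfID, ctx, devID)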
+
+// publishMetrics will publish the port metrics (NNI or PON) as a KPI event
+func (StatMgr OpenOltStatisticsMgr) publishMetrics(portType string, val map[string]float32, portnum uint32, context map[string]string, devID string) {
+	log.Debugf("Post-%v %v", portType, val)
+
+	var metricInfo voltha.MetricInformation
+	var ke voltha.KpiEvent2
+	var volthaEventSubCatgry voltha.EventSubCategory_Types
+
+	if portType == "NNIStats" {
+		volthaEventSubCatgry = voltha.EventSubCategory_NNI
+	} else {
+		volthaEventSubCatgry = voltha.EventSubCategory_PON
+	}
+
+	raisedTs := time.Now().UnixNano()
+	mmd := voltha.MetricMetaData{
+		Title:    portType,
+		Ts:       float64(raisedTs),
+		Context:  context,
+		DeviceId: devID,
+	}
+
+	metricInfo.Metadata = &mmd
+	metricInfo.Metrics = val
+
+	ke.SliceData = []*voltha.MetricInformation{&metricInfo}
+	ke.Type = voltha.KpiEventType_slice
+	ke.Ts = float64(time.Now().UnixNano())
+
+	if err := StatMgr.Device.EventProxy.SendKpiEvent("STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, volthaEventSubCatgry, raisedTs); err != nil {
+		log.Errorw("Failed to send Pon stats", log.Fields{"err": err})
+	}
+
+}
+
+// PortStatisticsIndication handles the port statistics indication
+func (StatMgr *OpenOltStatisticsMgr) PortStatisticsIndication(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+	log.Debugf("port-stats-collected %v", PortStats)
+	StatMgr.PortsStatisticsKpis(PortStats, NumPonPorts)
+	log.Infow("Received port stats indication", log.Fields{"PortStats": PortStats})
+	// TODO send stats to core topic to the voltha kafka or a different kafka ?
+}
+
+// FlowStatisticsIndication to be implemented
+func FlowStatisticsIndication(self, FlowStats *openolt.FlowStatistics) {
+	log.Debugf("flow-stats-collected %v", FlowStats)
+	//TODO send to kafka ?
+}
+
+// PortsStatisticsKpis maps the collected port stats into the NNI and PON port objects; publishing the resulting KPI event to Kafka is still a TODO (see the commented-out block below)
+func (StatMgr *OpenOltStatisticsMgr) PortsStatisticsKpis(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+
+	/*map the port stats values into a dictionary
+	  Create a kpiEvent and publish to Kafka
+
+	  :param port_stats:
+	  :return:
+	*/
+	//var err error
+	IntfID := PortStats.IntfId
+
+	if (IntfIDToPortNo(1, voltha.Port_ETHERNET_NNI) < IntfID) &&
+		(IntfID < IntfIDToPortNo(4, voltha.Port_ETHERNET_NNI)) {
+		/*
+		   for this release we are only interested in the first NNI for
+		   Northbound.
+		   we are not using the other 3
+		*/
+		return
+	} else if IntfIDToPortNo(0, voltha.Port_ETHERNET_NNI) == IntfID {
+
+		var portNNIStat NniPort
+		portNNIStat.IntfID = IntfID
+		portNNIStat.PortNum = uint32(0)
+		portNNIStat.RxBytes = PortStats.RxBytes
+		portNNIStat.RxPackets = PortStats.RxPackets
+		portNNIStat.RxUcastPackets = PortStats.RxUcastPackets
+		portNNIStat.RxMcastPackets = PortStats.RxMcastPackets
+		portNNIStat.RxBcastPackets = PortStats.RxBcastPackets
+		portNNIStat.TxBytes = PortStats.TxBytes
+		portNNIStat.TxPackets = PortStats.TxPackets
+		portNNIStat.TxUcastPackets = PortStats.TxUcastPackets
+		portNNIStat.TxMcastPackets = PortStats.TxMcastPackets
+		portNNIStat.TxBcastPackets = PortStats.TxBcastPackets
+		mutex.Lock()
+		StatMgr.NorthBoundPort[0] = &portNNIStat
+		mutex.Unlock()
+		log.Debugf("Received-NNI-Stats: %v", StatMgr.NorthBoundPort)
+	}
+	for i := uint32(0); i < NumPonPorts; i++ {
+
+		if IntfIDToPortNo(i, voltha.Port_PON_OLT) == IntfID {
+			var portPonStat PonPort
+			portPonStat.IntfID = IntfID
+			portPonStat.PortNum = i
+			portPonStat.PONID = i
+			portPonStat.RxBytes = PortStats.RxBytes
+			portPonStat.RxPackets = PortStats.RxPackets
+			portPonStat.RxUcastPackets = PortStats.RxUcastPackets
+			portPonStat.RxMcastPackets = PortStats.RxMcastPackets
+			portPonStat.RxBcastPackets = PortStats.RxBcastPackets
+			portPonStat.TxBytes = PortStats.TxBytes
+			portPonStat.TxPackets = PortStats.TxPackets
+			portPonStat.TxUcastPackets = PortStats.TxUcastPackets
+			portPonStat.TxMcastPackets = PortStats.TxMcastPackets
+			portPonStat.TxBcastPackets = PortStats.TxBcastPackets
+			mutex.Lock()
+			StatMgr.SouthBoundPort[i] = &portPonStat
+			mutex.Unlock()
+			log.Debugf("Received-PON-Stats-for-Port %v : %v", i, StatMgr.SouthBoundPort[i])
+		}
+	}
+
+	/*
+	   Based upon the intf_id map to an nni port or a pon port
+	   the intf_id is the key to the north or south bound collections
+
+	   Based upon the intf_id the port object (nni_port or pon_port) will
+	   have its data attr. updated by the current dataset collected.
+
+	   For prefixing the rule is currently to use the port number and not the intf_id
+	*/
+	//FIXME : Just use first NNI for now
+	/* TODO should the data be marshaled before sending it ?
+	   if IntfID == IntfIdToPortNo(0, voltha.Port_ETHERNET_NNI) {
+	       //NNI port (just the first one)
+	       err = UpdatePortObjectKpiData(StatMgr.NorthBoundPorts[PortStats.IntfID], PMData)
+	   } else {
+	       //PON ports
+	       err = UpdatePortObjectKpiData(SouthboundPorts[PortStats.IntfID], PMData)
+	   }
+	   if (err != nil) {
+	       log.Error("Error publishing statistics data")
+	   }
+	*/
+
+}
diff --git a/internal/pkg/core/statsmanager_test.go b/internal/pkg/core/statsmanager_test.go
new file mode 100644
index 0000000..6ea2487
--- /dev/null
+++ b/internal/pkg/core/statsmanager_test.go
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides the utility for olt devices, flows and statistics
+package core
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/openolt"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+func init() {
+	_, _ = log.AddPackage(log.JSON, log.DebugLevel, nil)
+}
+func TestOpenOltStatisticsMgr_PortStatisticsIndication(t *testing.T) {
+	device := &voltha.Device{
+		Id:       "olt",
+		Root:     true,
+		ParentId: "logical_device",
+		Ports: []*voltha.Port{
+			{PortNo: 1, Label: "pon", Type: voltha.Port_ETHERNET_UNI},
+			{PortNo: 2, Label: "nni", Type: voltha.Port_ETHERNET_NNI},
+		},
+		ProxyAddress: &voltha.Device_ProxyAddress{
+			DeviceId:       "olt",
+			DeviceType:     "onu",
+			ChannelId:      1,
+			ChannelGroupId: 1,
+		},
+		ConnectStatus: 1,
+	}
+	dh := newMockDeviceHandler()
+	dh.device = device
+	StatMgr := NewOpenOltStatsMgr(dh)
+
+	type args struct {
+		PortStats *openolt.PortStatistics
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+		{"PortStatisticsIndication", args{PortStats: &openolt.PortStatistics{}}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			StatMgr.PortStatisticsIndication(tt.args.PortStats, 16)
+		})
+	}
+}
+
+func TestOpenOltStatisticsMgr_publishMetrics(t *testing.T) {
+	type fields struct {
+		Device         *DeviceHandler
+		NorthBoundPort map[uint32]*NniPort
+		SouthBoundPort map[uint32]*PonPort
+	}
+	type args struct {
+		portType string
+		val      map[string]float32
+		portnum  uint32
+		context  map[string]string
+	}
+	ctx := map[string]string{}
+	ctx["deviceID"] = "Test"
+	ponmap := map[uint32]*PonPort{}
+	ponmap[0] = &PonPort{
+		PONID:          0,
+		DeviceID:       "onu1",
+		IntfID:         0,
+		PortNum:        0,
+		PortID:         0,
+		Label:          "",
+		ONUs:           nil,
+		ONUsByID:       nil,
+		RxBytes:        0,
+		RxPackets:      0,
+		RxUcastPackets: 0,
+		RxMcastPackets: 0,
+		RxBcastPackets: 0,
+		RxErrorPackets: 0,
+		TxBytes:        0,
+		TxPackets:      0,
+		TxUcastPackets: 0,
+		TxMcastPackets: 0,
+		TxBcastPackets: 0,
+		TxErrorPackets: 0,
+		RxCrcErrors:    0,
+		BipErrors:      0,
+	}
+	nnimap := map[uint32]*NniPort{}
+	nnimap[0] = &NniPort{
+		PortNum:        0,
+		Name:           "olt1",
+		LogicalPort:    0,
+		IntfID:         0,
+		RxBytes:        0,
+		RxPackets:      0,
+		RxUcastPackets: 0,
+		RxMcastPackets: uint64(1111),
+		RxBcastPackets: 0,
+		RxErrorPackets: 0,
+		TxBytes:        0,
+		TxPackets:      0,
+		TxUcastPackets: 0,
+		TxMcastPackets: 0,
+		TxBcastPackets: 0,
+		TxErrorPackets: 0,
+		RxCrcErrors:    0,
+		BipErrors:      0,
+	}
+	pval := make(map[string]float32)
+	pval["rx_bytes"] = float32(111)
+	nval := make(map[string]float32)
+	nval["rx_bytes"] = float32(111)
+	dhandlerNNI := newMockDeviceHandler()
+	dhandlerNNI.portStats = &OpenOltStatisticsMgr{Device: nil, SouthBoundPort: nil, NorthBoundPort: nnimap}
+	dhandlerPON := newMockDeviceHandler()
+	dhandlerPON.portStats = &OpenOltStatisticsMgr{Device: nil, SouthBoundPort: ponmap, NorthBoundPort: nil}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+	}{
+		{
+			name: "PublishNNIMetrics-1",
+			fields: fields{
+				Device:         dhandlerNNI,
+				NorthBoundPort: nnimap,
+				SouthBoundPort: nil,
+			},
+			args: args{
+				portType: "NNIStats",
+				val:      nval,
+				portnum:  0,
+				context:  ctx,
+			},
+		},
+		{
+			name: "PublishPONMetrics-1",
+			fields: fields{
+				Device:         dhandlerPON,
+				NorthBoundPort: nil,
+				SouthBoundPort: ponmap,
+			},
+			args: args{
+				portType: "PONStats",
+				val:      pval,
+				portnum:  0,
+				context:  ctx,
+			},
+		},
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			StatMgr := &OpenOltStatisticsMgr{
+				Device:         tt.fields.Device,
+				NorthBoundPort: tt.fields.NorthBoundPort,
+				SouthBoundPort: tt.fields.SouthBoundPort,
+			}
+			StatMgr.publishMetrics(tt.args.portType, tt.args.val, tt.args.portnum, tt.args.context, "onu1")
+
+		})
+	}
+}
+
+func TestOpenOltStatisticsMgr_collectNNIMetrics(t *testing.T) {
+	type fields struct {
+		Device         *DeviceHandler
+		NorthBoundPort map[uint32]*NniPort
+		SouthBoundPort map[uint32]*PonPort
+	}
+	type args struct {
+		nniID uint32
+	}
+	dhandler := newMockDeviceHandler()
+	pmconfig := make(map[string]*voltha.PmConfig)
+	pmconfig["rx_bytes"] = &voltha.PmConfig{Name: "olt"}
+
+	var res map[string]float32
+	nnimap := map[uint32]*NniPort{}
+	nnimap[0] = &NniPort{Name: "olt"}
+	nnimap[1] = &NniPort{Name: "olt"}
+	dh := &DeviceHandler{portStats: &OpenOltStatisticsMgr{Device: dhandler, SouthBoundPort: nil, NorthBoundPort: nnimap}}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   map[string]float32
+	}{
+		{"CollectNNIMetrics-1", fields{
+			Device:         dh,
+			NorthBoundPort: nnimap,
+			SouthBoundPort: nil,
+		}, args{0}, res},
+		{"CollectNNIMetrics-2", fields{
+			Device:         dh,
+			NorthBoundPort: nnimap,
+			SouthBoundPort: nil,
+		}, args{1}, res},
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			StatMgr := &OpenOltStatisticsMgr{
+				Device:         tt.fields.Device,
+				NorthBoundPort: tt.fields.NorthBoundPort,
+				SouthBoundPort: tt.fields.SouthBoundPort,
+			}
+			got := StatMgr.collectNNIMetrics(tt.args.nniID)
+			if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+				t.Errorf("collectNNIMetrics() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestOpenOltStatisticsMgr_collectPONMetrics(t *testing.T) {
+	type fields struct {
+		Device         *DeviceHandler
+		NorthBoundPort map[uint32]*NniPort
+		SouthBoundPort map[uint32]*PonPort
+	}
+	type args struct {
+		pID uint32
+	}
+	dhandler := newMockDeviceHandler()
+	pmconfig := make(map[string]*voltha.PmConfig)
+	pmconfig["rx_bytes"] = &voltha.PmConfig{Name: "olt"}
+
+	var res map[string]float32
+	ponmap := map[uint32]*PonPort{}
+	ponmap[0] = &PonPort{DeviceID: "olt"}
+	ponmap[1] = &PonPort{DeviceID: "olt"}
+	dh := &DeviceHandler{portStats: &OpenOltStatisticsMgr{Device: dhandler, SouthBoundPort: ponmap, NorthBoundPort: nil}}
+
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   map[string]float32
+	}{
+		{"CollectPONMetrics-1", fields{
+			Device:         dh,
+			NorthBoundPort: nil,
+			SouthBoundPort: ponmap,
+		}, args{0}, res},
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			StatMgr := &OpenOltStatisticsMgr{
+				Device:         tt.fields.Device,
+				NorthBoundPort: tt.fields.NorthBoundPort,
+				SouthBoundPort: tt.fields.SouthBoundPort,
+			}
+			got := StatMgr.collectPONMetrics(tt.args.pID)
+			if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+				t.Errorf("collectPONMetrics() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}