[VOL-3069] Pass context to methods that perform logging and need the context
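
Logging in the adapter now goes through log.CLogger, whose methods take a
context.Context as their first argument so that request-scoped information
carried in the context can be picked up by the logging framework. Packages
register their logger with log.RegisterPackage instead of log.AddPackage, and
helpers that previously logged without a context now receive one from their
caller and hand it to the logger.

A minimal before/after sketch of the pattern applied throughout this change
(doWork is an illustrative helper name, not a method touched by this patch):

    // before: the logger call has no context to draw request metadata from
    func (dh *DeviceHandler) doWork() {
        logger.Debugw("doing-work", log.Fields{"device-id": dh.device.Id})
    }

    // after: the caller's context flows into the log call
    // (doWork is illustrative only; real methods are changed the same way)
    func (dh *DeviceHandler) doWork(ctx context.Context) {
        logger.Debugw(ctx, "doing-work", log.Fields{"device-id": dh.device.Id})
    }
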
Change-Id: Ie84f9e240aa4f47d0046acaac0d82d21b17252e5
diff --git a/internal/pkg/config/common.go b/internal/pkg/config/common.go
index bfa8e03..1b05963 100644
--- a/internal/pkg/config/common.go
+++ b/internal/pkg/config/common.go
@@ -21,12 +21,12 @@
"github.com/opencord/voltha-lib-go/v3/pkg/log"
)
-var logger log.Logger
+var logger log.CLogger
func init() {
// Setup this package so that it's log level can be modified at run time
var err error
- logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+ logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
if err != nil {
panic(err)
}
diff --git a/internal/pkg/core/common.go b/internal/pkg/core/common.go
index 9370c0e..4d46870 100644
--- a/internal/pkg/core/common.go
+++ b/internal/pkg/core/common.go
@@ -21,12 +21,12 @@
"github.com/opencord/voltha-lib-go/v3/pkg/log"
)
-var logger log.Logger
+var logger log.CLogger
func init() {
// Setup this package so that it's log level can be modified at run time
var err error
- logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "core"})
+ logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "core"})
if err != nil {
panic(err)
}
diff --git a/internal/pkg/core/device_handler.go b/internal/pkg/core/device_handler.go
index 91c9ced..fe30de5 100644
--- a/internal/pkg/core/device_handler.go
+++ b/internal/pkg/core/device_handler.go
@@ -169,18 +169,18 @@
func (dh *DeviceHandler) start(ctx context.Context) {
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
- logger.Debugw("starting-device-agent", log.Fields{"device": dh.device})
+ logger.Debugw(ctx, "starting-device-agent", log.Fields{"device": dh.device})
// Add the initial device to the local model
- logger.Debug("device-agent-started")
+ logger.Debug(ctx, "device-agent-started")
}
// stop stops the device dh. Not much to do for now
func (dh *DeviceHandler) stop(ctx context.Context) {
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
- logger.Debug("stopping-device-agent")
+ logger.Debug(ctx, "stopping-device-agent")
dh.exitChannel <- 1
- logger.Debug("device-agent-stopped")
+ logger.Debug(ctx, "device-agent-stopped")
}
func macifyIP(ip net.IP) string {
@@ -194,24 +194,24 @@
return ""
}
-func generateMacFromHost(host string) (string, error) {
+func generateMacFromHost(ctx context.Context, host string) (string, error) {
var genmac string
var addr net.IP
var ips []string
var err error
- logger.Debugw("generating-mac-from-host", log.Fields{"host": host})
+ logger.Debugw(ctx, "generating-mac-from-host", log.Fields{"host": host})
if addr = net.ParseIP(host); addr == nil {
- logger.Debugw("looking-up-hostname", log.Fields{"host": host})
+ logger.Debugw(ctx, "looking-up-hostname", log.Fields{"host": host})
if ips, err = net.LookupHost(host); err == nil {
- logger.Debugw("dns-result-ips", log.Fields{"ips": ips})
+ logger.Debugw(ctx, "dns-result-ips", log.Fields{"ips": ips})
if addr = net.ParseIP(ips[0]); addr == nil {
return "", olterrors.NewErrInvalidValue(log.Fields{"ip": ips[0]}, nil)
}
genmac = macifyIP(addr)
- logger.Debugw("using-ip-as-mac",
+ logger.Debugw(ctx, "using-ip-as-mac",
log.Fields{"host": ips[0],
"mac": genmac})
return genmac, nil
@@ -220,7 +220,7 @@
}
genmac = macifyIP(addr)
- logger.Debugw("using-ip-as-mac",
+ logger.Debugw(ctx, "using-ip-as-mac",
log.Fields{"host": host,
"mac": genmac})
return genmac, nil
@@ -253,7 +253,7 @@
return "", olterrors.NewErrInvalidValue(log.Fields{"port-type": portType}, nil)
}
-func (dh *DeviceHandler) addPort(intfID uint32, portType voltha.Port_PortType, state string) error {
+func (dh *DeviceHandler) addPort(ctx context.Context, intfID uint32, portType voltha.Port_PortType, state string) error {
var operStatus common.OperStatus_Types
if state == "up" {
operStatus = voltha.OperStatus_ACTIVE
@@ -276,7 +276,7 @@
if device.Ports != nil {
for _, dPort := range device.Ports {
if dPort.Type == portType && dPort.PortNo == portNum {
- logger.Debug("port-already-exists-updating-oper-status-of-port")
+ logger.Debug(ctx, "port-already-exists-updating-oper-status-of-port")
if err := dh.coreProxy.PortStateUpdate(context.TODO(), dh.device.Id, portType, portNum, operStatus); err != nil {
return olterrors.NewErrAdapter("failed-to-update-port-state", log.Fields{
"device-id": dh.device.Id,
@@ -307,23 +307,23 @@
MaxSpeed: uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
},
}
- logger.Debugw("sending-port-update-to-core", log.Fields{"port": port})
+ logger.Debugw(ctx, "sending-port-update-to-core", log.Fields{"port": port})
// Synchronous call to update device - this method is run in its own go routine
if err := dh.coreProxy.PortCreated(context.TODO(), dh.device.Id, port); err != nil {
return olterrors.NewErrAdapter("error-creating-port", log.Fields{
"device-id": dh.device.Id,
"port-type": portType}, err)
}
- go dh.updateLocalDevice()
+ go dh.updateLocalDevice(ctx)
return nil
}
-func (dh *DeviceHandler) updateLocalDevice() error {
+func (dh *DeviceHandler) updateLocalDevice(ctx context.Context) error {
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
device, err := dh.coreProxy.GetDevice(context.TODO(), dh.device.Id, dh.device.Id)
if err != nil || device == nil {
- logger.Errorf("device", log.Fields{"device-id": dh.device.Id}, err)
+ logger.Errorw(ctx, "device-fetch-failed", log.Fields{"device-id": dh.device.Id, "err": err})
return olterrors.NewErrNotFound("device", log.Fields{"device-id": dh.device.Id}, err)
}
dh.device = device
@@ -333,7 +333,7 @@
// nolint: gocyclo
// readIndications to read the indications from the OLT device
func (dh *DeviceHandler) readIndications(ctx context.Context) error {
- defer logger.Debugw("indications-ended", log.Fields{"device-id": dh.device.Id})
+ defer logger.Debugw(ctx, "indications-ended", log.Fields{"device-id": dh.device.Id})
defer func() {
dh.lockDevice.Lock()
dh.isReadIndicationRoutineActive = false
@@ -366,12 +366,12 @@
for {
select {
case <-dh.stopIndications:
- logger.Debugw("stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
+ logger.Debugw(ctx, "stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
break Loop
default:
indication, err := indications.Recv()
if err == io.EOF {
- logger.Infow("eof-for-indications",
+ logger.Infow(ctx, "eof-for-indications",
log.Fields{"err": err,
"device-id": dh.device.Id})
// Use an exponential back off to prevent getting into a tight loop
@@ -379,7 +379,7 @@
if duration == backoff.Stop {
// If we reach a maximum then warn and reset the backoff
// timer and keep attempting.
- logger.Warnw("maximum-indication-backoff-reached--resetting-backoff-timer",
+ logger.Warnw(ctx, "maximum-indication-backoff-reached--resetting-backoff-timer",
log.Fields{"max-indication-backoff": indicationBackoff.MaxElapsedTime,
"device-id": dh.device.Id})
indicationBackoff.Reset()
@@ -390,7 +390,7 @@
backoff := time.NewTimer(indicationBackoff.NextBackOff())
select {
case <-dh.stopIndications:
- logger.Debugw("stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
+ logger.Debugw(ctx, "stopping-collecting-indications-for-olt", log.Fields{"deviceID:": dh.device.Id})
if !backoff.Stop() {
<-backoff.C
}
@@ -404,18 +404,18 @@
continue
}
if err != nil {
- logger.Errorw("read-indication-error",
+ logger.Errorw(ctx, "read-indication-error",
log.Fields{"err": err,
"device-id": dh.device.Id})
if device.AdminState == voltha.AdminState_DELETED {
- logger.Debug("device-deleted--stopping-the-read-indication-thread")
+ logger.Debug(ctx, "device-deleted--stopping-the-read-indication-thread")
break Loop
}
// Close the stream, and re-initialize it
if err = indications.CloseSend(); err != nil {
// Ok to ignore here, because we landed here due to a problem on the stream
// In all probability, the closeSend call may fail
- logger.Debugw("error-closing-send stream--error-ignored",
+ logger.Debugw(ctx, "error-closing-send stream--error-ignored",
log.Fields{"err": err,
"device-id": dh.device.Id})
}
@@ -429,7 +429,7 @@
indicationBackoff.Reset()
// When OLT is admin down, ignore all indications.
if device.AdminState == voltha.AdminState_DISABLED && !isIndicationAllowedDuringOltAdminDown(indication) {
- logger.Debugw("olt-is-admin-down, ignore indication",
+ logger.Debugw(ctx, "olt-is-admin-down, ignore indication",
log.Fields{"indication": indication,
"device-id": dh.device.Id})
continue
@@ -475,7 +475,7 @@
dh.transitionMap.Handle(ctx, DeviceDownInd)
}
// Send or clear Alarm
- if err := dh.eventMgr.oltUpDownIndication(oltIndication, dh.device.Id, raisedTs); err != nil {
+ if err := dh.eventMgr.oltUpDownIndication(ctx, oltIndication, dh.device.Id, raisedTs); err != nil {
return olterrors.NewErrAdapter("failed-indication", log.Fields{
"device_id": dh.device.Id,
"indication": oltIndication,
@@ -495,16 +495,16 @@
case *oop.Indication_IntfInd:
intfInd := indication.GetIntfInd()
go func() {
- if err := dh.addPort(intfInd.GetIntfId(), voltha.Port_PON_OLT, intfInd.GetOperState()); err != nil {
+ if err := dh.addPort(ctx, intfInd.GetIntfId(), voltha.Port_PON_OLT, intfInd.GetOperState()); err != nil {
olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface", "device-id": dh.device.Id}, err).Log()
}
}()
- logger.Infow("received-interface-indication", log.Fields{"InterfaceInd": intfInd, "device-id": dh.device.Id})
+ logger.Infow(ctx, "received-interface-indication", log.Fields{"InterfaceInd": intfInd, "device-id": dh.device.Id})
case *oop.Indication_IntfOperInd:
intfOperInd := indication.GetIntfOperInd()
if intfOperInd.GetType() == "nni" {
go func() {
- if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_ETHERNET_NNI, intfOperInd.GetOperState()); err != nil {
+ if err := dh.addPort(ctx, intfOperInd.GetIntfId(), voltha.Port_ETHERNET_NNI, intfOperInd.GetOperState()); err != nil {
olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-nni", "device-id": dh.device.Id}, err).Log()
}
}()
@@ -513,18 +513,18 @@
// TODO: Check what needs to be handled here for When PON PORT down, ONU will be down
// Handle pon port update
go func() {
- if err := dh.addPort(intfOperInd.GetIntfId(), voltha.Port_PON_OLT, intfOperInd.GetOperState()); err != nil {
+ if err := dh.addPort(ctx, intfOperInd.GetIntfId(), voltha.Port_PON_OLT, intfOperInd.GetOperState()); err != nil {
olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-pon", "device-id": dh.device.Id}, err).Log()
}
}()
- go dh.eventMgr.oltIntfOperIndication(indication.GetIntfOperInd(), dh.device.Id, raisedTs)
+ go dh.eventMgr.oltIntfOperIndication(ctx, indication.GetIntfOperInd(), dh.device.Id, raisedTs)
}
- logger.Infow("received-interface-oper-indication",
+ logger.Infow(ctx, "received-interface-oper-indication",
log.Fields{"interfaceOperInd": intfOperInd,
"device-id": dh.device.Id})
case *oop.Indication_OnuDiscInd:
onuDiscInd := indication.GetOnuDiscInd()
- logger.Infow("received-onu-discovery-indication", log.Fields{"OnuDiscInd": onuDiscInd, "device-id": dh.device.Id})
+ logger.Infow(ctx, "received-onu-discovery-indication", log.Fields{"OnuDiscInd": onuDiscInd, "device-id": dh.device.Id})
sn := dh.stringifySerialNumber(onuDiscInd.SerialNumber)
go func() {
if err := dh.onuDiscIndication(ctx, onuDiscInd, sn); err != nil {
@@ -533,23 +533,23 @@
}()
case *oop.Indication_OnuInd:
onuInd := indication.GetOnuInd()
- logger.Infow("received-onu-indication", log.Fields{"OnuInd": onuInd, "device-id": dh.device.Id})
+ logger.Infow(ctx, "received-onu-indication", log.Fields{"OnuInd": onuInd, "device-id": dh.device.Id})
go func() {
- if err := dh.onuIndication(onuInd); err != nil {
+ if err := dh.onuIndication(ctx, onuInd); err != nil {
olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "onu", "device-id": dh.device.Id}, err).Log()
}
}()
case *oop.Indication_OmciInd:
omciInd := indication.GetOmciInd()
- logger.Debugw("received-omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "received-omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
go func() {
- if err := dh.omciIndication(omciInd); err != nil {
+ if err := dh.omciIndication(ctx, omciInd); err != nil {
olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "omci", "device-id": dh.device.Id}, err).Log()
}
}()
case *oop.Indication_PktInd:
pktInd := indication.GetPktInd()
- logger.Debugw("received-packet-indication", log.Fields{
+ logger.Debugw(ctx, "received-packet-indication", log.Fields{
"intf-type": pktInd.IntfId,
"intf-id": pktInd.IntfId,
"gem-port-id": pktInd.GemportId,
@@ -558,7 +558,7 @@
})
if logger.V(log.DebugLevel) {
- logger.Debugw("received-packet-indication-packet", log.Fields{
+ logger.Debugw(ctx, "received-packet-indication-packet", log.Fields{
"intf-type": pktInd.IntfId,
"intf-id": pktInd.IntfId,
"gem-port-id": pktInd.GemportId,
@@ -575,21 +575,21 @@
}()
case *oop.Indication_PortStats:
portStats := indication.GetPortStats()
- go dh.portStats.PortStatisticsIndication(portStats, dh.resourceMgr.DevInfo.GetPonPorts())
+ go dh.portStats.PortStatisticsIndication(ctx, portStats, dh.resourceMgr.DevInfo.GetPonPorts())
case *oop.Indication_FlowStats:
flowStats := indication.GetFlowStats()
- logger.Infow("received-flow-stats", log.Fields{"FlowStats": flowStats, "device-id": dh.device.Id})
+ logger.Infow(ctx, "received-flow-stats", log.Fields{"FlowStats": flowStats, "device-id": dh.device.Id})
case *oop.Indication_AlarmInd:
alarmInd := indication.GetAlarmInd()
- logger.Infow("received-alarm-indication", log.Fields{"AlarmInd": alarmInd, "device-id": dh.device.Id})
- go dh.eventMgr.ProcessEvents(alarmInd, dh.device.Id, raisedTs)
+ logger.Infow(ctx, "received-alarm-indication", log.Fields{"AlarmInd": alarmInd, "device-id": dh.device.Id})
+ go dh.eventMgr.ProcessEvents(ctx, alarmInd, dh.device.Id, raisedTs)
}
}
// doStateUp handle the olt up indication and update to voltha core
func (dh *DeviceHandler) doStateUp(ctx context.Context) error {
//starting the stat collector
- go startCollector(dh)
+ go startCollector(ctx, dh)
// Synchronous call to update device state - this method is run in its own go routine
if err := dh.coreProxy.DeviceStateUpdate(ctx, dh.device.Id, voltha.ConnectStatus_REACHABLE,
@@ -603,7 +603,7 @@
func (dh *DeviceHandler) doStateDown(ctx context.Context) error {
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
- logger.Debugw("do-state-down-start", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "do-state-down-start", log.Fields{"device-id": dh.device.Id})
device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
if err != nil || device == nil {
@@ -645,7 +645,7 @@
/* Discovered ONUs entries need to be cleared , since after OLT
is up, it starts sending discovery indications again*/
dh.discOnus = sync.Map{}
- logger.Debugw("do-state-down-end", log.Fields{"device-id": device.Id})
+ logger.Debugw(ctx, "do-state-down-end", log.Fields{"device-id": device.Id})
return nil
}
@@ -670,7 +670,7 @@
// doStateConnected get the device info and update to voltha core
func (dh *DeviceHandler) doStateConnected(ctx context.Context) error {
var err error
- logger.Debugw("olt-device-connected", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "olt-device-connected", log.Fields{"device-id": dh.device.Id})
// Case where OLT is disabled and then rebooted.
device, err := dh.coreProxy.GetDevice(ctx, dh.device.Id, dh.device.Id)
@@ -679,7 +679,7 @@
return olterrors.NewErrAdapter("device-fetch-failed", log.Fields{"device-id": dh.device.Id}, err).LogAt(log.ErrorLevel)
}
if device.AdminState == voltha.AdminState_DISABLED {
- logger.Debugln("do-state-connected--device-admin-state-down")
+ logger.Debugln(ctx, "do-state-connected--device-admin-state-down")
cloned := proto.Clone(device).(*voltha.Device)
cloned.ConnectStatus = voltha.ConnectStatus_REACHABLE
@@ -716,8 +716,8 @@
/*TODO: needs to handle error scenarios */
return olterrors.NewErrAdapter("fetch-device-failed", log.Fields{"device-id": dh.device.Id}, err)
}
- dh.populateActivePorts(device)
- if err := dh.disableAdminDownPorts(device); err != nil {
+ dh.populateActivePorts(ctx, device)
+ if err := dh.disableAdminDownPorts(ctx, device); err != nil {
return olterrors.NewErrAdapter("port-status-update-failed", log.Fields{"device": device}, err)
}
@@ -731,16 +731,16 @@
olterrors.NewErrAdapter("read-indications-failure", log.Fields{"device-id": dh.device.Id}, err).Log()
}
}()
- go dh.updateLocalDevice()
+ go dh.updateLocalDevice(ctx)
if device.PmConfigs != nil {
- dh.UpdatePmConfig(device.PmConfigs)
+ dh.UpdatePmConfig(ctx, device.PmConfigs)
}
return nil
}
func (dh *DeviceHandler) initializeDeviceHandlerModules(ctx context.Context) error {
- deviceInfo, err := dh.populateDeviceInfo()
+ deviceInfo, err := dh.populateDeviceInfo(ctx)
if err != nil {
return olterrors.NewErrAdapter("populate-device-info-failed", log.Fields{"device-id": dh.device.Id}, err)
@@ -760,13 +760,13 @@
dh.eventMgr = NewEventMgr(dh.EventProxy, dh)
// Stats config for new device
- dh.portStats = NewOpenOltStatsMgr(dh)
+ dh.portStats = NewOpenOltStatsMgr(ctx, dh)
return nil
}
-func (dh *DeviceHandler) populateDeviceInfo() (*oop.DeviceInfo, error) {
+func (dh *DeviceHandler) populateDeviceInfo(ctx context.Context) (*oop.DeviceInfo, error) {
var err error
var deviceInfo *oop.DeviceInfo
@@ -779,7 +779,7 @@
return nil, olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil)
}
- logger.Debugw("fetched-device-info", log.Fields{"deviceInfo": deviceInfo, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "fetched-device-info", log.Fields{"deviceInfo": deviceInfo, "device-id": dh.device.Id})
dh.device.Root = true
dh.device.Vendor = deviceInfo.Vendor
dh.device.Model = deviceInfo.Model
@@ -788,13 +788,13 @@
dh.device.FirmwareVersion = deviceInfo.FirmwareVersion
if deviceInfo.DeviceId == "" {
- logger.Warnw("no-device-id-provided-using-host", log.Fields{"hostport": dh.device.GetHostAndPort()})
+ logger.Warnw(ctx, "no-device-id-provided-using-host", log.Fields{"hostport": dh.device.GetHostAndPort()})
host := strings.Split(dh.device.GetHostAndPort(), ":")[0]
- genmac, err := generateMacFromHost(host)
+ genmac, err := generateMacFromHost(ctx, host)
if err != nil {
return nil, olterrors.NewErrAdapter("failed-to-generate-mac-host", log.Fields{"host": host}, err)
}
- logger.Debugw("using-host-for-mac-address", log.Fields{"host": host, "mac": genmac})
+ logger.Debugw(ctx, "using-host-for-mac-address", log.Fields{"host": host, "mac": genmac})
dh.device.MacAddress = genmac
} else {
dh.device.MacAddress = deviceInfo.DeviceId
@@ -808,12 +808,12 @@
return deviceInfo, nil
}
-func startCollector(dh *DeviceHandler) {
- logger.Debugf("starting-collector")
+func startCollector(ctx context.Context, dh *DeviceHandler) {
+ logger.Debugf(ctx, "starting-collector")
for {
select {
case <-dh.stopCollector:
- logger.Debugw("stopping-collector-for-olt", log.Fields{"deviceID:": dh.device.Id})
+ logger.Debugw(ctx, "stopping-collector-for-olt", log.Fields{"deviceID:": dh.device.Id})
return
case <-time.After(time.Duration(dh.metrics.ToPmConfigs().DefaultFreq) * time.Second):
@@ -824,19 +824,19 @@
if port.Type == voltha.Port_ETHERNET_NNI {
intfID := PortNoToIntfID(port.PortNo, voltha.Port_ETHERNET_NNI)
cmnni := dh.portStats.collectNNIMetrics(intfID)
- logger.Debugw("collect-nni-metrics", log.Fields{"metrics": cmnni})
- go dh.portStats.publishMetrics(cmnni, port, dh.device.Id, dh.device.Type)
- logger.Debugw("publish-nni-metrics", log.Fields{"nni-port": port.Label})
+ logger.Debugw(ctx, "collect-nni-metrics", log.Fields{"metrics": cmnni})
+ go dh.portStats.publishMetrics(ctx, cmnni, port, dh.device.Id, dh.device.Type)
+ logger.Debugw(ctx, "publish-nni-metrics", log.Fields{"nni-port": port.Label})
}
// PON Stats
if port.Type == voltha.Port_PON_OLT {
intfID := PortNoToIntfID(port.PortNo, voltha.Port_PON_OLT)
if val, ok := dh.activePorts.Load(intfID); ok && val == true {
cmpon := dh.portStats.collectPONMetrics(intfID)
- logger.Debugw("collect-pon-metrics", log.Fields{"metrics": cmpon})
- go dh.portStats.publishMetrics(cmpon, port, dh.device.Id, dh.device.Type)
+ logger.Debugw(ctx, "collect-pon-metrics", log.Fields{"metrics": cmpon})
+ go dh.portStats.publishMetrics(ctx, cmpon, port, dh.device.Id, dh.device.Type)
}
- logger.Debugw("publish-pon-metrics", log.Fields{"pon-port": port.Label})
+ logger.Debugw(ctx, "publish-pon-metrics", log.Fields{"pon-port": port.Label})
}
}
}
@@ -846,7 +846,7 @@
//AdoptDevice adopts the OLT device
func (dh *DeviceHandler) AdoptDevice(ctx context.Context, device *voltha.Device) {
dh.transitionMap = NewTransitionMap(dh)
- logger.Infow("adopt-device", log.Fields{"device-id": device.Id, "Address": device.GetHostAndPort()})
+ logger.Infow(ctx, "adopt-device", log.Fields{"device-id": device.Id, "Address": device.GetHostAndPort()})
dh.transitionMap.Handle(ctx, DeviceInit)
// Now, set the initial PM configuration for that device
@@ -877,15 +877,15 @@
}, nil
}
-func (dh *DeviceHandler) omciIndication(omciInd *oop.OmciIndication) error {
- logger.Debugw("omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+func (dh *DeviceHandler) omciIndication(ctx context.Context, omciInd *oop.OmciIndication) error {
+ logger.Debugw(ctx, "omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
var deviceType string
var deviceID string
var proxyDeviceID string
transid := extractOmciTransactionID(omciInd.Pkt)
if logger.V(log.DebugLevel) {
- logger.Debugw("recv-omci-msg", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id,
+ logger.Debugw(ctx, "recv-omci-msg", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id,
"omci-transaction-id": transid, "omci-msg": hex.EncodeToString(omciInd.Pkt)})
}
@@ -893,7 +893,7 @@
if onuInCache, ok := dh.onus.Load(onuKey); !ok {
- logger.Debugw("omci-indication-for-a-device-not-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "omci-indication-for-a-device-not-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
ponPort := IntfIDToPortNo(omciInd.GetIntfId(), voltha.Port_PON_OLT)
kwargs := make(map[string]interface{})
kwargs["onu_id"] = omciInd.OnuId
@@ -912,7 +912,7 @@
dh.onus.Store(onuKey, NewOnuDevice(deviceID, deviceType, onuDevice.SerialNumber, omciInd.OnuId, omciInd.IntfId, proxyDeviceID, false))
} else {
//found in cache
- logger.Debugw("omci-indication-for-a-device-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "omci-indication-for-a-device-in-cache.", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
deviceType = onuInCache.(*OnuDevice).deviceType
deviceID = onuInCache.(*OnuDevice).deviceID
proxyDeviceID = onuInCache.(*OnuDevice).proxyDeviceID
@@ -934,8 +934,8 @@
//ProcessInterAdapterMessage sends the proxied messages to the target device
// If the proxy address is not found in the unmarshalled message, it first fetches the onu device for which the message
// is meant, and then send the unmarshalled omci message to this onu
-func (dh *DeviceHandler) ProcessInterAdapterMessage(msg *ic.InterAdapterMessage) error {
- logger.Debugw("process-inter-adapter-message", log.Fields{"msgID": msg.Header.Id})
+func (dh *DeviceHandler) ProcessInterAdapterMessage(ctx context.Context, msg *ic.InterAdapterMessage) error {
+ logger.Debugw(ctx, "process-inter-adapter-message", log.Fields{"msgID": msg.Header.Id})
if msg.Header.Type == ic.InterAdapterMessageType_OMCI_REQUEST {
msgID := msg.Header.Id
fromTopic := msg.Header.FromTopic
@@ -943,7 +943,7 @@
toDeviceID := msg.Header.ToDeviceId
proxyDeviceID := msg.Header.ProxyDeviceId
- logger.Debugw("omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+ logger.Debugw(ctx, "omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
msgBody := msg.GetBody()
@@ -959,15 +959,15 @@
"device-id": dh.device.Id,
"onu-device-id": toDeviceID}, err)
}
- logger.Debugw("device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
- if err := dh.sendProxiedMessage(onuDevice, omciMsg); err != nil {
+ logger.Debugw(ctx, "device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+ if err := dh.sendProxiedMessage(ctx, onuDevice, omciMsg); err != nil {
return olterrors.NewErrCommunication("send-failed", log.Fields{
"device-id": dh.device.Id,
"onu-device-id": toDeviceID}, err)
}
} else {
- logger.Debugw("proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
- if err := dh.sendProxiedMessage(nil, omciMsg); err != nil {
+ logger.Debugw(ctx, "proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+ if err := dh.sendProxiedMessage(ctx, nil, omciMsg); err != nil {
return olterrors.NewErrCommunication("send-failed", log.Fields{
"device-id": dh.device.Id,
"onu-device-id": toDeviceID}, err)
@@ -980,7 +980,7 @@
return nil
}
-func (dh *DeviceHandler) sendProxiedMessage(onuDevice *voltha.Device, omciMsg *ic.InterAdapterOmciMessage) error {
+func (dh *DeviceHandler) sendProxiedMessage(ctx context.Context, onuDevice *voltha.Device, omciMsg *ic.InterAdapterOmciMessage) error {
var intfID uint32
var onuID uint32
var connectStatus common.ConnectStatus_Types
@@ -994,7 +994,7 @@
connectStatus = omciMsg.GetConnectStatus()
}
if connectStatus != voltha.ConnectStatus_REACHABLE {
- logger.Debugw("onu-not-reachable--cannot-send-omci", log.Fields{"intf-id": intfID, "onu-id": onuID})
+ logger.Debugw(ctx, "onu-not-reachable--cannot-send-omci", log.Fields{"intf-id": intfID, "onu-id": onuID})
return olterrors.NewErrCommunication("unreachable", log.Fields{
"intf-id": intfID,
@@ -1011,7 +1011,7 @@
// TODO: Below logging illustrates the "stringify" of the omci Pkt.
// once above is fixed this log line can change to just use hex.EncodeToString(omciMessage.Pkt)
transid := extractOmciTransactionID(omciMsg.Message)
- logger.Debugw("sent-omci-msg", log.Fields{"intf-id": intfID, "onu-id": onuID,
+ logger.Debugw(ctx, "sent-omci-msg", log.Fields{"intf-id": intfID, "onu-id": onuID,
"omciTransactionID": transid, "omciMsg": string(omciMessage.Pkt)})
_, err := dh.Client.OmciMsgOut(context.Background(), omciMessage)
@@ -1025,7 +1025,7 @@
}
func (dh *DeviceHandler) activateONU(ctx context.Context, intfID uint32, onuID int64, serialNum *oop.SerialNumber, serialNumber string) error {
- logger.Debugw("activate-onu", log.Fields{"intf-id": intfID, "onu-id": onuID, "serialNum": serialNum, "serialNumber": serialNumber, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "activate-onu", log.Fields{"intf-id": intfID, "onu-id": onuID, "serialNum": serialNum, "serialNumber": serialNumber, "device-id": dh.device.Id})
if err := dh.flowMgr.UpdateOnuInfo(ctx, intfID, uint32(onuID), serialNumber); err != nil {
return olterrors.NewErrAdapter("onu-activate-failed", log.Fields{"onu": onuID, "intf-id": intfID}, err)
}
@@ -1035,12 +1035,13 @@
if _, err := dh.Client.ActivateOnu(ctx, &Onu); err != nil {
st, _ := status.FromError(err)
if st.Code() == codes.AlreadyExists {
- logger.Debugw("onu-activation-in-progress", log.Fields{"SerialNumber": serialNumber, "onu-id": onuID, "device-id": dh.device.Id})
+ logger.Debugw(ctx, "onu-activation-in-progress", log.Fields{"SerialNumber": serialNumber, "onu-id": onuID, "device-id": dh.device.Id})
+
} else {
return olterrors.NewErrAdapter("onu-activate-failed", log.Fields{"onu": Onu, "device-id": dh.device.Id}, err)
}
} else {
- logger.Infow("activated-onu", log.Fields{"SerialNumber": serialNumber, "device-id": dh.device.Id})
+ logger.Infow(ctx, "activated-onu", log.Fields{"SerialNumber": serialNumber, "device-id": dh.device.Id})
}
return nil
}
@@ -1050,7 +1051,7 @@
channelID := onuDiscInd.GetIntfId()
parentPortNo := IntfIDToPortNo(onuDiscInd.GetIntfId(), voltha.Port_PON_OLT)
- logger.Infow("new-discovery-indication", log.Fields{"sn": sn})
+ logger.Infow(ctx, "new-discovery-indication", log.Fields{"sn": sn})
kwargs := make(map[string]interface{})
if sn != "" {
@@ -1070,7 +1071,7 @@
dh.onus.Range(func(Onukey interface{}, onuInCache interface{}) bool {
if onuInCache.(*OnuDevice).serialNumber == sn && onuInCache.(*OnuDevice).losRaised {
if onuDiscInd.GetIntfId() != onuInCache.(*OnuDevice).intfID {
- logger.Warnw("onu-is-on-a-different-intf-id-now", log.Fields{
+ logger.Warnw(ctx, "onu-is-on-a-different-intf-id-now", log.Fields{
"previousIntfId": onuInCache.(*OnuDevice).intfID,
"currentIntfId": onuDiscInd.GetIntfId()})
// TODO:: Should we need to ignore raising OnuLosClear event
@@ -1079,12 +1080,12 @@
alarmInd.IntfId = onuInCache.(*OnuDevice).intfID
alarmInd.OnuId = onuInCache.(*OnuDevice).onuID
alarmInd.LosStatus = statusCheckOff
- go dh.eventMgr.onuAlarmIndication(&alarmInd, onuInCache.(*OnuDevice).deviceID, raisedTs)
+ go dh.eventMgr.onuAlarmIndication(ctx, &alarmInd, onuInCache.(*OnuDevice).deviceID, raisedTs)
}
return true
})
- logger.Warnw("onu-sn-is-already-being-processed", log.Fields{"sn": sn})
+ logger.Warnw(ctx, "onu-sn-is-already-being-processed", log.Fields{"sn": sn})
return nil
}
@@ -1095,9 +1096,9 @@
onuDevice, err := dh.coreProxy.GetChildDevice(ctx, dh.device.Id, kwargs)
if err != nil {
- logger.Debugw("core-proxy-get-child-device-failed", log.Fields{"parentDevice": dh.device.Id, "err": err, "sn": sn})
+ logger.Debugw(ctx, "core-proxy-get-child-device-failed", log.Fields{"parentDevice": dh.device.Id, "err": err, "sn": sn})
if e, ok := status.FromError(err); ok {
- logger.Debugw("core-proxy-get-child-device-failed-with-code", log.Fields{"errCode": e.Code(), "sn": sn})
+ logger.Debugw(ctx, "core-proxy-get-child-device-failed-with-code", log.Fields{"errCode": e.Code(), "sn": sn})
switch e.Code() {
case codes.Internal:
// this probably means NOT FOUND, so just create a new device
@@ -1112,14 +1113,14 @@
if onuDevice == nil {
// NOTE this should happen a single time, and only if GetChildDevice returns NotFound
- logger.Debugw("creating-new-onu", log.Fields{"sn": sn})
+ logger.Debugw(ctx, "creating-new-onu", log.Fields{"sn": sn})
// we need to create a new ChildDevice
ponintfid := onuDiscInd.GetIntfId()
dh.lockDevice.Lock()
onuID, err = dh.resourceMgr.GetONUID(ctx, ponintfid)
dh.lockDevice.Unlock()
- logger.Infow("creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
+ logger.Infow(ctx, "creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
if err != nil {
// if we can't create an ID in resource manager,
@@ -1138,8 +1139,8 @@
"pon-intf-id": ponintfid,
"serial-number": sn}, err)
}
- dh.eventMgr.OnuDiscoveryIndication(onuDiscInd, dh.device.Id, onuDevice.Id, onuID, sn, time.Now().UnixNano())
- logger.Infow("onu-child-device-added",
+ dh.eventMgr.OnuDiscoveryIndication(ctx, onuDiscInd, dh.device.Id, onuDevice.Id, onuID, sn, time.Now().UnixNano())
+ logger.Infow(ctx, "onu-child-device-added",
log.Fields{"onuDevice": onuDevice,
"sn": sn,
"onu-id": onuID,
@@ -1150,7 +1151,7 @@
onuID = onuDevice.ProxyAddress.OnuId
//Insert the ONU into cache to use in OnuIndication.
//TODO: Do we need to remove this from the cache on ONU change, or wait for overwritten on next discovery.
- logger.Debugw("onu-discovery-indication-key-create",
+ logger.Debugw(ctx, "onu-discovery-indication-key-create",
log.Fields{"onu-id": onuID,
"intfId": onuDiscInd.GetIntfId(),
"sn": sn})
@@ -1158,7 +1159,7 @@
onuDev := NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuID, onuDiscInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId, false)
dh.onus.Store(onuKey, onuDev)
- logger.Debugw("new-onu-device-discovered",
+ logger.Debugw(ctx, "new-onu-device-discovered",
log.Fields{"onu": onuDev,
"sn": sn})
@@ -1167,7 +1168,7 @@
"device-id": onuDevice.Id,
"serial-number": sn}, err)
}
- logger.Infow("onu-discovered-reachable", log.Fields{"device-id": onuDevice.Id, "sn": sn})
+ logger.Infow(ctx, "onu-discovered-reachable", log.Fields{"device-id": onuDevice.Id, "sn": sn})
if err = dh.activateONU(ctx, onuDiscInd.IntfId, int64(onuID), onuDiscInd.SerialNumber, sn); err != nil {
return olterrors.NewErrAdapter("onu-activation-failed", log.Fields{
"device-id": onuDevice.Id,
@@ -1176,7 +1177,7 @@
return nil
}
-func (dh *DeviceHandler) onuIndication(onuInd *oop.OnuIndication) error {
+func (dh *DeviceHandler) onuIndication(ctx context.Context, onuInd *oop.OnuIndication) error {
serialNumber := dh.stringifySerialNumber(onuInd.SerialNumber)
kwargs := make(map[string]interface{})
@@ -1184,7 +1185,7 @@
var onuDevice *voltha.Device
var err error
foundInCache := false
- logger.Debugw("onu-indication-key-create",
+ logger.Debugw(ctx, "onu-indication-key-create",
log.Fields{"onuId": onuInd.OnuId,
"intfId": onuInd.GetIntfId(),
"device-id": dh.device.Id})
@@ -1217,13 +1218,13 @@
}
if onuDevice.ParentPortNo != ponPort {
- logger.Warnw("onu-is-on-a-different-intf-id-now", log.Fields{
+ logger.Warnw(ctx, "onu-is-on-a-different-intf-id-now", log.Fields{
"previousIntfId": onuDevice.ParentPortNo,
"currentIntfId": ponPort})
}
if onuDevice.ProxyAddress.OnuId != onuInd.OnuId {
- logger.Warnw("onu-id-mismatch-possible-if-voltha-and-olt-rebooted", log.Fields{
+ logger.Warnw(ctx, "onu-id-mismatch-possible-if-voltha-and-olt-rebooted", log.Fields{
"expected-onu-id": onuDevice.ProxyAddress.OnuId,
"received-onu-id": onuInd.OnuId,
"device-id": dh.device.Id})
@@ -1234,15 +1235,15 @@
dh.onus.Store(onuKey, NewOnuDevice(onuDevice.Id, onuDevice.Type, onuDevice.SerialNumber, onuInd.GetOnuId(), onuInd.GetIntfId(), onuDevice.ProxyAddress.DeviceId, false))
}
- if err := dh.updateOnuStates(onuDevice, onuInd); err != nil {
+ if err := dh.updateOnuStates(ctx, onuDevice, onuInd); err != nil {
return olterrors.NewErrCommunication("state-update-failed", errFields, err)
}
return nil
}
-func (dh *DeviceHandler) updateOnuStates(onuDevice *voltha.Device, onuInd *oop.OnuIndication) error {
- ctx := context.TODO()
- logger.Debugw("onu-indication-for-state", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+func (dh *DeviceHandler) updateOnuStates(ctx context.Context, onuDevice *voltha.Device, onuInd *oop.OnuIndication) error {
+ logger.Debugw(ctx, "onu-indication-for-state", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
if onuInd.AdminState == "down" || onuInd.OperState == "down" {
// The ONU has gone admin_state "down" or oper_state "down" - we expect the ONU to send discovery again
// The ONU admin_state is "up" while "oper_state" is down in cases where ONU activation fails. In this case
@@ -1250,14 +1251,14 @@
dh.discOnus.Delete(onuDevice.SerialNumber)
// Tests have shown that we sometimes get OperState as NOT down even if AdminState is down, forcing it
if onuInd.OperState != "down" {
- logger.Warnw("onu-admin-state-down", log.Fields{"operState": onuInd.OperState})
+ logger.Warnw(ctx, "onu-admin-state-down", log.Fields{"operState": onuInd.OperState})
onuInd.OperState = "down"
}
}
switch onuInd.OperState {
case "down":
- logger.Debugw("sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+ logger.Debugw(ctx, "sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
// TODO NEW CORE do not hardcode adapter name. Handler needs Adapter reference
err := dh.AdapterProxy.SendInterAdapterMessage(ctx, onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
@@ -1269,7 +1270,7 @@
"device-id": onuDevice.Id}, err)
}
case "up":
- logger.Debugw("sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
+ logger.Debugw(ctx, "sending-interadapter-onu-indication", log.Fields{"onuIndication": onuInd, "device-id": onuDevice.Id, "operStatus": onuDevice.OperStatus, "adminStatus": onuDevice.AdminState})
// TODO NEW CORE do not hardcode adapter name. Handler needs Adapter reference
err := dh.AdapterProxy.SendInterAdapterMessage(ctx, onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
@@ -1321,8 +1322,8 @@
}
//GetChildDevice returns the child device for given parent port and onu id
-func (dh *DeviceHandler) GetChildDevice(parentPort, onuID uint32) (*voltha.Device, error) {
- logger.Debugw("getchilddevice",
+func (dh *DeviceHandler) GetChildDevice(ctx context.Context, parentPort, onuID uint32) (*voltha.Device, error) {
+ logger.Debugw(ctx, "getchilddevice",
log.Fields{"pon-port": parentPort,
"onu-id": onuID,
"device-id": dh.device.Id})
@@ -1335,16 +1336,16 @@
"intf-id": parentPort,
"onu-id": onuID}, err)
}
- logger.Debugw("successfully-received-child-device-from-core", log.Fields{"child-device-id": onuDevice.Id, "child-device-sn": onuDevice.SerialNumber})
+ logger.Debugw(ctx, "successfully-received-child-device-from-core", log.Fields{"child-device-id": onuDevice.Id, "child-device-sn": onuDevice.SerialNumber})
return onuDevice, nil
}
// SendPacketInToCore sends packet-in to core
// For this, it calls SendPacketIn of the core-proxy which uses a device specific topic to send the request.
// The adapter handling the device creates a device specific topic
-func (dh *DeviceHandler) SendPacketInToCore(logicalPort uint32, packetPayload []byte) error {
+func (dh *DeviceHandler) SendPacketInToCore(ctx context.Context, logicalPort uint32, packetPayload []byte) error {
if logger.V(log.DebugLevel) {
- logger.Debugw("send-packet-in-to-core", log.Fields{
+ logger.Debugw(ctx, "send-packet-in-to-core", log.Fields{
"port": logicalPort,
"packet": hex.EncodeToString(packetPayload),
"device-id": dh.device.Id,
@@ -1359,7 +1360,7 @@
"packet": hex.EncodeToString(packetPayload)}, err)
}
if logger.V(log.DebugLevel) {
- logger.Debugw("sent-packet-in-to-core-successfully", log.Fields{
+ logger.Debugw(ctx, "sent-packet-in-to-core-successfully", log.Fields{
"packet": hex.EncodeToString(packetPayload),
"device-id": dh.device.Id,
})
@@ -1368,26 +1369,26 @@
}
// AddUniPortToOnu adds the uni port to the onu device
-func (dh *DeviceHandler) AddUniPortToOnu(intfID, onuID, uniPort uint32) {
+func (dh *DeviceHandler) AddUniPortToOnu(ctx context.Context, intfID, onuID, uniPort uint32) {
onuKey := dh.formOnuKey(intfID, onuID)
if onuDevice, ok := dh.onus.Load(onuKey); ok {
// add it to the uniPort map for the onu device
if _, ok = onuDevice.(*OnuDevice).uniPorts[uniPort]; !ok {
onuDevice.(*OnuDevice).uniPorts[uniPort] = struct{}{}
- logger.Debugw("adding-uni-port", log.Fields{"port": uniPort, "intf-id": intfID, "onuId": onuID})
+ logger.Debugw(ctx, "adding-uni-port", log.Fields{"port": uniPort, "intf-id": intfID, "onuId": onuID})
}
}
}
// UpdatePmConfig updates the pm metrics.
-func (dh *DeviceHandler) UpdatePmConfig(pmConfigs *voltha.PmConfigs) {
+func (dh *DeviceHandler) UpdatePmConfig(ctx context.Context, pmConfigs *voltha.PmConfigs) {
- logger.Infow("update-pm-configs", log.Fields{"device-id": dh.device.Id, "pm-configs": pmConfigs})
+ logger.Infow(ctx, "update-pm-configs", log.Fields{"device-id": dh.device.Id, "pm-configs": pmConfigs})
if pmConfigs.DefaultFreq != dh.metrics.ToPmConfigs().DefaultFreq {
dh.metrics.UpdateFrequency(pmConfigs.DefaultFreq)
- logger.Debugf("frequency-updated")
+ logger.Debugf(ctx, "frequency-updated")
}
if pmConfigs.Grouped == false {
@@ -1401,15 +1402,15 @@
//UpdateFlowsIncrementally updates the device flow
func (dh *DeviceHandler) UpdateFlowsIncrementally(ctx context.Context, device *voltha.Device, flows *of.FlowChanges, groups *of.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
- logger.Debugw("received-incremental-flowupdate-in-device-handler", log.Fields{"device-id": device.Id, "flows": flows, "groups": groups, "flowMetadata": flowMetadata})
+ logger.Debugw(ctx, "received-incremental-flowupdate-in-device-handler", log.Fields{"device-id": device.Id, "flows": flows, "groups": groups, "flowMetadata": flowMetadata})
var errorsList []error
if flows != nil {
for _, flow := range flows.ToRemove.Items {
- dh.incrementActiveFlowRemoveCount(flow)
+ dh.incrementActiveFlowRemoveCount(ctx, flow)
- logger.Debugw("removing-flow",
+ logger.Debugw(ctx, "removing-flow",
log.Fields{"device-id": device.Id,
"flowToRemove": flow})
err := dh.flowMgr.RemoveFlow(ctx, flow)
@@ -1417,15 +1418,15 @@
errorsList = append(errorsList, err)
}
- dh.decrementActiveFlowRemoveCount(flow)
+ dh.decrementActiveFlowRemoveCount(ctx, flow)
}
for _, flow := range flows.ToAdd.Items {
- logger.Debugw("adding-flow",
+ logger.Debugw(ctx, "adding-flow",
log.Fields{"device-id": device.Id,
"flowToAdd": flow})
// If there are active Flow Remove in progress for a given subscriber, wait until it completes
- dh.waitForFlowRemoveToFinish(flow)
+ dh.waitForFlowRemoveToFinish(ctx, flow)
err := dh.flowMgr.AddFlow(ctx, flow, flowMetadata)
if err != nil {
errorsList = append(errorsList, err)
@@ -1448,13 +1449,13 @@
}
}
if len(groups.ToRemove.Items) != 0 {
- logger.Debugw("group-delete-operation-not-supported", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "group-delete-operation-not-supported", log.Fields{"device-id": dh.device.Id})
}
}
if len(errorsList) > 0 {
return fmt.Errorf("errors-installing-flows-groups, errors:%v", errorsList)
}
- logger.Debugw("updated-flows-incrementally-successfully", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "updated-flows-incrementally-successfully", log.Fields{"device-id": dh.device.Id})
return nil
}
@@ -1463,7 +1464,7 @@
//Device-Handler Admin-State : down
//Device Port-State: UNKNOWN
//Device Oper-State: UNKNOWN
-func (dh *DeviceHandler) DisableDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) DisableDevice(ctx context.Context, device *voltha.Device) error {
/* On device disable ,admin state update has to be done prior sending request to agent since
the indication thread may processes invalid indications of ONU and OLT*/
if dh.Client != nil {
@@ -1473,7 +1474,7 @@
}
}
}
- logger.Debugw("olt-disabled", log.Fields{"device-id": device.Id})
+ logger.Debugw(ctx, "olt-disabled", log.Fields{"device-id": device.Id})
/* Discovered ONUs entries need to be cleared , since on device disable the child devices goes to
UNREACHABLE state which needs to be configured again*/
@@ -1483,7 +1484,7 @@
//stopping the stats collector
dh.stopCollector <- true
- go dh.notifyChildDevices("unreachable")
+ go dh.notifyChildDevices(ctx, "unreachable")
cloned := proto.Clone(device).(*voltha.Device)
//Update device Admin state
dh.device = cloned
@@ -1497,11 +1498,11 @@
}
}
- logger.Debugw("disable-device-end", log.Fields{"device-id": device.Id})
+ logger.Debugw(ctx, "disable-device-end", log.Fields{"device-id": device.Id})
return nil
}
-func (dh *DeviceHandler) notifyChildDevices(state string) {
+func (dh *DeviceHandler) notifyChildDevices(ctx context.Context, state string) {
// Update onu state as unreachable in onu adapter
onuInd := oop.OnuIndication{}
@@ -1509,14 +1510,14 @@
//get the child device for the parent device
onuDevices, err := dh.coreProxy.GetChildDevices(context.TODO(), dh.device.Id)
if err != nil {
- logger.Errorw("failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "error": err})
+ logger.Errorw(ctx, "failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "error": err})
}
if onuDevices != nil {
for _, onuDevice := range onuDevices.Items {
err := dh.AdapterProxy.SendInterAdapterMessage(context.TODO(), &onuInd, ic.InterAdapterMessageType_ONU_IND_REQUEST,
"openolt", onuDevice.Type, onuDevice.Id, onuDevice.ProxyAddress.DeviceId, "")
if err != nil {
- logger.Errorw("failed-to-send-inter-adapter-message", log.Fields{"OnuInd": onuInd,
+ logger.Errorw(ctx, "failed-to-send-inter-adapter-message", log.Fields{"OnuInd": onuInd,
"From Adapter": "openolt", "DeviceType": onuDevice.Type, "device-id": onuDevice.Id})
}
@@ -1530,19 +1531,19 @@
//Device-Handler Admin-State : up
//Device Port-State: ACTIVE
//Device Oper-State: ACTIVE
-func (dh *DeviceHandler) ReenableDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) ReenableDevice(ctx context.Context, device *voltha.Device) error {
if _, err := dh.Client.ReenableOlt(context.Background(), new(oop.Empty)); err != nil {
if e, ok := status.FromError(err); ok && e.Code() == codes.Internal {
return olterrors.NewErrAdapter("olt-reenable-failed", log.Fields{"device-id": dh.device.Id}, err)
}
}
- logger.Debug("olt-reenabled")
+ logger.Debug(ctx, "olt-reenabled")
cloned := proto.Clone(device).(*voltha.Device)
// Update the all ports state on that device to enable
- if err := dh.disableAdminDownPorts(device); err != nil {
+ if err := dh.disableAdminDownPorts(ctx, device); err != nil {
return olterrors.NewErrAdapter("port-status-update-failed-after-olt-reenable", log.Fields{"device": device}, err)
}
//Update the device oper status as ACTIVE
@@ -1556,7 +1557,7 @@
"oper-status": cloned.OperStatus}, err)
}
- logger.Debugw("reenabledevice-end", log.Fields{"device-id": device.Id})
+ logger.Debugw(ctx, "reenabledevice-end", log.Fields{"device-id": device.Id})
return nil
}
@@ -1566,12 +1567,12 @@
var err error
for _, port := range onu.UniPorts {
uniID = UniIDFromPortNum(uint32(port))
- logger.Debugw("clearing-resource-data-for-uni-port", log.Fields{"port": port, "uni-id": uniID})
+ logger.Debugw(ctx, "clearing-resource-data-for-uni-port", log.Fields{"port": port, "uni-id": uniID})
/* Delete tech-profile instance from the KV store */
if err = dh.flowMgr.DeleteTechProfileInstances(ctx, onu.IntfID, onu.OnuID, uniID, onu.SerialNumber); err != nil {
- logger.Debugw("failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
}
- logger.Debugw("deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID))
for _, flowID := range flowIDs {
dh.resourceMgr.FreeFlowID(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID), flowID)
@@ -1579,21 +1580,21 @@
tpIDList := dh.resourceMgr.GetTechProfileIDForOnu(ctx, onu.IntfID, onu.OnuID, uniID)
for _, tpID := range tpIDList {
if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "upstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
- logger.Debugw("failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
}
- logger.Debugw("removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
if err = dh.resourceMgr.RemoveMeterIDForOnu(ctx, "downstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
- logger.Debugw("failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
}
- logger.Debugw("removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
}
dh.resourceMgr.FreePONResourcesForONU(ctx, onu.IntfID, onu.OnuID, uniID)
if err = dh.resourceMgr.RemoveTechProfileIDsForOnu(ctx, onu.IntfID, onu.OnuID, uniID); err != nil {
- logger.Debugw("failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
}
- logger.Debugw("removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
+ logger.Debugw(ctx, "removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
if err = dh.resourceMgr.DelGemPortPktIn(ctx, onu.IntfID, onu.OnuID, uint32(port)); err != nil {
- logger.Debugw("failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
+ logger.Debugw(ctx, "failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
}
}
return nil
@@ -1611,10 +1612,10 @@
if err != nil {
return olterrors.NewErrPersistence("get", "nni", 0, nil, err)
}
- logger.Debugw("nni-", log.Fields{"nni": nni})
+ logger.Debugw(ctx, "nni-", log.Fields{"nni": nni})
for _, nniIntfID := range nni {
flowIDs := dh.resourceMgr.GetCurrentFlowIDsForOnu(ctx, uint32(nniIntfID), int32(nniOnuID), int32(nniUniID))
- logger.Debugw("current-flow-ids-for-nni", log.Fields{"flow-ids": flowIDs})
+ logger.Debugw(ctx, "current-flow-ids-for-nni", log.Fields{"flow-ids": flowIDs})
for _, flowID := range flowIDs {
dh.resourceMgr.FreeFlowID(ctx, uint32(nniIntfID), -1, -1, uint32(flowID))
}
@@ -1628,13 +1629,13 @@
// DeleteDevice deletes the device instance from openolt handler array. Also clears allocated resource manager resources. Also reboots the OLT hardware!
func (dh *DeviceHandler) DeleteDevice(ctx context.Context, device *voltha.Device) error {
- logger.Debug("function-entry-delete-device")
+ logger.Debug(ctx, "function-entry-delete-device")
/* Clear the KV store data associated with the all the UNI ports
This clears up flow data and also resource map data for various
other pon resources like alloc_id and gemport_id
*/
go dh.cleanupDeviceResources(ctx)
- logger.Debug("removed-device-from-Resource-manager-KV-store")
+ logger.Debug(ctx, "removed-device-from-Resource-manager-KV-store")
// Stop the Stats collector
dh.stopCollector <- true
// stop the heartbeat check routine
@@ -1670,9 +1671,9 @@
}
for _, onu := range onuGemData {
onuID := make([]uint32, 1)
- logger.Debugw("onu-data", log.Fields{"onu": onu})
+ logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
if err = dh.clearUNIData(ctx, &onu); err != nil {
- logger.Errorw("failed-to-clear-data-for-onu", log.Fields{"onu-device": onu})
+ logger.Errorw(ctx, "failed-to-clear-data-for-onu", log.Fields{"onu-device": onu})
}
// Clear flowids for gem cache.
for _, gem := range onu.GemPorts {
@@ -1685,14 +1686,14 @@
onuGemData = nil
err = dh.resourceMgr.DelOnuGemInfoForIntf(ctx, ponPort)
if err != nil {
- logger.Errorw("failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
+ logger.Errorw(ctx, "failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
}
}
/* Clear the flows from KV store associated with NNI port.
There are mostly trap rules from NNI port (like LLDP)
*/
if err := dh.clearNNIData(ctx); err != nil {
- logger.Errorw("failed-to-clear-data-for-NNI-port", log.Fields{"device-id": dh.device.Id})
+ logger.Errorw(ctx, "failed-to-clear-data-for-NNI-port", log.Fields{"device-id": dh.device.Id})
}
/* Clear the resource pool for each PON port in the background */
@@ -1715,17 +1716,17 @@
}
//RebootDevice reboots the given device
-func (dh *DeviceHandler) RebootDevice(device *voltha.Device) error {
+func (dh *DeviceHandler) RebootDevice(ctx context.Context, device *voltha.Device) error {
if _, err := dh.Client.Reboot(context.Background(), new(oop.Empty)); err != nil {
return olterrors.NewErrAdapter("olt-reboot-failed", log.Fields{"device-id": dh.device.Id}, err)
}
- logger.Debugw("rebooted-device-successfully", log.Fields{"device-id": device.Id})
+ logger.Debugw(ctx, "rebooted-device-successfully", log.Fields{"device-id": device.Id})
return nil
}
func (dh *DeviceHandler) handlePacketIndication(ctx context.Context, packetIn *oop.PacketIndication) error {
if logger.V(log.DebugLevel) {
- logger.Debugw("received-packet-in", log.Fields{
+ logger.Debugw(ctx, "received-packet-in", log.Fields{
"packet-indication": *packetIn,
"device-id": dh.device.Id,
"packet": hex.EncodeToString(packetIn.Pkt),
@@ -1736,12 +1737,13 @@
return olterrors.NewErrNotFound("logical-port", log.Fields{"packet": hex.EncodeToString(packetIn.Pkt)}, err)
}
if logger.V(log.DebugLevel) {
- logger.Debugw("sending-packet-in-to-core", log.Fields{
+ logger.Debugw(ctx, "sending-packet-in-to-core", log.Fields{
"logical-port-num": logicalPortNum,
"device-id": dh.device.Id,
"packet": hex.EncodeToString(packetIn.Pkt),
})
}
+
if err := dh.coreProxy.SendPacketIn(context.TODO(), dh.device.Id, logicalPortNum, packetIn.Pkt); err != nil {
return olterrors.NewErrCommunication("send-packet-in", log.Fields{
"destination": "core",
@@ -1750,8 +1752,9 @@
"packet": hex.EncodeToString(packetIn.Pkt),
}, err)
}
+
if logger.V(log.DebugLevel) {
- logger.Debugw("success-sending-packet-in-to-core!", log.Fields{
+ logger.Debugw(ctx, "success-sending-packet-in-to-core!", log.Fields{
"packet": hex.EncodeToString(packetIn.Pkt),
"device-id": dh.device.Id,
})
@@ -1762,7 +1765,7 @@
// PacketOut sends packet-out from VOLTHA to OLT on the egress port provided
func (dh *DeviceHandler) PacketOut(ctx context.Context, egressPortNo int, packet *of.OfpPacketOut) error {
if logger.V(log.DebugLevel) {
- logger.Debugw("incoming-packet-out", log.Fields{
+ logger.Debugw(ctx, "incoming-packet-out", log.Fields{
"device-id": dh.device.Id,
"egress-port-no": egressPortNo,
"pkt-length": len(packet.Data),
@@ -1779,7 +1782,7 @@
// ONOS has no clue about uni/nni ports, it just packets out on all
// available ports on the Logical Switch. It should not be interested
// in the UNI links.
- logger.Debugw("dropping-lldp-packet-out-on-uni", log.Fields{
+ logger.Debugw(ctx, "dropping-lldp-packet-out-on-uni", log.Fields{
"device-id": dh.device.Id,
})
return nil
@@ -1790,7 +1793,7 @@
// slice out the outer tag.
packet.Data = append(packet.Data[:12], packet.Data[16:]...)
if logger.V(log.DebugLevel) {
- logger.Debugw("packet-now-single-tagged", log.Fields{
+ logger.Debugw(ctx, "packet-now-single-tagged", log.Fields{
"packet-data": hex.EncodeToString(packet.Data),
"device-id": dh.device.Id,
})
@@ -1806,7 +1809,7 @@
// In this case the openolt agent will receive the gemPortID as 0.
// The agent tries to retrieve the gemPortID in this case.
// This may not always succeed at the agent and packetOut may fail.
- logger.Errorw("failed-to-retrieve-gemport-id-for-packet-out", log.Fields{
+ logger.Errorw(ctx, "failed-to-retrieve-gemport-id-for-packet-out", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
@@ -1818,7 +1821,7 @@
onuPkt := oop.OnuPacket{IntfId: intfID, OnuId: onuID, PortNo: uint32(egressPortNo), GemportId: gemPortID, Pkt: packet.Data}
if logger.V(log.DebugLevel) {
- logger.Debugw("sending-packet-to-onu", log.Fields{
+ logger.Debugw(ctx, "sending-packet-to-onu", log.Fields{
"egress-port-no": egressPortNo,
"intf-id": intfID,
"onu-id": onuID,
@@ -1843,7 +1846,7 @@
}, err)
}
} else if egressPortType == voltha.Port_ETHERNET_NNI {
- nniIntfID, err := IntfIDFromNniPortNum(uint32(egressPortNo))
+ nniIntfID, err := IntfIDFromNniPortNum(ctx, uint32(egressPortNo))
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{
"egress-nni-port": egressPortNo,
@@ -1853,7 +1856,7 @@
uplinkPkt := oop.UplinkPacket{IntfId: nniIntfID, Pkt: packet.Data}
if logger.V(log.DebugLevel) {
- logger.Debugw("sending-packet-to-nni", log.Fields{
+ logger.Debugw(ctx, "sending-packet-to-nni", log.Fields{
"uplink-pkt": uplinkPkt,
"packet": hex.EncodeToString(packet.Data),
"device-id": dh.device.Id,
@@ -1867,7 +1870,7 @@
}, err)
}
} else {
- logger.Warnw("packet-out-to-this-interface-type-not-implemented", log.Fields{
+ logger.Warnw(ctx, "packet-out-to-this-interface-type-not-implemented", log.Fields{
"egress-port-no": egressPortNo,
"egressPortType": egressPortType,
"packet": hex.EncodeToString(packet.Data),
@@ -1891,7 +1894,7 @@
case <-heartbeatTimer.C:
ctxWithTimeout, cancel := context.WithTimeout(context.Background(), dh.openOLT.GrpcTimeoutInterval)
if heartBeat, err := dh.Client.HeartbeatCheck(ctxWithTimeout, new(oop.Empty)); err != nil {
- logger.Warnw("hearbeat-failed", log.Fields{"device-id": dh.device.Id})
+ logger.Warnw(ctx, "hearbeat-failed", log.Fields{"device-id": dh.device.Id})
if timerCheck == nil {
// start an AfterFunc; when it expires, update the state to the core
timerCheck = time.AfterFunc(dh.openOLT.HeartbeatFailReportInterval, func() { dh.updateStateUnreachable(ctx) })
@@ -1899,17 +1902,17 @@
} else {
if timerCheck != nil {
if timerCheck.Stop() {
- logger.Debugw("got-hearbeat-within-timeout", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "got-hearbeat-within-timeout", log.Fields{"device-id": dh.device.Id})
}
timerCheck = nil
}
- logger.Debugw("hearbeat",
+ logger.Debugw(ctx, "hearbeat",
log.Fields{"signature": heartBeat,
"device-id": dh.device.Id})
}
cancel()
case <-dh.stopHeartbeatCheck:
- logger.Debugw("stopping-heart-beat-check", log.Fields{"device-id": dh.device.Id})
+ logger.Debugw(ctx, "stopping-heart-beat-check", log.Fields{"device-id": dh.device.Id})
return
}
}
@@ -1946,24 +1949,23 @@
}
// EnablePort enables a PON interface
-func (dh *DeviceHandler) EnablePort(port *voltha.Port) error {
- logger.Debugw("enable-port", log.Fields{"Device": dh.device, "port": port})
- return dh.modifyPhyPort(port, true)
+func (dh *DeviceHandler) EnablePort(ctx context.Context, port *voltha.Port) error {
+ logger.Debugw(ctx, "enable-port", log.Fields{"Device": dh.device, "port": port})
+ return dh.modifyPhyPort(ctx, port, true)
}
// DisablePort disables a PON interface
-func (dh *DeviceHandler) DisablePort(port *voltha.Port) error {
- logger.Debugw("disable-port", log.Fields{"Device": dh.device, "port": port})
- return dh.modifyPhyPort(port, false)
+func (dh *DeviceHandler) DisablePort(ctx context.Context, port *voltha.Port) error {
+ logger.Debugw(ctx, "disable-port", log.Fields{"Device": dh.device, "port": port})
+ return dh.modifyPhyPort(ctx, port, false)
}
//modifyPhyPort is a common function to enable and disable a port. param enablePort: true to enable the port, false to disable it.
-func (dh *DeviceHandler) modifyPhyPort(port *voltha.Port, enablePort bool) error {
- ctx := context.Background()
- logger.Infow("modifyPhyPort", log.Fields{"port": port, "Enable": enablePort, "device-id": dh.device.Id})
+func (dh *DeviceHandler) modifyPhyPort(ctx context.Context, port *voltha.Port, enablePort bool) error {
+ logger.Infow(ctx, "modifyPhyPort", log.Fields{"port": port, "Enable": enablePort, "device-id": dh.device.Id})
if port.GetType() == voltha.Port_ETHERNET_NNI {
// A bug (VOL-2505) is open to support the NNI disable feature.
- logger.Infow("voltha-supports-single-nni-hence-disable-of-nni-not-allowed",
+ logger.Infow(ctx, "voltha-supports-single-nni-hence-disable-of-nni-not-allowed",
log.Fields{"device": dh.device, "port": port})
return olterrors.NewErrAdapter("illegal-port-request", log.Fields{
"port-type": port.GetType,
@@ -1984,7 +1986,7 @@
}
// updating interface local cache for collecting stats
dh.activePorts.Store(ponID, true)
- logger.Infow("enabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
+ logger.Infow(ctx, "enabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
} else {
operStatus = voltha.OperStatus_UNKNOWN
out, err := dh.Client.DisablePonIf(ctx, ponIntf)
@@ -1995,7 +1997,7 @@
}
// updating interface local cache for collecting stats
dh.activePorts.Store(ponID, false)
- logger.Infow("disabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
+ logger.Infow(ctx, "disabled-pon-port", log.Fields{"out": out, "device-id": dh.device, "Port": port})
}
if err := dh.coreProxy.PortStateUpdate(ctx, dh.device.Id, voltha.Port_PON_OLT, port.PortNo, operStatus); err != nil {
return olterrors.NewErrAdapter("port-state-update-failed", log.Fields{
@@ -2006,13 +2008,13 @@
}
//disableAdminDownPorts disables the ports whose AdminState is disabled, on reboot and re-enable of the device.
-func (dh *DeviceHandler) disableAdminDownPorts(device *voltha.Device) error {
+func (dh *DeviceHandler) disableAdminDownPorts(ctx context.Context, device *voltha.Device) error {
cloned := proto.Clone(device).(*voltha.Device)
// Disable the port and update the oper_port_status to core
// if the Admin state of the port is disabled on reboot and re-enable device.
for _, port := range cloned.Ports {
if port.AdminState == common.AdminState_DISABLED {
- if err := dh.DisablePort(port); err != nil {
+ if err := dh.DisablePort(ctx, port); err != nil {
return olterrors.NewErrAdapter("port-disable-failed", log.Fields{
"device-id": dh.device.Id,
"port": port}, err)
@@ -2023,8 +2025,8 @@
}
//populateActivePorts populates the activePorts map
-func (dh *DeviceHandler) populateActivePorts(device *voltha.Device) {
- logger.Infow("populateActiveports", log.Fields{"Device": device})
+func (dh *DeviceHandler) populateActivePorts(ctx context.Context, device *voltha.Device) {
+ logger.Infow(ctx, "populateActiveports", log.Fields{"Device": device})
for _, port := range device.Ports {
if port.Type == voltha.Port_ETHERNET_NNI {
if port.OperStatus == voltha.OperStatus_ACTIVE {
@@ -2045,7 +2047,7 @@
// ChildDeviceLost deletes ONU and clears pon resources related to it.
func (dh *DeviceHandler) ChildDeviceLost(ctx context.Context, pPortNo uint32, onuID uint32) error {
- logger.Debugw("child-device-lost", log.Fields{"pdeviceID": dh.device.Id})
+ logger.Debugw(ctx, "child-device-lost", log.Fields{"pdeviceID": dh.device.Id})
intfID := PortNoToIntfID(pPortNo, voltha.Port_PON_OLT)
onuKey := dh.formOnuKey(intfID, onuID)
onuDevice, ok := dh.onus.Load(onuKey)
@@ -2092,21 +2094,21 @@
//clear PON resources associated with ONU
var onuGemData []rsrcMgr.OnuGemInfo
if onuMgr, ok := dh.resourceMgr.ResourceMgrs[intfID]; !ok {
- logger.Warnw("failed-to-get-resource-manager-for-interface-Id", log.Fields{
+ logger.Warnw(ctx, "failed-to-get-resource-manager-for-interface-Id", log.Fields{
"device-id": dh.device.Id,
"intf-id": intfID})
} else {
if err := onuMgr.GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
- logger.Warnw("failed-to-get-onu-info-for-pon-port", log.Fields{
+ logger.Warnw(ctx, "failed-to-get-onu-info-for-pon-port", log.Fields{
"device-id": dh.device.Id,
"intf-id": intfID,
"error": err})
} else {
for i, onu := range onuGemData {
if onu.OnuID == onuID && onu.SerialNumber == onuDevice.(*OnuDevice).serialNumber {
- logger.Debugw("onu-data", log.Fields{"onu": onu})
+ logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
if err := dh.clearUNIData(ctx, &onu); err != nil {
- logger.Warnw("failed-to-clear-uni-data-for-onu", log.Fields{
+ logger.Warnw(ctx, "failed-to-clear-uni-data-for-onu", log.Fields{
"device-id": dh.device.Id,
"onu-device": onu,
"error": err})
@@ -2118,14 +2120,14 @@
onuGemData = append(onuGemData[:i], onuGemData[i+1:]...)
err := onuMgr.AddOnuGemInfo(ctx, intfID, onuGemData)
if err != nil {
- logger.Warnw("persistence-update-onu-gem-info-failed", log.Fields{
+ logger.Warnw(ctx, "persistence-update-onu-gem-info-failed", log.Fields{
"intf-id": intfID,
"onu-device": onu,
"onu-gem": onuGemData,
"error": err})
//Not returning error on cleanup.
}
- logger.Debugw("removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGemData})
+ logger.Debugw(ctx, "removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGemData})
dh.resourceMgr.FreeonuID(ctx, intfID, []uint32{onu.OnuID})
break
}
@@ -2157,13 +2159,13 @@
return InvalidPort
}
-func (dh *DeviceHandler) incrementActiveFlowRemoveCount(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) incrementActiveFlowRemoveCount(ctx context.Context, flow *of.OfpFlowStats) {
inPort, outPort := getPorts(flow)
- logger.Debugw("increment-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+ logger.Debugw(ctx, "increment-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
if inPort != InvalidPort && outPort != InvalidPort {
_, intfID, onuID, uniID := ExtractAccessFromFlow(inPort, outPort)
key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
- logger.Debugw("increment-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Debugw(ctx, "increment-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
@@ -2177,29 +2179,29 @@
flowRemoveData.pendingFlowRemoveCount++
dh.pendingFlowRemoveDataPerSubscriber[key] = flowRemoveData
- logger.Debugw("current-flow-remove-count–increment",
+ logger.Debugw(ctx, "current-flow-remove-count–increment",
log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID,
"currCnt": dh.pendingFlowRemoveDataPerSubscriber[key].pendingFlowRemoveCount})
}
}
-func (dh *DeviceHandler) decrementActiveFlowRemoveCount(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) decrementActiveFlowRemoveCount(ctx context.Context, flow *of.OfpFlowStats) {
inPort, outPort := getPorts(flow)
- logger.Debugw("decrement-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+ logger.Debugw(ctx, "decrement-flow-remove-count-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
if inPort != InvalidPort && outPort != InvalidPort {
_, intfID, onuID, uniID := ExtractAccessFromFlow(uint32(inPort), uint32(outPort))
key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
- logger.Debugw("decrement-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Debugw(ctx, "decrement-flow-remove-count-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
dh.lockDevice.Lock()
defer dh.lockDevice.Unlock()
if val, ok := dh.pendingFlowRemoveDataPerSubscriber[key]; !ok {
- logger.Fatalf("flow-remove-key-not-found", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Fatalf(ctx, "flow-remove-key-not-found", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
} else {
if val.pendingFlowRemoveCount > 0 {
val.pendingFlowRemoveCount--
}
- logger.Debugw("current-flow-remove-count-after-decrement",
+ logger.Debugw(ctx, "current-flow-remove-count-after-decrement",
log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID,
"currCnt": dh.pendingFlowRemoveDataPerSubscriber[key].pendingFlowRemoveCount})
// If all flow removes have finished, then close the channel to signal the receiver
@@ -2214,19 +2216,19 @@
}
}
-func (dh *DeviceHandler) waitForFlowRemoveToFinish(flow *of.OfpFlowStats) {
+func (dh *DeviceHandler) waitForFlowRemoveToFinish(ctx context.Context, flow *of.OfpFlowStats) {
var flowRemoveData pendingFlowRemoveData
var ok bool
inPort, outPort := getPorts(flow)
- logger.Debugw("wait-for-flow-remove-to-finish-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
+ logger.Debugw(ctx, "wait-for-flow-remove-to-finish-for-inPort-out-port", log.Fields{"inPort": inPort, "out-port": outPort})
if inPort != InvalidPort && outPort != InvalidPort {
_, intfID, onuID, uniID := ExtractAccessFromFlow(inPort, outPort)
key := pendingFlowRemoveDataKey{intfID: intfID, onuID: onuID, uniID: uniID}
- logger.Debugw("wait-for-flow-remove-to-finish-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Debugw(ctx, "wait-for-flow-remove-to-finish-for-subscriber", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
dh.lockDevice.RLock()
if flowRemoveData, ok = dh.pendingFlowRemoveDataPerSubscriber[key]; !ok {
- logger.Debugw("no-pending-flow-to-remove", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Debugw(ctx, "no-pending-flow-to-remove", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
dh.lockDevice.RUnlock()
return
}
@@ -2235,7 +2237,7 @@
// Wait for all flow removes to finish first
<-flowRemoveData.allFlowsRemoved
- logger.Debugw("all-flows-cleared--handling-flow-add-now", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
+ logger.Debugw(ctx, "all-flows-cleared--handling-flow-add-now", log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
}
}
@@ -2287,11 +2289,11 @@
}
// setOnuITUPonAlarmConfig sets the parameters in the openolt agent for raising the ONU ITU PON alarms.
-func (dh *DeviceHandler) setOnuITUPonAlarmConfig(config *oop.OnuItuPonAlarm) error {
+func (dh *DeviceHandler) setOnuITUPonAlarmConfig(ctx context.Context, config *oop.OnuItuPonAlarm) error {
if _, err := dh.Client.OnuItuPonAlarmSet(context.Background(), config); err != nil {
return err
}
- logger.Debugw("onu-itu-pon-alarm-config-set-successful", log.Fields{"config": config})
+ logger.Debugw(ctx, "onu-itu-pon-alarm-config-set-successful", log.Fields{"config": config})
return nil
}
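
The device_handler.go hunks above all apply the same mechanical change: each CLogger call takes the caller's ctx as its first argument, and heavier debug payloads stay behind the logger.V(log.DebugLevel) guard. Below is a minimal sketch of that pattern as it would look inside internal/pkg/core; the helper name debugPacket is hypothetical, while logger, log.Fields and log.DebugLevel are the same symbols the diff already uses.

package core

import (
	"context"
	"encoding/hex"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// debugPacket dumps a packet only when debug logging is enabled, so the hex
// encoding cost is skipped at higher levels; ctx lets the context-aware
// logger attach correlation data to the entry.
func debugPacket(ctx context.Context, deviceID string, pkt []byte) {
	if logger.V(log.DebugLevel) {
		logger.Debugw(ctx, "packet-dump", log.Fields{
			"device-id": deviceID,
			"packet":    hex.EncodeToString(pkt),
		})
	}
}
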
diff --git a/internal/pkg/core/device_handler_test.go b/internal/pkg/core/device_handler_test.go
index 531c27b..38affa0 100644
--- a/internal/pkg/core/device_handler_test.go
+++ b/internal/pkg/core/device_handler_test.go
@@ -222,6 +222,7 @@
return dh
}
func Test_generateMacFromHost(t *testing.T) {
+ ctx := context.Background()
type args struct {
host string
}
@@ -238,7 +239,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := generateMacFromHost(tt.args.host)
+ got, err := generateMacFromHost(ctx, tt.args.host)
if (err != nil) != tt.wantErr {
t.Errorf("generateMacFromHost() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -313,6 +314,7 @@
}
func TestDeviceHandler_GetChildDevice(t *testing.T) {
+ ctx := context.Background()
dh1 := newMockDeviceHandler()
dh2 := negativeDeviceHandler()
type args struct {
@@ -351,7 +353,7 @@
*/
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := tt.devicehandler.GetChildDevice(tt.args.parentPort, tt.args.onuID)
+ got, err := tt.devicehandler.GetChildDevice(ctx, tt.args.parentPort, tt.args.onuID)
if reflect.TypeOf(err) != tt.errType || !sparseCompare([]string{"Id", "ParentId", "ParentPortNo"}, tt.want, got) {
t.Errorf("GetportLabel() => want=(%v, %v) got=(%v, %v)",
tt.want, tt.errType, got, reflect.TypeOf(err))
@@ -395,6 +397,7 @@
}
func TestDeviceHandler_ProcessInterAdapterMessage(t *testing.T) {
+ ctx := context.Background()
dh := newMockDeviceHandler()
proxyAddr := dh.device.ProxyAddress
body := &ic.InterAdapterOmciMessage{
@@ -410,18 +413,18 @@
var err error
if marshalledData, err = ptypes.MarshalAny(body); err != nil {
- logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
}
var marshalledData1 *any.Any
if marshalledData1, err = ptypes.MarshalAny(body2); err != nil {
- logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
}
var marshalledData2 *any.Any
if marshalledData2, err = ptypes.MarshalAny(body3); err != nil {
- logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
}
type args struct {
msg *ic.InterAdapterMessage
@@ -505,7 +508,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if err := dh.ProcessInterAdapterMessage(tt.args.msg); reflect.TypeOf(err) != tt.wantErr {
+ if err := dh.ProcessInterAdapterMessage(ctx, tt.args.msg); reflect.TypeOf(err) != tt.wantErr {
t.Errorf("DeviceHandler.ProcessInterAdapterMessage() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -513,6 +516,7 @@
}
func TestDeviceHandler_sendProxiedMessage(t *testing.T) {
+ ctx := context.Background()
dh1 := newMockDeviceHandler()
dh2 := negativeDeviceHandler()
device1 := &voltha.Device{
@@ -566,7 +570,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tt.devicehandler.sendProxiedMessage(tt.args.onuDevice, tt.args.omciMsg)
+ tt.devicehandler.sendProxiedMessage(ctx, tt.args.onuDevice, tt.args.omciMsg)
})
}
}
@@ -590,7 +594,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tt.devicehandler.SendPacketInToCore(tt.args.logicalPort, tt.args.packetPayload)
+ tt.devicehandler.SendPacketInToCore(context.Background(), tt.args.logicalPort, tt.args.packetPayload)
})
}
}
@@ -613,7 +617,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if err := tt.devicehandler.DisableDevice(tt.args.device); (err != nil) != tt.wantErr {
+ if err := tt.devicehandler.DisableDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
t.Errorf("DeviceHandler.DisableDevice() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -639,7 +643,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dh := tt.devicehandler
- if err := dh.ReenableDevice(tt.args.device); (err != nil) != tt.wantErr {
+ if err := dh.ReenableDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
t.Errorf("DeviceHandler.ReenableDevice() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -666,7 +670,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if err := tt.devicehandler.RebootDevice(tt.args.device); (err != nil) != tt.wantErr {
+ if err := tt.devicehandler.RebootDevice(context.Background(), tt.args.device); (err != nil) != tt.wantErr {
t.Errorf("DeviceHandler.RebootDevice() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -783,7 +787,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tt.devicehandler.addPort(tt.args.intfID, tt.args.portType, tt.args.state)
+ tt.devicehandler.addPort(context.Background(), tt.args.intfID, tt.args.portType, tt.args.state)
})
}
}
@@ -1082,7 +1086,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- _, err := tt.devicehandler.populateDeviceInfo()
+ _, err := tt.devicehandler.populateDeviceInfo(context.Background())
if (err != nil) != tt.wantErr {
t.Errorf("DeviceHandler.populateDeviceInfo() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -1156,7 +1160,7 @@
time.Sleep(1 * time.Second) // simulated wait time to stop startCollector
tt.args.dh.stopCollector <- true
}()
- startCollector(tt.args.dh)
+ startCollector(context.Background(), tt.args.dh)
})
}
}
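
The test updates above follow one recipe: create a single ctx := context.Background() at the top of the test and pass it into the now context-aware functions. Below is a hypothetical extra test in the same style, assuming it sits in internal/pkg/core next to the existing tests and that an IPv4 literal is mac-ified without error.

package core

import (
	"context"
	"testing"
)

// TestGenerateMacFromHostWithContext is illustrative only: one background
// context per test, threaded through the context-aware helper.
func TestGenerateMacFromHostWithContext(t *testing.T) {
	ctx := context.Background()
	got, err := generateMacFromHost(ctx, "1.2.3.4")
	if err != nil {
		t.Errorf("generateMacFromHost() unexpected error = %v", err)
		return
	}
	t.Logf("generated mac %s", got)
}
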
diff --git a/internal/pkg/core/olt_platform.go b/internal/pkg/core/olt_platform.go
index b5e8f36..4b36102 100644
--- a/internal/pkg/core/olt_platform.go
+++ b/internal/pkg/core/olt_platform.go
@@ -18,6 +18,7 @@
package core
import (
+ "context"
"github.com/opencord/voltha-lib-go/v3/pkg/flows"
"github.com/opencord/voltha-lib-go/v3/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
@@ -123,10 +124,10 @@
var controllerPorts = []uint32{0xfffd, 0x7ffffffd, 0xfffffffd}
//MkUniPortNum returns a new UNI port number based on intfID, onuID and uniID
-func MkUniPortNum(intfID, onuID, uniID uint32) uint32 {
+func MkUniPortNum(ctx context.Context, intfID, onuID, uniID uint32) uint32 {
var limit = int(onuID)
if limit > MaxOnusPerPon {
- logger.Warn("exceeded-the-max-onus-per-pon")
+ logger.Warn(ctx, "exceeded-the-max-onus-per-pon")
}
return (intfID << (bitsForUniID + bitsForONUID)) | (onuID << bitsForUniID) | uniID
}
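
For reference, the packing MkUniPortNum performs above can be seen in isolation in the sketch below. The bit widths used here (4 bits for uniID, 8 for onuID) are assumptions for illustration; the real constants are defined elsewhere in this package.

package main

import "fmt"

const (
	bitsForUniID = 4 // assumed width
	bitsForONUID = 8 // assumed width
)

func mkUniPortNum(intfID, onuID, uniID uint32) uint32 {
	return (intfID << (bitsForUniID + bitsForONUID)) | (onuID << bitsForUniID) | uniID
}

func main() {
	// intf 1, onu 2, uni 0 packs to 0x1020 under the assumed widths
	fmt.Printf("0x%x\n", mkUniPortNum(1, 2, 0))
}
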
@@ -169,9 +170,9 @@
}
//IntfIDFromNniPortNum returns Intf ID derived from portNum
-func IntfIDFromNniPortNum(portNum uint32) (uint32, error) {
+func IntfIDFromNniPortNum(ctx context.Context, portNum uint32) (uint32, error) {
if portNum < minNniIntPortNum || portNum > maxNniPortNum {
- logger.Errorw("nniportnumber-is-not-in-valid-range", log.Fields{"portnum": portNum})
+ logger.Errorw(ctx, "nniportnumber-is-not-in-valid-range", log.Fields{"portnum": portNum})
return uint32(0), olterrors.ErrInvalidPortRange
}
return (portNum & 0xFFFF), nil
@@ -222,7 +223,7 @@
}
//FlowExtractInfo fetches the UNI port from the flow and, based on it, returns ponIntf, onuID, uniID, inPort and ethType
-func FlowExtractInfo(flow *ofp.OfpFlowStats, flowDirection string) (uint32, uint32, uint32, uint32, uint32, uint32, error) {
+func FlowExtractInfo(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) (uint32, uint32, uint32, uint32, uint32, uint32, error) {
var uniPortNo uint32
var ponIntf uint32
var onuID uint32
@@ -268,7 +269,7 @@
onuID = OnuIDFromUniPortNum(uniPortNo)
uniID = UniIDFromPortNum(uniPortNo)
- logger.Debugw("flow-extract-info-result",
+ logger.Debugw(ctx, "flow-extract-info-result",
log.Fields{
"uniportno": uniPortNo,
"pon-intf": ponIntf,
diff --git a/internal/pkg/core/olt_platform_test.go b/internal/pkg/core/olt_platform_test.go
index 211459f..71414c1 100644
--- a/internal/pkg/core/olt_platform_test.go
+++ b/internal/pkg/core/olt_platform_test.go
@@ -18,14 +18,14 @@
package core
import (
- "math"
- "reflect"
- "testing"
-
+ "context"
fu "github.com/opencord/voltha-lib-go/v3/pkg/flows"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
"github.com/opencord/voltha-protos/v3/go/voltha"
+ "math"
+ "reflect"
+ "testing"
)
func TestMkUniPortNum(t *testing.T) {
@@ -47,7 +47,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := MkUniPortNum(tt.args.intfID, tt.args.onuID, tt.args.uniID); got != tt.want {
+ if got := MkUniPortNum(context.Background(), tt.args.intfID, tt.args.onuID, tt.args.uniID); got != tt.want {
t.Errorf("MkUniPortNum() = %v, want %v", got, tt.want)
} else {
t.Logf("Expected %v , Actual %v \n", tt.want, got)
@@ -187,7 +187,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := IntfIDFromNniPortNum(tt.args.portNum)
+ got, err := IntfIDFromNniPortNum(context.Background(), tt.args.portNum)
if got != tt.want || err != tt.wantErr {
t.Errorf("IntfIDFromNniPortNum(): FOR[%v] WANT[%v and %v] GOT[%v and %v]",
tt.args.portNum, tt.want, tt.wantErr, got, err)
@@ -350,7 +350,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, got1, got2, got3, got4, got5, err := FlowExtractInfo(tt.args.flow, tt.args.flowDirection)
+ got, got1, got2, got3, got4, got5, err := FlowExtractInfo(context.Background(), tt.args.flow, tt.args.flowDirection)
if (err != nil) != tt.wantErr {
t.Errorf("FlowExtractInfo() error = %v, wantErr %v", err, tt.wantErr)
return
diff --git a/internal/pkg/core/olt_state_transitions.go b/internal/pkg/core/olt_state_transitions.go
index cd6df1d..58e2251 100644
--- a/internal/pkg/core/olt_state_transitions.go
+++ b/internal/pkg/core/olt_state_transitions.go
@@ -149,7 +149,7 @@
// Check whether the transition is valid from the current state
if !tMap.isValidTransition(trigger) {
- logger.Errorw("invalid-transition-triggered",
+ logger.Errorw(ctx, "invalid-transition-triggered",
log.Fields{
"current-state": tMap.currentDeviceState,
"trigger": trigger})
@@ -159,31 +159,31 @@
// Invoke the before handlers
beforeHandlers := tMap.transitions[trigger].before
if beforeHandlers == nil {
- logger.Debugw("no-handlers-for-before", log.Fields{"trigger": trigger})
+ logger.Debugw(ctx, "no-handlers-for-before", log.Fields{"trigger": trigger})
}
for _, handler := range beforeHandlers {
- logger.Debugw("running-before-handler", log.Fields{"handler": funcName(handler)})
+ logger.Debugw(ctx, "running-before-handler", log.Fields{"handler": funcName(handler)})
if err := handler(ctx); err != nil {
// TODO handle error
- logger.Error(err)
+ logger.Error(ctx, err)
return
}
}
// Update the state
tMap.currentDeviceState = tMap.transitions[trigger].currentState
- logger.Debugw("updated-device-state ", log.Fields{"current-device-state": tMap.currentDeviceState})
+ logger.Debugw(ctx, "updated-device-state ", log.Fields{"current-device-state": tMap.currentDeviceState})
// Invoke the after handlers
afterHandlers := tMap.transitions[trigger].after
if afterHandlers == nil {
- logger.Debugw("no-handlers-for-after", log.Fields{"trigger": trigger})
+ logger.Debugw(ctx, "no-handlers-for-after", log.Fields{"trigger": trigger})
}
for _, handler := range afterHandlers {
- logger.Debugw("running-after-handler", log.Fields{"handler": funcName(handler)})
+ logger.Debugw(ctx, "running-after-handler", log.Fields{"handler": funcName(handler)})
if err := handler(ctx); err != nil {
// TODO handle error
- logger.Error(err)
+ logger.Error(ctx, err)
return
}
}
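
olt_state_transitions.go now hands the same ctx to every before/after handler and to the logger, and a failing handler aborts the transition. Below is a small standalone sketch of that handler loop; transitionHandler and runHandlers are hypothetical names, not the types used in the file.

package main

import (
	"context"
	"fmt"
)

type transitionHandler func(ctx context.Context) error

func runHandlers(ctx context.Context, handlers []transitionHandler) error {
	for _, h := range handlers {
		if err := h(ctx); err != nil {
			// real code logs with logger.Error(ctx, err) and returns early
			return err
		}
	}
	return nil
}

func main() {
	err := runHandlers(context.Background(), []transitionHandler{
		func(ctx context.Context) error { fmt.Println("before-handler ran"); return nil },
	})
	fmt.Println("err:", err)
}
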
diff --git a/internal/pkg/core/openolt.go b/internal/pkg/core/openolt.go
index 4a25dba..fe33876 100644
--- a/internal/pkg/core/openolt.go
+++ b/internal/pkg/core/openolt.go
@@ -74,16 +74,16 @@
//Start starts (logs) the device manager
func (oo *OpenOLT) Start(ctx context.Context) error {
- logger.Info("starting-device-manager")
- logger.Info("device-manager-started")
+ logger.Info(ctx, "starting-device-manager")
+ logger.Info(ctx, "device-manager-started")
return nil
}
//Stop terminates the session
func (oo *OpenOLT) Stop(ctx context.Context) error {
- logger.Info("stopping-device-manager")
+ logger.Info(ctx, "stopping-device-manager")
oo.exitChannel <- 1
- logger.Info("device-manager-stopped")
+ logger.Info(ctx, "device-manager-stopped")
return nil
}
@@ -92,10 +92,10 @@
// Return the response only if the ctx has not been canceled/timed out/etc.
// Channel is automatically closed when a context is Done
ch <- result
- logger.Debugw("sendResponse", log.Fields{"result": result})
+ logger.Debugw(ctx, "sendResponse", log.Fields{"result": result})
} else {
// Should the transaction be reverted back?
- logger.Debugw("sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
+ logger.Debugw(ctx, "sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
}
}
@@ -123,24 +123,23 @@
}
//createDeviceTopic subscribes to the per-device Kafka topic
-func (oo *OpenOLT) createDeviceTopic(device *voltha.Device) error {
- logger.Infow("create-device-topic", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) createDeviceTopic(ctx context.Context, device *voltha.Device) error {
+ logger.Infow(ctx, "create-device-topic", log.Fields{"deviceId": device.Id})
defaultTopic := oo.kafkaICProxy.GetDefaultTopic()
deviceTopic := kafka.Topic{Name: defaultTopic.Name + "_" + device.Id}
// TODO for the offset
- if err := oo.kafkaICProxy.SubscribeWithDefaultRequestHandler(deviceTopic, 0); err != nil {
+ if err := oo.kafkaICProxy.SubscribeWithDefaultRequestHandler(ctx, deviceTopic, 0); err != nil {
return olterrors.NewErrAdapter("subscribe-for-device-topic-failed", log.Fields{"device-topic": deviceTopic}, err)
}
return nil
}
// Adopt_device creates a new device handler if not present already and then adopts the device
-func (oo *OpenOLT) Adopt_device(device *voltha.Device) error {
- ctx := context.Background()
+func (oo *OpenOLT) Adopt_device(ctx context.Context, device *voltha.Device) error {
if device == nil {
return olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil).Log()
}
- logger.Infow("adopt-device", log.Fields{"deviceId": device.Id})
+ logger.Infow(ctx, "adopt-device", log.Fields{"deviceId": device.Id})
var handler *DeviceHandler
if handler = oo.getDeviceHandler(device.Id); handler == nil {
handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
@@ -153,8 +152,8 @@
}
//Get_ofp_device_info returns OFP information for the given device
-func (oo *OpenOLT) Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error) {
- logger.Infow("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error) {
+ logger.Infow(ctx, "Get_ofp_device_info", log.Fields{"deviceId": device.Id})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
return handler.GetOfpDeviceInfo(device)
}
@@ -162,41 +161,40 @@
}
//Process_inter_adapter_message sends messages to a target device (between adapters)
-func (oo *OpenOLT) Process_inter_adapter_message(msg *ic.InterAdapterMessage) error {
- logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
+func (oo *OpenOLT) Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error {
+ logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": msg.Header.Id})
targetDevice := msg.Header.ProxyDeviceId // Request?
if targetDevice == "" && msg.Header.ToDeviceId != "" {
// Typical response
targetDevice = msg.Header.ToDeviceId
}
if handler := oo.getDeviceHandler(targetDevice); handler != nil {
- return handler.ProcessInterAdapterMessage(msg)
+ return handler.ProcessInterAdapterMessage(ctx, msg)
}
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": targetDevice}, nil)
}
//Adapter_descriptor not implemented
-func (oo *OpenOLT) Adapter_descriptor() error {
+func (oo *OpenOLT) Adapter_descriptor(ctx context.Context) error {
return olterrors.ErrNotImplemented
}
//Device_types unimplemented
-func (oo *OpenOLT) Device_types() (*voltha.DeviceTypes, error) {
+func (oo *OpenOLT) Device_types(ctx context.Context) (*voltha.DeviceTypes, error) {
return nil, olterrors.ErrNotImplemented
}
//Health returns unimplemented
-func (oo *OpenOLT) Health() (*voltha.HealthStatus, error) {
+func (oo *OpenOLT) Health(ctx context.Context) (*voltha.HealthStatus, error) {
return nil, olterrors.ErrNotImplemented
}
//Reconcile_device unimplemented
-func (oo *OpenOLT) Reconcile_device(device *voltha.Device) error {
- ctx := context.Background()
+func (oo *OpenOLT) Reconcile_device(ctx context.Context, device *voltha.Device) error {
if device == nil {
return olterrors.NewErrInvalidValue(log.Fields{"device": nil}, nil)
}
- logger.Infow("reconcile-device", log.Fields{"deviceId": device.Id})
+ logger.Infow(ctx, "reconcile-device", log.Fields{"deviceId": device.Id})
var handler *DeviceHandler
if handler = oo.getDeviceHandler(device.Id); handler == nil {
handler := NewDeviceHandler(oo.coreProxy, oo.adapterProxy, oo.eventProxy, device, oo)
@@ -208,49 +206,48 @@
}
//Abandon_device unimplemented
-func (oo *OpenOLT) Abandon_device(device *voltha.Device) error {
+func (oo *OpenOLT) Abandon_device(ctx context.Context, device *voltha.Device) error {
return olterrors.ErrNotImplemented
}
//Disable_device disables the given device
-func (oo *OpenOLT) Disable_device(device *voltha.Device) error {
- logger.Infow("disable-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Disable_device(ctx context.Context, device *voltha.Device) error {
+ logger.Infow(ctx, "disable-device", log.Fields{"deviceId": device.Id})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
- return handler.DisableDevice(device)
+ return handler.DisableDevice(ctx, device)
}
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
}
//Reenable_device enables the olt device after disable
-func (oo *OpenOLT) Reenable_device(device *voltha.Device) error {
- logger.Infow("reenable-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Reenable_device(ctx context.Context, device *voltha.Device) error {
+ logger.Infow(ctx, "reenable-device", log.Fields{"deviceId": device.Id})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
- return handler.ReenableDevice(device)
+ return handler.ReenableDevice(ctx, device)
}
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
}
//Reboot_device reboots the given device
-func (oo *OpenOLT) Reboot_device(device *voltha.Device) error {
- logger.Infow("reboot-device", log.Fields{"deviceId": device.Id})
+func (oo *OpenOLT) Reboot_device(ctx context.Context, device *voltha.Device) error {
+ logger.Infow(ctx, "reboot-device", log.Fields{"deviceId": device.Id})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
- return handler.RebootDevice(device)
+ return handler.RebootDevice(ctx, device)
}
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
}
//Self_test_device unimplemented
-func (oo *OpenOLT) Self_test_device(device *voltha.Device) error {
+func (oo *OpenOLT) Self_test_device(ctx context.Context, device *voltha.Device) error {
return olterrors.ErrNotImplemented
}
//Delete_device deletes the given device and removes its handler
-func (oo *OpenOLT) Delete_device(device *voltha.Device) error {
- logger.Infow("delete-device", log.Fields{"deviceId": device.Id})
- ctx := context.Background()
+func (oo *OpenOLT) Delete_device(ctx context.Context, device *voltha.Device) error {
+ logger.Infow(ctx, "delete-device", log.Fields{"deviceId": device.Id})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
if err := handler.DeleteDevice(ctx, device); err != nil {
- logger.Errorw("failed-to-handle-delete-device", log.Fields{"device-id": device.Id})
+ logger.Errorw(ctx, "failed-to-handle-delete-device", log.Fields{"device-id": device.Id})
}
oo.deleteDeviceHandlerToMap(handler)
return nil
@@ -259,19 +256,18 @@
}
//Get_device_details unimplemented
-func (oo *OpenOLT) Get_device_details(device *voltha.Device) error {
+func (oo *OpenOLT) Get_device_details(ctx context.Context, device *voltha.Device) error {
return olterrors.ErrNotImplemented
}
//Update_flows_bulk is unimplemented
-func (oo *OpenOLT) Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
+func (oo *OpenOLT) Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
return olterrors.ErrNotImplemented
}
//Update_flows_incrementally updates (add/remove) the flows on a given device
-func (oo *OpenOLT) Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
- logger.Debugw("Update_flows_incrementally", log.Fields{"deviceId": device.Id, "flows": flows, "flowMetadata": flowMetadata})
- ctx := context.Background()
+func (oo *OpenOLT) Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
+ logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"deviceId": device.Id, "flows": flows, "flowMetadata": flowMetadata})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
return handler.UpdateFlowsIncrementally(ctx, device, flows, groups, flowMetadata)
}
@@ -279,19 +275,18 @@
}
//Update_pm_config updates the PM configs for the given device
-func (oo *OpenOLT) Update_pm_config(device *voltha.Device, pmConfigs *voltha.PmConfigs) error {
- logger.Debugw("Update_pm_config", log.Fields{"device-id": device.Id, "pm-configs": pmConfigs})
+func (oo *OpenOLT) Update_pm_config(ctx context.Context, device *voltha.Device, pmConfigs *voltha.PmConfigs) error {
+ logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pm-configs": pmConfigs})
if handler := oo.getDeviceHandler(device.Id); handler != nil {
- handler.UpdatePmConfig(pmConfigs)
+ handler.UpdatePmConfig(ctx, pmConfigs)
return nil
}
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": device.Id}, nil)
}
//Receive_packet_out sends packet out to the device
-func (oo *OpenOLT) Receive_packet_out(deviceID string, egressPortNo int, packet *openflow_13.OfpPacketOut) error {
- logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceID, "egress_port_no": egressPortNo, "pkt": packet})
- ctx := context.Background()
+func (oo *OpenOLT) Receive_packet_out(ctx context.Context, deviceID string, egressPortNo int, packet *openflow_13.OfpPacketOut) error {
+ logger.Debugw(ctx, "Receive_packet_out", log.Fields{"deviceId": deviceID, "egress_port_no": egressPortNo, "pkt": packet})
if handler := oo.getDeviceHandler(deviceID); handler != nil {
return handler.PacketOut(ctx, egressPortNo, packet)
}
@@ -299,55 +294,55 @@
}
//Suppress_event unimplemented
-func (oo *OpenOLT) Suppress_event(filter *voltha.EventFilter) error {
+func (oo *OpenOLT) Suppress_event(ctx context.Context, filter *voltha.EventFilter) error {
return olterrors.ErrNotImplemented
}
//Unsuppress_event unimplemented
-func (oo *OpenOLT) Unsuppress_event(filter *voltha.EventFilter) error {
+func (oo *OpenOLT) Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error {
return olterrors.ErrNotImplemented
}
//Download_image unimplemented
-func (oo *OpenOLT) Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
return nil, olterrors.ErrNotImplemented
}
//Get_image_download_status unimplemented
-func (oo *OpenOLT) Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
return nil, olterrors.ErrNotImplemented
}
//Cancel_image_download unimplemented
-func (oo *OpenOLT) Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
return nil, olterrors.ErrNotImplemented
}
//Activate_image_update unimplemented
-func (oo *OpenOLT) Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
return nil, olterrors.ErrNotImplemented
}
//Revert_image_update unimplemented
-func (oo *OpenOLT) Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
+func (oo *OpenOLT) Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error) {
return nil, olterrors.ErrNotImplemented
}
// Enable_port enables a PON/NNI interface
-func (oo *OpenOLT) Enable_port(deviceID string, port *voltha.Port) error {
- logger.Infow("Enable_port", log.Fields{"deviceId": deviceID, "port": port})
- return oo.enableDisablePort(deviceID, port, true)
+func (oo *OpenOLT) Enable_port(ctx context.Context, deviceID string, port *voltha.Port) error {
+ logger.Infow(ctx, "Enable_port", log.Fields{"deviceId": deviceID, "port": port})
+ return oo.enableDisablePort(ctx, deviceID, port, true)
}
// Disable_port disables a PON/NNI interface
-func (oo *OpenOLT) Disable_port(deviceID string, port *voltha.Port) error {
- logger.Infow("Disable_port", log.Fields{"deviceId": deviceID, "port": port})
- return oo.enableDisablePort(deviceID, port, false)
+func (oo *OpenOLT) Disable_port(ctx context.Context, deviceID string, port *voltha.Port) error {
+ logger.Infow(ctx, "Disable_port", log.Fields{"deviceId": deviceID, "port": port})
+ return oo.enableDisablePort(ctx, deviceID, port, false)
}
// enableDisablePort enables or disables a PON interface
-func (oo *OpenOLT) enableDisablePort(deviceID string, port *voltha.Port, enablePort bool) error {
- logger.Infow("enableDisablePort", log.Fields{"deviceId": deviceID, "port": port})
+func (oo *OpenOLT) enableDisablePort(ctx context.Context, deviceID string, port *voltha.Port, enablePort bool) error {
+ logger.Infow(ctx, "enableDisablePort", log.Fields{"deviceId": deviceID, "port": port})
if port == nil {
return olterrors.NewErrInvalidValue(log.Fields{
"reason": "port cannot be nil",
@@ -355,13 +350,13 @@
"port": nil}, nil)
}
if handler := oo.getDeviceHandler(deviceID); handler != nil {
- logger.Debugw("Enable_Disable_Port", log.Fields{"deviceId": deviceID, "port": port})
+ logger.Debugw(ctx, "Enable_Disable_Port", log.Fields{"deviceId": deviceID, "port": port})
if enablePort {
- if err := handler.EnablePort(port); err != nil {
+ if err := handler.EnablePort(ctx, port); err != nil {
return olterrors.NewErrAdapter("error-occurred-during-enable-port", log.Fields{"deviceID": deviceID, "port": port}, err)
}
} else {
- if err := handler.DisablePort(port); err != nil {
+ if err := handler.DisablePort(ctx, port); err != nil {
return olterrors.NewErrAdapter("error-occurred-during-disable-port", log.Fields{"deviceID": deviceID, "port": port}, err)
}
}
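
The openolt.go hunks above repeat one migration: adapter methods no longer mint their own context.Background() but accept ctx and forward it to the device handler (and its logging). Below is a standalone sketch of that before/after shape; adapter, deviceHandler and DisableDevice here are hypothetical stand-ins, not the real types.

package main

import (
	"context"
	"fmt"
)

type deviceHandler struct{ id string }

func (dh *deviceHandler) disable(ctx context.Context) error {
	fmt.Println("disabling", dh.id) // real code would log with logger.Infow(ctx, ...)
	return nil
}

type adapter struct{ handlers map[string]*deviceHandler }

// before: func (a *adapter) DisableDevice(id string) error { ctx := context.Background(); ... }
// after: the caller's ctx flows straight through to the handler.
func (a *adapter) DisableDevice(ctx context.Context, id string) error {
	h, ok := a.handlers[id]
	if !ok {
		return fmt.Errorf("device-handler not found for %s", id)
	}
	return h.disable(ctx)
}

func main() {
	a := &adapter{handlers: map[string]*deviceHandler{"olt-1": {id: "olt-1"}}}
	_ = a.DisableDevice(context.Background(), "olt-1")
}
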
@@ -370,9 +365,8 @@
}
//Child_device_lost deletes the ONU and its references from PONResources
-func (oo *OpenOLT) Child_device_lost(deviceID string, pPortNo uint32, onuID uint32) error {
- logger.Infow("Child-device-lost", log.Fields{"parentId": deviceID})
- ctx := context.Background()
+func (oo *OpenOLT) Child_device_lost(ctx context.Context, deviceID string, pPortNo uint32, onuID uint32) error {
+ logger.Infow(ctx, "Child-device-lost", log.Fields{"parentId": deviceID})
if handler := oo.getDeviceHandler(deviceID); handler != nil {
return handler.ChildDeviceLost(ctx, pPortNo, onuID)
}
@@ -380,12 +374,12 @@
}
//Start_omci_test not implemented
-func (oo *OpenOLT) Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) {
+func (oo *OpenOLT) Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error) {
return nil, olterrors.ErrNotImplemented
}
//Get_ext_value retrieves a value on a particular ONU
-func (oo *OpenOLT) Get_ext_value(deviceID string, device *voltha.Device, valueparam voltha.ValueType_Type) (*voltha.ReturnValues, error) {
+func (oo *OpenOLT) Get_ext_value(ctx context.Context, deviceID string, device *voltha.Device, valueparam voltha.ValueType_Type) (*voltha.ReturnValues, error) {
var err error
resp := new(voltha.ReturnValues)
log.Infow("Get_ext_value", log.Fields{"device-id": deviceID, "onu-id": device.Id})
diff --git a/internal/pkg/core/openolt_eventmgr.go b/internal/pkg/core/openolt_eventmgr.go
index a11d3f7..76d1ca7 100644
--- a/internal/pkg/core/openolt_eventmgr.go
+++ b/internal/pkg/core/openolt_eventmgr.go
@@ -18,7 +18,7 @@
package core
import (
- ctx "context"
+ "context"
"errors"
"fmt"
"strconv"
@@ -103,60 +103,60 @@
// ProcessEvents processes and publishes OpenOLT events
// nolint: gocyclo
-func (em *OpenOltEventMgr) ProcessEvents(alarmInd *oop.AlarmIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) ProcessEvents(ctx context.Context, alarmInd *oop.AlarmIndication, deviceID string, raisedTs int64) error {
var err error
switch alarmInd.Data.(type) {
case *oop.AlarmIndication_LosInd:
- logger.Debugw("received-los-indication", log.Fields{"alarm-ind": alarmInd})
- err = em.oltLosIndication(alarmInd.GetLosInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-los-indication", log.Fields{"alarm-ind": alarmInd})
+ err = em.oltLosIndication(ctx, alarmInd.GetLosInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuAlarmInd:
- logger.Debugw("received-onu-alarm-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuAlarmIndication(alarmInd.GetOnuAlarmInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-alarm-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuAlarmIndication(ctx, alarmInd.GetOnuAlarmInd(), deviceID, raisedTs)
case *oop.AlarmIndication_DyingGaspInd:
- logger.Debugw("received-dying-gasp-indication", log.Fields{"alarm-ind": alarmInd})
- err = em.onuDyingGaspIndication(alarmInd.GetDyingGaspInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-dying-gasp-indication", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuDyingGaspIndication(ctx, alarmInd.GetDyingGaspInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuActivationFailInd:
- logger.Debugw("received-onu-activation-fail-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuActivationFailIndication(alarmInd.GetOnuActivationFailInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-activation-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuActivationFailIndication(ctx, alarmInd.GetOnuActivationFailInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuLossOmciInd:
- logger.Debugw("received-onu-loss-omci-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuLossOmciIndication(alarmInd.GetOnuLossOmciInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-loss-omci-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuLossOmciIndication(ctx, alarmInd.GetOnuLossOmciInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuDriftOfWindowInd:
- logger.Debugw("received-onu-drift-of-window-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuDriftOfWindowIndication(alarmInd.GetOnuDriftOfWindowInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-drift-of-window-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuDriftOfWindowIndication(ctx, alarmInd.GetOnuDriftOfWindowInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuSignalDegradeInd:
- logger.Debugw("received-onu-signal-degrade-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuSignalDegradeIndication(alarmInd.GetOnuSignalDegradeInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-signal-degrade-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuSignalDegradeIndication(ctx, alarmInd.GetOnuSignalDegradeInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuSignalsFailInd:
- logger.Debugw("received-onu-signal-fail-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuSignalsFailIndication(alarmInd.GetOnuSignalsFailInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-signal-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuSignalsFailIndication(ctx, alarmInd.GetOnuSignalsFailInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuStartupFailInd:
- logger.Debugw("received-onu-startup-fail-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuStartupFailedIndication(alarmInd.GetOnuStartupFailInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-startup-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuStartupFailedIndication(ctx, alarmInd.GetOnuStartupFailInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuTiwiInd:
- logger.Debugw("received-onu-transmission-warning-indication ", log.Fields{"alarm-ind": alarmInd})
- logger.Warnw("not-implemented-yet", log.Fields{"alarm-ind": "Onu-Transmission-indication"})
+ logger.Debugw(ctx, "received-onu-transmission-warning-indication ", log.Fields{"alarm-ind": alarmInd})
+ logger.Warnw(ctx, "not-implemented-yet", log.Fields{"alarm-ind": "Onu-Transmission-indication"})
case *oop.AlarmIndication_OnuLossOfSyncFailInd:
- logger.Debugw("received-onu-loss-of-sync-fail-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuLossOfSyncIndication(alarmInd.GetOnuLossOfSyncFailInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-loss-of-sync-fail-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuLossOfSyncIndication(ctx, alarmInd.GetOnuLossOfSyncFailInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuItuPonStatsInd:
- logger.Debugw("received-onu-itu-pon-stats-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuItuPonStatsIndication(alarmInd.GetOnuItuPonStatsInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-itu-pon-stats-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuItuPonStatsIndication(ctx, alarmInd.GetOnuItuPonStatsInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuDeactivationFailureInd:
- logger.Debugw("received-onu-deactivation-failure-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuDeactivationFailureIndication(alarmInd.GetOnuDeactivationFailureInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-deactivation-failure-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuDeactivationFailureIndication(ctx, alarmInd.GetOnuDeactivationFailureInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuLossGemDelineationInd:
- logger.Debugw("received-onu-loss-of-gem-channel-delineation-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuLossOfGEMChannelDelineationIndication(alarmInd.GetOnuLossGemDelineationInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-loss-of-gem-channel-delineation-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuLossOfGEMChannelDelineationIndication(ctx, alarmInd.GetOnuLossGemDelineationInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuPhysicalEquipmentErrorInd:
- logger.Debugw("received-onu-physical-equipment-error-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuPhysicalEquipmentErrorIndication(alarmInd.GetOnuPhysicalEquipmentErrorInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-physical-equipment-error-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuPhysicalEquipmentErrorIndication(ctx, alarmInd.GetOnuPhysicalEquipmentErrorInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuLossOfAckInd:
- logger.Debugw("received-onu-loss-of-acknowledgement-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuLossOfAcknowledgementIndication(alarmInd.GetOnuLossOfAckInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-loss-of-acknowledgement-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuLossOfAcknowledgementIndication(ctx, alarmInd.GetOnuLossOfAckInd(), deviceID, raisedTs)
case *oop.AlarmIndication_OnuDiffReachExceededInd:
- logger.Debugw("received-onu-differential-reach-exceeded-indication ", log.Fields{"alarm-ind": alarmInd})
- err = em.onuDifferentialReachExceededIndication(alarmInd.GetOnuDiffReachExceededInd(), deviceID, raisedTs)
+ logger.Debugw(ctx, "received-onu-differential-reach-exceeded-indication ", log.Fields{"alarm-ind": alarmInd})
+ err = em.onuDifferentialReachExceededIndication(ctx, alarmInd.GetOnuDiffReachExceededInd(), deviceID, raisedTs)
default:
err = olterrors.NewErrInvalidValue(log.Fields{"indication-type": alarmInd}, nil)
}
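
ProcessEvents above dispatches each alarm with the same ctx it received, and every per-alarm handler passes that ctx on to eventProxy.SendDeviceEvent. Below is a standalone sketch of that flow; alarm, sendFn and the event names are hypothetical stand-ins for the oop indication types and the real proxy call.

package main

import (
	"context"
	"fmt"
)

type alarm struct{ kind string }

type sendFn func(ctx context.Context, eventName string) error

func processAlarm(ctx context.Context, a alarm, send sendFn) error {
	switch a.kind {
	case "los":
		return send(ctx, "OLT_LOS_RAISE_EVENT")
	case "dying-gasp":
		return send(ctx, "ONU_DYING_GASP_EVENT")
	default:
		return fmt.Errorf("unknown alarm kind %q", a.kind)
	}
}

func main() {
	send := func(ctx context.Context, name string) error {
		fmt.Println("published", name)
		return nil
	}
	_ = processAlarm(context.Background(), alarm{kind: "los"}, send)
}
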
@@ -167,7 +167,7 @@
}
// oltUpDownIndication handles Up and Down state of an OLT
-func (em *OpenOltEventMgr) oltUpDownIndication(oltIndication *oop.OltIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltUpDownIndication(ctx context.Context, oltIndication *oop.OltIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -181,15 +181,15 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", oltIndicationDown, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
return olterrors.NewErrCommunication("send-olt-event", log.Fields{"device-id": deviceID}, err)
}
- logger.Debugw("olt-updown-event-sent-to-kafka", log.Fields{})
+ logger.Debugw(ctx, "olt-updown-event-sent-to-kafka", log.Fields{})
return nil
}
// OnuDiscoveryIndication is an exported method to handle ONU discovery event
-func (em *OpenOltEventMgr) OnuDiscoveryIndication(onuDisc *oop.OnuDiscIndication, oltDeviceID string, onuDeviceID string, OnuID uint32, serialNumber string, raisedTs int64) error {
+func (em *OpenOltEventMgr) OnuDiscoveryIndication(ctx context.Context, onuDisc *oop.OnuDiscIndication, oltDeviceID string, onuDeviceID string, OnuID uint32, serialNumber string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -202,20 +202,20 @@
de.ResourceId = oltDeviceID
de.DeviceEventName = fmt.Sprintf("%s_%s", onuDiscoveryEvent, "RAISE_EVENT")
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, pon, raisedTs); err != nil {
return olterrors.NewErrCommunication("send-onu-discovery-event",
log.Fields{
"serial-number": serialNumber,
"intf-id": onuDisc.IntfId}, err)
}
- logger.Debugw("onu-discovery-event-sent-to-kafka",
+ logger.Debugw(ctx, "onu-discovery-event-sent-to-kafka",
log.Fields{
"serial-number": serialNumber,
"intf-id": onuDisc.IntfId})
return nil
}
-func (em *OpenOltEventMgr) oltLosIndication(oltLos *oop.LosIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltLosIndication(ctx context.Context, oltLos *oop.LosIndication, deviceID string, raisedTs int64) error {
var err error = nil
var de voltha.DeviceEvent
var alarmInd oop.OnuAlarmIndication
@@ -239,7 +239,7 @@
alarmInd.IntfId = ponIntdID
alarmInd.OnuId = onuInCache.(*OnuDevice).onuID
alarmInd.LosStatus = statusCheckOn
- err = em.onuAlarmIndication(&alarmInd, deviceID, raisedTs)
+ err = em.onuAlarmIndication(ctx, &alarmInd, deviceID, raisedTs)
}
return true
})
@@ -251,14 +251,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", oltLosEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
return err
}
- logger.Debugw("olt-los-event-sent-to-kafka", log.Fields{"intf-id": oltLos.IntfId})
+ logger.Debugw(ctx, "olt-los-event-sent-to-kafka", log.Fields{"intf-id": oltLos.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuDyingGaspIndication(dgi *oop.DyingGaspIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDyingGaspIndication(ctx context.Context, dgi *oop.DyingGaspIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
var serialNumber string
context := make(map[string]string)
@@ -276,22 +276,22 @@
de.ResourceId = deviceID
de.DeviceEventName = fmt.Sprintf("%s_%s", onuDyingGaspEvent, "EVENT")
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-dying-gasp-event-sent-to-kafka", log.Fields{"intf-id": dgi.IntfId})
+ logger.Debugw(ctx, "onu-dying-gasp-event-sent-to-kafka", log.Fields{"intf-id": dgi.IntfId})
return nil
}
//wasLosRaised checks whether LoS has already been raised; returns true if so, else false
-func (em *OpenOltEventMgr) wasLosRaised(onuAlarm *oop.OnuAlarmIndication) bool {
+func (em *OpenOltEventMgr) wasLosRaised(ctx context.Context, onuAlarm *oop.OnuAlarmIndication) bool {
onuKey := em.handler.formOnuKey(onuAlarm.IntfId, onuAlarm.OnuId)
if onuInCache, ok := em.handler.onus.Load(onuKey); ok {
- logger.Debugw("onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
+ logger.Debugw(ctx, "onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
if onuAlarm.LosStatus == statusCheckOn {
if onuInCache.(*OnuDevice).losRaised {
- logger.Warnw("onu-los-raised-already", log.Fields{"onu_id": onuAlarm.OnuId,
+ logger.Warnw(ctx, "onu-los-raised-already", log.Fields{"onu_id": onuAlarm.OnuId,
"intf_id": onuAlarm.IntfId, "LosStatus": onuAlarm.LosStatus})
return true
}
@@ -302,14 +302,14 @@
}
//wasLosCleared checks whether LoS has already been cleared; returns true if so, else false
-func (em *OpenOltEventMgr) wasLosCleared(onuAlarm *oop.OnuAlarmIndication) bool {
+func (em *OpenOltEventMgr) wasLosCleared(ctx context.Context, onuAlarm *oop.OnuAlarmIndication) bool {
onuKey := em.handler.formOnuKey(onuAlarm.IntfId, onuAlarm.OnuId)
if onuInCache, ok := em.handler.onus.Load(onuKey); ok {
- logger.Debugw("onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
+ logger.Debugw(ctx, "onu-device-found-in-cache.", log.Fields{"intfID": onuAlarm.IntfId, "onuID": onuAlarm.OnuId})
if onuAlarm.LosStatus == statusCheckOff {
if !onuInCache.(*OnuDevice).losRaised {
- logger.Warnw("onu-los-cleared-already", log.Fields{"onu_id": onuAlarm.OnuId,
+ logger.Warnw(ctx, "onu-los-cleared-already", log.Fields{"onu_id": onuAlarm.OnuId,
"intf_id": onuAlarm.IntfId, "LosStatus": onuAlarm.LosStatus})
return true
}
@@ -349,7 +349,7 @@
return deviceEventName
}
-func (em *OpenOltEventMgr) onuAlarmIndication(onuAlarm *oop.OnuAlarmIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuAlarmIndication(ctx context.Context, onuAlarm *oop.OnuAlarmIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
var serialNumber string
context := make(map[string]string)
@@ -370,7 +370,7 @@
switch onuAlarm.LosStatus {
case statusCheckOn:
- if em.wasLosRaised(onuAlarm) {
+ if em.wasLosRaised(ctx, onuAlarm) {
/* No need to raise the ONU LoS event as it might already have been raised,
or the ONU might have been deleted */
return nil
@@ -383,7 +383,7 @@
onuInCache.(*OnuDevice).proxyDeviceID, true))
}
case statusCheckOff:
- if em.wasLosCleared(onuAlarm) {
+ if em.wasLosCleared(ctx, onuAlarm) {
/* No need to clear the ONU LoS event as it might already have been cleared,
or the ONU might have been deleted */
return nil
@@ -398,14 +398,14 @@
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-los-event-sent-to-kafka", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
+ logger.Debugw(ctx, "onu-los-event-sent-to-kafka", log.Fields{"onu-id": onuAlarm.OnuId, "intf-id": onuAlarm.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuActivationFailIndication(oaf *oop.OnuActivationFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuActivationFailIndication(ctx context.Context, oaf *oop.OnuActivationFailureIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -417,14 +417,14 @@
de.ResourceId = deviceID
de.DeviceEventName = fmt.Sprintf("%s_%s", onuActivationFailEvent, "RAISE_EVENT")
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, equipment, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-activation-failure-event-sent-to-kafka", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
+ logger.Debugw(ctx, "onu-activation-failure-event-sent-to-kafka", log.Fields{"onu-id": oaf.OnuId, "intf-id": oaf.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuLossOmciIndication(onuLossOmci *oop.OnuLossOfOmciChannelIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOmciIndication(ctx context.Context, onuLossOmci *oop.OnuLossOfOmciChannelIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -439,14 +439,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOmciEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-loss-of-omci-channel-event-sent-to-kafka", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
+ logger.Debugw(ctx, "onu-loss-of-omci-channel-event-sent-to-kafka", log.Fields{"onu-id": onuLossOmci.OnuId, "intf-id": onuLossOmci.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuDriftOfWindowIndication(onuDriftWindow *oop.OnuDriftOfWindowIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDriftOfWindowIndication(ctx context.Context, onuDriftWindow *oop.OnuDriftOfWindowIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -463,14 +463,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuDriftOfWindowEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-drift-of-window-event-sent-to-kafka", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
+ logger.Debugw(ctx, "onu-drift-of-window-event-sent-to-kafka", log.Fields{"onu-id": onuDriftWindow.OnuId, "intf-id": onuDriftWindow.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuSignalDegradeIndication(onuSignalDegrade *oop.OnuSignalDegradeIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuSignalDegradeIndication(ctx context.Context, onuSignalDegrade *oop.OnuSignalDegradeIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -486,14 +486,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalDegradeEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-signal-degrade-event-sent-to-kafka", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
+ logger.Debugw(ctx, "onu-signal-degrade-event-sent-to-kafka", log.Fields{"onu-id": onuSignalDegrade.OnuId, "intf-id": onuSignalDegrade.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuSignalsFailIndication(onuSignalsFail *oop.OnuSignalsFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuSignalsFailIndication(ctx context.Context, onuSignalsFail *oop.OnuSignalsFailureIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -509,14 +509,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuSignalsFailEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-signals-fail-event-sent-to-kafka", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
+ logger.Debugw(ctx, "onu-signals-fail-event-sent-to-kafka", log.Fields{"onu-id": onuSignalsFail.OnuId, "intf-id": onuSignalsFail.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuStartupFailedIndication(onuStartupFail *oop.OnuStartupFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuStartupFailedIndication(ctx context.Context, onuStartupFail *oop.OnuStartupFailureIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -532,14 +532,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuStartupFailEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, pon, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, pon, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-startup-fail-event-sent-to-kafka", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
+ logger.Debugw(ctx, "onu-startup-fail-event-sent-to-kafka", log.Fields{"onu-id": onuStartupFail.OnuId, "intf-id": onuStartupFail.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuLossOfSyncIndication(onuLOKI *oop.OnuLossOfKeySyncFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfSyncIndication(ctx context.Context, onuLOKI *oop.OnuLossOfKeySyncFailureIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -555,27 +555,27 @@
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, security, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, security, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-loss-of-key-sync-event-sent-to-kafka", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
+ logger.Debugw(ctx, "onu-loss-of-key-sync-event-sent-to-kafka", log.Fields{"onu-id": onuLOKI.OnuId, "intf-id": onuLOKI.IntfId})
return nil
}
// oltIntfOperIndication handles Up and Down state of an OLT PON ports
-func (em *OpenOltEventMgr) oltIntfOperIndication(ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) oltIntfOperIndication(ctx context.Context, ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
- context := make(map[string]string)
portID := IntfIDToPortNo(ifindication.IntfId, voltha.Port_PON_OLT)
- device, err := em.handler.coreProxy.GetDevice(ctx.Background(), deviceID, deviceID)
+ device, err := em.handler.coreProxy.GetDevice(context.Background(), deviceID, deviceID)
if err != nil {
return olterrors.NewErrAdapter("error-while-fetching-device-object", log.Fields{"DeviceId": deviceID}, err)
}
+ context := make(map[string]string)
for _, port := range device.Ports {
if port.PortNo == portID {
// Events are suppressed if the Port AdminState is not enabled.
if port.AdminState != common.AdminState_ENABLED {
- logger.Debugw("port-disable/enable-event-not-generated--the-port-is-not-enabled-by-operator", log.Fields{"deviceId": deviceID, "port": port})
+ logger.Debugw(ctx, "port-disable/enable-event-not-generated--the-port-is-not-enabled-by-operator", log.Fields{"deviceId": deviceID, "port": port})
return nil
}
break
@@ -593,14 +593,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", ponIntfDownIndiction, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, communication, olt, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, communication, olt, raisedTs); err != nil {
return olterrors.NewErrCommunication("send-olt-intf-oper-status-event", log.Fields{"device-id": deviceID, "intf-id": ifindication.IntfId, "oper-state": ifindication.OperState}, err).Log()
}
- logger.Debug("sent-olt-intf-oper-status-event-to-kafka")
+ logger.Debug(ctx, "sent-olt-intf-oper-status-event-to-kafka")
return nil
}
-func (em *OpenOltEventMgr) onuDeactivationFailureIndication(onuDFI *oop.OnuDeactivationFailureIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDeactivationFailureIndication(ctx context.Context, onuDFI *oop.OnuDeactivationFailureIndication, deviceID string, raisedTs int64) error {
var de voltha.DeviceEvent
context := make(map[string]string)
/* Populating event context */
@@ -615,13 +615,13 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuDeactivationFailureEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(&de, equipment, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, &de, equipment, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-deactivation-failure-event-sent-to-kafka", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
+ logger.Debugw(ctx, "onu-deactivation-failure-event-sent-to-kafka", log.Fields{"onu-id": onuDFI.OnuId, "intf-id": onuDFI.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuRemoteDefectIndication(onuID uint32, intfID uint32, rdiCount uint64, status string, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuRemoteDefectIndication(ctx context.Context, onuID uint32, intfID uint32, rdiCount uint64, status string, deviceID string, raisedTs int64) error {
/* Populating event context */
context := map[string]string{
"onu-id": strconv.FormatUint(uint64(onuID), base10),
@@ -639,29 +639,29 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuRemoteDefectIndication, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-remote-defect-event-sent-to-kafka", log.Fields{"onu-id": onuID, "intf-id": intfID})
+ logger.Debugw(ctx, "onu-remote-defect-event-sent-to-kafka", log.Fields{"onu-id": onuID, "intf-id": intfID})
return nil
}
-func (em *OpenOltEventMgr) onuItuPonStatsIndication(onuIPS *oop.OnuItuPonStatsIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuItuPonStatsIndication(ctx context.Context, onuIPS *oop.OnuItuPonStatsIndication, deviceID string, raisedTs int64) error {
onuDevice, found := em.handler.onus.Load(em.handler.formOnuKey(onuIPS.IntfId, onuIPS.OnuId))
if !found {
return errors.New("unknown-onu-device")
}
if onuIPS.GetRdiErrorInd().Status == statusCheckOn {
if !onuDevice.(*OnuDevice).rdiRaised {
- if err := em.onuRemoteDefectIndication(onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOn, deviceID, raisedTs); err != nil {
+ if err := em.onuRemoteDefectIndication(ctx, onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOn, deviceID, raisedTs); err != nil {
return err
}
onuDevice.(*OnuDevice).rdiRaised = true
return nil
}
- logger.Debugw("onu-remote-defect-already-raised", log.Fields{"onu-id": onuIPS.OnuId, "intf-id": onuIPS.IntfId})
+ logger.Debugw(ctx, "onu-remote-defect-already-raised", log.Fields{"onu-id": onuIPS.OnuId, "intf-id": onuIPS.IntfId})
} else {
- if err := em.onuRemoteDefectIndication(onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOff, deviceID, raisedTs); err != nil {
+ if err := em.onuRemoteDefectIndication(ctx, onuIPS.OnuId, onuIPS.IntfId, onuIPS.GetRdiErrorInd().RdiErrorCount, statusCheckOff, deviceID, raisedTs); err != nil {
return err
}
onuDevice.(*OnuDevice).rdiRaised = false
@@ -669,7 +669,7 @@
return nil
}
-func (em *OpenOltEventMgr) onuLossOfGEMChannelDelineationIndication(onuGCD *oop.OnuLossOfGEMChannelDelineationIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfGEMChannelDelineationIndication(ctx context.Context, onuGCD *oop.OnuLossOfGEMChannelDelineationIndication, deviceID string, raisedTs int64) error {
/* Populating event context */
context := map[string]string{
"onu-id": strconv.FormatUint(uint64(onuGCD.OnuId), base10),
@@ -687,14 +687,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfGEMChannelDelineationEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(de, communication, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, de, communication, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-loss-of-gem-channel-delineation-event-sent-to-kafka", log.Fields{"onu-id": onuGCD.OnuId, "intf-id": onuGCD.IntfId})
+ logger.Debugw(ctx, "onu-loss-of-gem-channel-delineation-event-sent-to-kafka", log.Fields{"onu-id": onuGCD.OnuId, "intf-id": onuGCD.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuPhysicalEquipmentErrorIndication(onuErr *oop.OnuPhysicalEquipmentErrorIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuPhysicalEquipmentErrorIndication(ctx context.Context, onuErr *oop.OnuPhysicalEquipmentErrorIndication, deviceID string, raisedTs int64) error {
/* Populating event context */
context := map[string]string{
"onu-id": strconv.FormatUint(uint64(onuErr.OnuId), base10),
@@ -711,14 +711,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuPhysicalEquipmentErrorEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuErr.OnuId, "intf-id": onuErr.IntfId})
+ logger.Debugw(ctx, "onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuErr.OnuId, "intf-id": onuErr.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuLossOfAcknowledgementIndication(onuLOA *oop.OnuLossOfAcknowledgementIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuLossOfAcknowledgementIndication(ctx context.Context, onuLOA *oop.OnuLossOfAcknowledgementIndication, deviceID string, raisedTs int64) error {
/* Populating event context */
context := map[string]string{
"onu-id": strconv.FormatUint(uint64(onuLOA.OnuId), base10),
@@ -735,14 +735,14 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuLossOfAcknowledgementEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
return err
}
- logger.Debugw("onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuLOA.OnuId, "intf-id": onuLOA.IntfId})
+ logger.Debugw(ctx, "onu-physical-equipment-error-event-sent-to-kafka", log.Fields{"onu-id": onuLOA.OnuId, "intf-id": onuLOA.IntfId})
return nil
}
-func (em *OpenOltEventMgr) onuDifferentialReachExceededIndication(onuDRE *oop.OnuDifferentialReachExceededIndication, deviceID string, raisedTs int64) error {
+func (em *OpenOltEventMgr) onuDifferentialReachExceededIndication(ctx context.Context, onuDRE *oop.OnuDifferentialReachExceededIndication, deviceID string, raisedTs int64) error {
/* Populating event context */
context := map[string]string{
"onu-id": strconv.FormatUint(uint64(onuDRE.OnuId), base10),
@@ -760,7 +760,7 @@
de.DeviceEventName = fmt.Sprintf("%s_%s", onuDifferentialReachExceededEvent, "CLEAR_EVENT")
}
/* Send event to KAFKA */
- if err := em.eventProxy.SendDeviceEvent(de, equipment, onu, raisedTs); err != nil {
+ if err := em.eventProxy.SendDeviceEvent(ctx, de, equipment, onu, raisedTs); err != nil {
return err
}
log.Debugw("onu-differential-reach-exceeded–event-sent-to-kafka", log.Fields{"onu-id": onuDRE.OnuId, "intf-id": onuDRE.IntfId})
diff --git a/internal/pkg/core/openolt_eventmgr_test.go b/internal/pkg/core/openolt_eventmgr_test.go
index ebbf976..fbc3539 100644
--- a/internal/pkg/core/openolt_eventmgr_test.go
+++ b/internal/pkg/core/openolt_eventmgr_test.go
@@ -18,12 +18,12 @@
package core
import (
+ "context"
+ "github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+ oop "github.com/opencord/voltha-protos/v3/go/openolt"
"sync"
"testing"
"time"
-
- "github.com/opencord/voltha-openolt-adapter/pkg/mocks"
- oop "github.com/opencord/voltha-protos/v3/go/openolt"
)
func mockEventMgr() *OpenOltEventMgr {
@@ -132,7 +132,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- em.ProcessEvents(tt.args.alarmInd, tt.args.deviceID, tt.args.raisedTs)
+ em.ProcessEvents(context.Background(), tt.args.alarmInd, tt.args.deviceID, tt.args.raisedTs)
})
}
}
@@ -158,7 +158,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- em.OnuDiscoveryIndication(tt.args.onuDisc, tt.args.oltDeviceID, tt.args.onuDeviceID, tt.args.OnuID, tt.args.serialNumber, tt.args.raisedTs)
+ em.OnuDiscoveryIndication(context.Background(), tt.args.onuDisc, tt.args.oltDeviceID, tt.args.onuDeviceID, tt.args.OnuID, tt.args.serialNumber, tt.args.raisedTs)
})
}
}
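Illustrative sketch, not part of the patch: on the test side, callers that hold no request-scoped context supply context.Background() when invoking the updated signatures, as the ProcessEvents and OnuDiscoveryIndication call sites above do. doSomething below is a hypothetical stand-in for such a method.

package core

import (
	"context"
	"testing"
)

// doSomething is a hypothetical stand-in for a method whose signature gained a
// leading context.Context, such as em.ProcessEvents above.
func doSomething(ctx context.Context, deviceID string) {}

func TestContextIsThreaded(t *testing.T) {
	tests := []struct {
		name     string
		deviceID string
	}{
		{"passes-background-context", "olt-1"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// tests hold no request-scoped context, so context.Background() is used,
			// matching the updated call sites above
			doSomething(context.Background(), tt.deviceID)
		})
	}
}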
diff --git a/internal/pkg/core/openolt_flowmgr.go b/internal/pkg/core/openolt_flowmgr.go
index d14194e..b31c312 100644
--- a/internal/pkg/core/openolt_flowmgr.go
+++ b/internal/pkg/core/openolt_flowmgr.go
@@ -228,7 +228,7 @@
//NewFlowManager creates OpenOltFlowMgr object and initializes the parameters
func NewFlowManager(ctx context.Context, dh *DeviceHandler, rMgr *rsrcMgr.OpenOltResourceMgr) *OpenOltFlowMgr {
- logger.Infow("initializing-flow-manager", log.Fields{"device-id": dh.device.Id})
+ logger.Infow(ctx, "initializing-flow-manager", log.Fields{"device-id": dh.device.Id})
var flowMgr OpenOltFlowMgr
var err error
var idx uint32
@@ -236,8 +236,8 @@
flowMgr.deviceHandler = dh
flowMgr.resourceMgr = rMgr
flowMgr.techprofile = make(map[uint32]tp.TechProfileIf)
- if err = flowMgr.populateTechProfilePerPonPort(); err != nil {
- logger.Errorw("error-while-populating-tech-profile-mgr", log.Fields{"error": err})
+ if err = flowMgr.populateTechProfilePerPonPort(ctx); err != nil {
+ logger.Errorw(ctx, "error-while-populating-tech-profile-mgr", log.Fields{"error": err})
return nil
}
flowMgr.onuIdsLock = sync.RWMutex{}
@@ -248,7 +248,7 @@
//Load the onugem info cache from kv store on flowmanager start
for idx = 0; idx < ponPorts; idx++ {
if flowMgr.onuGemInfo[idx], err = rMgr.GetOnuGemInfo(ctx, idx); err != nil {
- logger.Error("failed-to-load-onu-gem-info-cache")
+ logger.Error(ctx, "failed-to-load-onu-gem-info-cache")
}
//Load flowID list per gem map per interface from the kvstore.
flowMgr.loadFlowIDlistForGem(ctx, idx)
@@ -259,19 +259,19 @@
flowMgr.interfaceToMcastQueueMap = make(map[uint32]*queueInfoBrief)
//load interface to multicast queue map from kv store
flowMgr.loadInterfaceToMulticastQueueMap(ctx)
- logger.Info("initialization-of-flow-manager-success")
+ logger.Info(ctx, "initialization-of-flow-manager-success")
return &flowMgr
}
-func (f *OpenOltFlowMgr) generateStoredFlowID(flowID uint32, direction string) (uint64, error) {
+func (f *OpenOltFlowMgr) generateStoredFlowID(ctx context.Context, flowID uint32, direction string) (uint64, error) {
if direction == Upstream {
- logger.Debugw("upstream-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Debugw(ctx, "upstream-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
return 0x1<<15 | uint64(flowID), nil
} else if direction == Downstream {
- logger.Debugw("downstream-flow-not-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Debugw(ctx, "downstream-flow-not-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
return uint64(flowID), nil
} else if direction == Multicast {
- logger.Debugw("multicast-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Debugw(ctx, "multicast-flow-shifting-id", log.Fields{"device-id": f.deviceHandler.device.Id})
return 0x2<<15 | uint64(flowID), nil
} else {
return 0, olterrors.NewErrInvalidValue(log.Fields{"direction": direction}, nil).Log()
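Illustrative sketch, not part of the patch: the stored-flow-ID scheme used by generateStoredFlowID above widens the 32-bit device flow ID to 64 bits and marks the direction in the upper bits: bit 15 for upstream (0x1<<15), bit 16 for multicast (0x2<<15), and no marker for downstream. encodeStoredFlowID below is a hypothetical standalone mirror of that branching; the direction constants are local stand-ins.

package main

import "fmt"

// local stand-ins for the adapter's direction constants
const (
	Upstream   = "upstream"
	Downstream = "downstream"
	Multicast  = "multicast"
)

// encodeStoredFlowID mirrors the branching shown in generateStoredFlowID above.
func encodeStoredFlowID(flowID uint32, direction string) (uint64, error) {
	switch direction {
	case Upstream:
		// upstream: set bit 15
		return 0x1<<15 | uint64(flowID), nil
	case Downstream:
		// downstream: keep the raw flow ID
		return uint64(flowID), nil
	case Multicast:
		// multicast: set bit 16
		return 0x2<<15 | uint64(flowID), nil
	default:
		return 0, fmt.Errorf("invalid direction %q", direction)
	}
}

func main() {
	id, _ := encodeStoredFlowID(42, Upstream)
	fmt.Printf("0x%x\n", id) // prints 0x802a
}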
@@ -279,7 +279,7 @@
}
func (f *OpenOltFlowMgr) registerFlow(ctx context.Context, flowFromCore *ofp.OfpFlowStats, deviceFlow *openoltpb2.Flow) {
- logger.Debugw("registering-flow-for-device ",
+ logger.Debugw(ctx, "registering-flow-for-device ",
log.Fields{
"flow": flowFromCore,
"device-id": f.deviceHandler.device.Id})
@@ -301,7 +301,7 @@
var gemPorts []uint32
var TpInst interface{}
- logger.Infow("dividing-flow", log.Fields{
+ logger.Infow(ctx, "dividing-flow", log.Fields{
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID,
"onu-id": onuID,
@@ -316,7 +316,7 @@
// is because the flow is an NNI flow and there would be no onu resources associated with it
// TODO: properly deal with NNI flows
if onuID <= 0 {
- logger.Errorw("no-onu-id-for-flow",
+ logger.Errorw(ctx, "no-onu-id-for-flow",
log.Fields{
"port-no": portNo,
"classifer": classifierInfo,
@@ -326,13 +326,13 @@
}
uni := getUniPortPath(f.deviceHandler.device.Id, intfID, int32(onuID), int32(uniID))
- logger.Debugw("uni-port-path", log.Fields{
+ logger.Debugw(ctx, "uni-port-path", log.Fields{
"uni": uni,
"device-id": f.deviceHandler.device.Id})
tpLockMapKey := tpLockKey{intfID, onuID, uniID}
if f.perUserFlowHandleLock.TryLock(tpLockMapKey) {
- logger.Debugw("dividing-flow-create-tcont-gem-ports", log.Fields{
+ logger.Debugw(ctx, "dividing-flow-create-tcont-gem-ports", log.Fields{
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID,
"onu-id": onuID,
@@ -345,7 +345,7 @@
"tp-id": TpID})
allocID, gemPorts, TpInst = f.createTcontGemports(ctx, intfID, onuID, uniID, uni, portNo, TpID, UsMeterID, DsMeterID, flowMetadata)
if allocID == 0 || gemPorts == nil || TpInst == nil {
- logger.Error("alloc-id-gem-ports-tp-unavailable")
+ logger.Error(ctx, "alloc-id-gem-ports-tp-unavailable")
f.perUserFlowHandleLock.Unlock(tpLockMapKey)
return
}
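Illustrative sketch, not part of the patch: the perUserFlowHandleLock.TryLock/Unlock pair above serializes tech-profile and gem-port creation per (intfID, onuID, uniID), so concurrent flow adds for the same UNI do not race. perKeyLock below is a hypothetical minimal stand-in for that lock map; it relies on sync.Mutex.TryLock from Go 1.18+.

package main

import (
	"fmt"
	"sync"
)

// tpLockKey mirrors the (intfID, onuID, uniID) key used above.
type tpLockKey struct{ intfID, onuID, uniID uint32 }

// perKeyLock is a hypothetical minimal stand-in for perUserFlowHandleLock.
type perKeyLock struct {
	mu    sync.Mutex
	locks map[tpLockKey]*sync.Mutex
}

func newPerKeyLock() *perKeyLock {
	return &perKeyLock{locks: make(map[tpLockKey]*sync.Mutex)}
}

func (p *perKeyLock) TryLock(k tpLockKey) bool {
	p.mu.Lock()
	l, ok := p.locks[k]
	if !ok {
		l = &sync.Mutex{}
		p.locks[k] = l
	}
	p.mu.Unlock()
	return l.TryLock() // sync.Mutex.TryLock needs Go 1.18+
}

func (p *perKeyLock) Unlock(k tpLockKey) {
	p.mu.Lock()
	l, ok := p.locks[k]
	p.mu.Unlock()
	if ok {
		l.Unlock()
	}
}

func main() {
	locks := newPerKeyLock()
	key := tpLockKey{intfID: 0, onuID: 1, uniID: 0}
	if locks.TryLock(key) {
		defer locks.Unlock(key)
		fmt.Println("acquired per-UNI lock, safe to create tconts and gem ports")
	} else {
		fmt.Println("another flow add for this UNI is already in progress")
	}
}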
@@ -362,7 +362,7 @@
f.checkAndAddFlow(ctx, args, classifierInfo, actionInfo, flow, TpInst, gemPorts, TpID, uni)
f.perUserFlowHandleLock.Unlock(tpLockMapKey)
} else {
- logger.Errorw("failed-to-acquire-per-user-flow-handle-lock",
+ logger.Errorw(ctx, "failed-to-acquire-per-user-flow-handle-lock",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -377,7 +377,7 @@
// CreateSchedulerQueues creates traffic schedulers on the device with the given scheduler configuration and traffic shaping info
func (f *OpenOltFlowMgr) CreateSchedulerQueues(ctx context.Context, sq schedQueue) error {
- logger.Debugw("CreateSchedulerQueues",
+ logger.Debugw(ctx, "CreateSchedulerQueues",
log.Fields{"dir": sq.direction,
"intf-id": sq.intfID,
"onu-id": sq.onuID,
@@ -410,7 +410,7 @@
if KvStoreMeter != nil {
if KvStoreMeter.MeterId == sq.meterID {
- logger.Debugw("scheduler-already-created-for-upstream", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Debugw(ctx, "scheduler-already-created-for-upstream", log.Fields{"device-id": f.deviceHandler.device.Id})
return nil
}
return olterrors.NewErrInvalidValue(log.Fields{
@@ -420,16 +420,16 @@
"device-id": f.deviceHandler.device.Id}, nil)
}
- logger.Debugw("meter-does-not-exist-creating-new",
+ logger.Debugw(ctx, "meter-does-not-exist-creating-new",
log.Fields{
"meter-id": sq.meterID,
"direction": Direction,
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_UPSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst.(*tp.TechProfile))
+ SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst.(*tp.TechProfile))
+ SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
}
if err != nil {
@@ -446,14 +446,14 @@
for _, meter := range sq.flowMetadata.Meters {
if sq.meterID == meter.MeterId {
meterConfig = meter
- logger.Debugw("found-meter-config-from-flowmetadata",
+ logger.Debugw(ctx, "found-meter-config-from-flowmetadata",
log.Fields{"meterConfig": meterConfig,
"device-id": f.deviceHandler.device.Id})
break
}
}
} else {
- logger.Errorw("flow-metadata-not-present-in-flow", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Errorw(ctx, "flow-metadata-not-present-in-flow", log.Fields{"device-id": f.deviceHandler.device.Id})
}
if meterConfig == nil {
return olterrors.NewErrNotFound("meterbands", log.Fields{
@@ -462,7 +462,7 @@
"meter-id": sq.meterID,
"device-id": f.deviceHandler.device.Id}, nil)
} else if len(meterConfig.Bands) < MaxMeterBand {
- logger.Errorw("invalid-number-of-bands-in-meter",
+ logger.Errorw(ctx, "invalid-number-of-bands-in-meter",
log.Fields{"Bands": meterConfig.Bands,
"meter-id": sq.meterID,
"device-id": f.deviceHandler.device.Id})
@@ -500,7 +500,7 @@
"meter-id": sq.meterID,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Infow("updated-meter-info-into-kv-store-successfully",
+ logger.Infow(ctx, "updated-meter-info-into-kv-store-successfully",
log.Fields{"direction": Direction,
"Meter": meterConfig,
"device-id": f.deviceHandler.device.Id})
@@ -508,8 +508,7 @@
}
func (f *OpenOltFlowMgr) pushSchedulerQueuesToDevice(ctx context.Context, sq schedQueue, TrafficShaping *tp_pb.TrafficShapingInfo, TrafficSched []*tp_pb.TrafficScheduler) error {
-
- trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst.(*tp.TechProfile), sq.direction)
+ trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
if err != nil {
return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
@@ -518,7 +517,7 @@
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Debugw("sending-traffic-scheduler-create-to-device",
+ logger.Debugw(ctx, "sending-traffic-scheduler-create-to-device",
log.Fields{
"direction": sq.direction,
"TrafficScheds": TrafficSched,
@@ -529,14 +528,14 @@
TrafficScheds: TrafficSched}); err != nil {
return olterrors.NewErrAdapter("failed-to-create-traffic-schedulers-in-device", log.Fields{"TrafficScheds": TrafficSched}, err)
}
- logger.Infow("successfully-created-traffic-schedulers", log.Fields{
+ logger.Infow(ctx, "successfully-created-traffic-schedulers", log.Fields{
"direction": sq.direction,
"traffic-queues": trafficQueues,
"device-id": f.deviceHandler.device.Id})
// On receiving the CreateTrafficQueues request, the driver should create corresponding
// downstream queues.
- logger.Debugw("sending-traffic-queues-create-to-device",
+ logger.Debugw(ctx, "sending-traffic-queues-create-to-device",
log.Fields{"direction": sq.direction,
"traffic-queues": trafficQueues,
"device-id": f.deviceHandler.device.Id})
@@ -547,19 +546,19 @@
TechProfileId: TrafficSched[0].TechProfileId}); err != nil {
return olterrors.NewErrAdapter("failed-to-create-traffic-queues-in-device", log.Fields{"traffic-queues": trafficQueues}, err)
}
- logger.Infow("successfully-created-traffic-schedulers", log.Fields{
+ logger.Infow(ctx, "successfully-created-traffic-schedulers", log.Fields{
"direction": sq.direction,
"traffic-queues": trafficQueues,
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_DOWNSTREAM {
- multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(sq.tpInst.(*tp.TechProfile))
+ multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile))
if len(multicastTrafficQueues) > 0 {
if _, present := f.interfaceToMcastQueueMap[sq.intfID]; !present {
//assumed that there is only one queue per PON for the multicast service
//the default queue with multicastQueuePerPonPort.Priority per a pon interface is used for multicast service
//just put it in interfaceToMcastQueueMap to use for building group members
- logger.Debugw("multicast-traffic-queues", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Debugw(ctx, "multicast-traffic-queues", log.Fields{"device-id": f.deviceHandler.device.Id})
multicastQueuePerPonPort := multicastTrafficQueues[0]
f.interfaceToMcastQueueMap[sq.intfID] = &queueInfoBrief{
gemPortID: multicastQueuePerPonPort.GemportId,
@@ -570,7 +569,7 @@
multicastQueuePerPonPort.GemportId,
multicastQueuePerPonPort.Priority)
- logger.Infow("multicast-queues-successfully-updated", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "multicast-queues-successfully-updated", log.Fields{"device-id": f.deviceHandler.device.Id})
}
}
}
@@ -583,7 +582,7 @@
var Direction string
var SchedCfg *tp_pb.SchedulerConfig
var err error
- logger.Infow("removing-schedulers-and-queues-in-olt",
+ logger.Infow(ctx, "removing-schedulers-and-queues-in-olt",
log.Fields{
"direction": sq.direction,
"intf-id": sq.intfID,
@@ -592,10 +591,10 @@
"uni-port": sq.uniPort,
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_UPSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(sq.tpInst.(*tp.TechProfile))
+ SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
Direction = "upstream"
} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(sq.tpInst.(*tp.TechProfile))
+ SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
Direction = "downstream"
}
@@ -615,7 +614,7 @@
"device-id": f.deviceHandler.device.Id}, err)
}
if KVStoreMeter == nil {
- logger.Warnw("no-meter-installed-yet",
+ logger.Warnw(ctx, "no-meter-installed-yet",
log.Fields{
"direction": Direction,
"intf-id": sq.intfID,
@@ -636,7 +635,7 @@
TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst.(*tp.TechProfile), SchedCfg, TrafficShaping)}
TrafficSched[0].TechProfileId = sq.tpID
- TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(sq.tpInst.(*tp.TechProfile), sq.direction)
+ TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
if err != nil {
return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
log.Fields{
@@ -656,7 +655,7 @@
"traffic-queues": TrafficQueues,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Infow("removed-traffic-queues-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "removed-traffic-queues-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
if _, err = f.deviceHandler.Client.RemoveTrafficSchedulers(ctx, &tp_pb.TrafficSchedulers{
IntfId: sq.intfID, OnuId: sq.onuID,
UniId: sq.uniID, PortNo: sq.uniPort,
@@ -667,7 +666,7 @@
"traffic-schedulers": TrafficSched}, err)
}
- logger.Infow("removed-traffic-schedulers-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "removed-traffic-schedulers-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
/* After we successfully remove the scheduler configuration on the OLT device,
* delete the meter id on the KV store.
@@ -680,7 +679,7 @@
"meter": KVStoreMeter.MeterId,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Infow("removed-meter-from-KV-store-successfully",
+ logger.Infow(ctx, "removed-meter-from-KV-store-successfully",
log.Fields{
"meter-id": KVStoreMeter.MeterId,
"dir": Direction,
@@ -699,9 +698,9 @@
allocIDs = f.resourceMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
allgemPortIDs = f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
- tpPath := f.getTPpath(intfID, uni, TpID)
+ tpPath := f.getTPpath(ctx, intfID, uni, TpID)
- logger.Debugw("creating-new-tcont-and-gem", log.Fields{
+ logger.Debugw(ctx, "creating-new-tcont-and-gem", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
@@ -711,14 +710,14 @@
// Check tech profile instance already exists for derived port name
techProfileInstance, _ := f.techprofile[intfID].GetTPInstanceFromKVStore(ctx, TpID, tpPath)
if techProfileInstance == nil {
- logger.Infow("tp-instance-not-found--creating-new",
+ logger.Infow(ctx, "tp-instance-not-found--creating-new",
log.Fields{
"path": tpPath,
"device-id": f.deviceHandler.device.Id})
techProfileInstance, err = f.techprofile[intfID].CreateTechProfInstance(ctx, TpID, uni, intfID)
if err != nil {
// This should not happen, something wrong in KV backend transaction
- logger.Errorw("tp-instance-create-failed",
+ logger.Errorw(ctx, "tp-instance-create-failed",
log.Fields{
"error": err,
"tp-id": TpID,
@@ -727,7 +726,7 @@
}
f.resourceMgr.UpdateTechProfileIDForOnu(ctx, intfID, onuID, uniID, TpID)
} else {
- logger.Debugw("tech-profile-instance-already-exist-for-given port-name",
+ logger.Debugw(ctx, "tech-profile-instance-already-exist-for-given port-name",
log.Fields{
"uni": uni,
"device-id": f.deviceHandler.device.Id})
@@ -740,7 +739,7 @@
sq := schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
uniPort: uniPort, tpInst: techProfileInstance, meterID: UsMeterID, flowMetadata: flowMetadata}
if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
- logger.Errorw("CreateSchedulerQueues-failed-upstream",
+ logger.Errorw(ctx, "CreateSchedulerQueues-failed-upstream",
log.Fields{
"error": err,
"meter-id": UsMeterID,
@@ -752,7 +751,7 @@
sq := schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
uniPort: uniPort, tpInst: techProfileInstance, meterID: DsMeterID, flowMetadata: flowMetadata}
if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
- logger.Errorw("CreateSchedulerQueues-failed-downstream",
+ logger.Errorw(ctx, "CreateSchedulerQueues-failed-downstream",
log.Fields{
"error": err,
"meter-id": DsMeterID,
@@ -773,7 +772,7 @@
for _, gemPortID := range gemPortIDs {
allgemPortIDs = appendUnique(allgemPortIDs, gemPortID)
}
- logger.Infow("allocated-tcont-and-gem-ports",
+ logger.Infow(ctx, "allocated-tcont-and-gem-ports",
log.Fields{
"alloc-ids": allocIDs,
"gemports": allgemPortIDs,
@@ -797,7 +796,7 @@
for _, gemPortID := range gemPortIDs {
allgemPortIDs = appendUnique(allgemPortIDs, gemPortID)
}
- logger.Infow("allocated-tcont-and-gem-ports",
+ logger.Infow(ctx, "allocated-tcont-and-gem-ports",
log.Fields{
"alloc-ids": allocIDs,
"gemports": allgemPortIDs,
@@ -806,17 +805,16 @@
f.storeTcontsGEMPortsIntoKVStore(ctx, intfID, onuID, uniID, allocIDs, allgemPortIDs)
return allocID, gemPortIDs, techProfileInstance
default:
- logger.Errorw("unknown-tech",
+ logger.Errorw(ctx, "unknown-tech",
log.Fields{
"tpInst": tpInst})
return 0, nil, nil
}
-
}
func (f *OpenOltFlowMgr) storeTcontsGEMPortsIntoKVStore(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID []uint32, gemPortIDs []uint32) {
- logger.Debugw("storing-allocated-tconts-and-gem-ports-into-KV-store",
+ logger.Debugw(ctx, "storing-allocated-tconts-and-gem-ports-into-KV-store",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -826,27 +824,27 @@
"device-id": f.deviceHandler.device.Id})
/* Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store */
if err := f.resourceMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocID); err != nil {
- logger.Errorw("error-while-uploading-allocid-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Errorw(ctx, "error-while-uploading-allocid-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
}
if err := f.resourceMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs); err != nil {
- logger.Errorw("error-while-uploading-gemports-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Errorw(ctx, "error-while-uploading-gemports-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
}
if err := f.resourceMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, gemPortIDs, intfID, onuID, uniID); err != nil {
- logger.Error("error-while-uploading-gemtopon-map-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Error(ctx, "error-while-uploading-gemtopon-map-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
}
- logger.Infow("stored-tconts-and-gem-into-kv-store-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "stored-tconts-and-gem-into-kv-store-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
for _, gemPort := range gemPortIDs {
f.addGemPortToOnuInfoMap(ctx, intfID, onuID, gemPort)
}
}
-func (f *OpenOltFlowMgr) populateTechProfilePerPonPort() error {
+func (f *OpenOltFlowMgr) populateTechProfilePerPonPort(ctx context.Context) error {
var tpCount int
for _, techRange := range f.resourceMgr.DevInfo.Ranges {
for _, intfID := range techRange.IntfIds {
f.techprofile[intfID] = f.resourceMgr.ResourceMgrs[uint32(intfID)].TechProfileMgr
tpCount++
- logger.Debugw("init-tech-profile-done",
+ logger.Debugw(ctx, "init-tech-profile-done",
log.Fields{
"intf-id": intfID,
"device-id": f.deviceHandler.device.Id})
@@ -860,7 +858,7 @@
"pon-port-count": f.resourceMgr.DevInfo.GetPonPorts(),
"device-id": f.deviceHandler.device.Id}, nil)
}
- logger.Infow("populated-techprofile-for-ponports-successfully",
+ logger.Infow(ctx, "populated-techprofile-for-ponports-successfully",
log.Fields{
"numofTech": tpCount,
"numPonPorts": f.resourceMgr.DevInfo.GetPonPorts(),
@@ -873,7 +871,7 @@
uplinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
allocID uint32, gemportID uint32, tpID uint32) error {
uplinkClassifier[PacketTagType] = SingleTag
- logger.Debugw("adding-upstream-data-flow",
+ logger.Debugw(ctx, "adding-upstream-data-flow",
log.Fields{
"uplinkClassifier": uplinkClassifier,
"uplinkAction": uplinkAction})
@@ -887,7 +885,7 @@
downlinkAction map[string]interface{}, logicalFlow *ofp.OfpFlowStats,
allocID uint32, gemportID uint32, tpID uint32) error {
downlinkClassifier[PacketTagType] = DoubleTag
- logger.Debugw("adding-downstream-data-flow",
+ logger.Debugw(ctx, "adding-downstream-data-flow",
log.Fields{
"downlinkClassifier": downlinkClassifier,
"downlinkAction": downlinkAction})
@@ -895,8 +893,8 @@
if vlan, exists := downlinkClassifier[VlanVid]; exists {
if vlan.(uint32) == (uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4000) { //private VLAN given by core
if metadata, exists := downlinkClassifier[Metadata]; exists { // inport is filled in metadata by core
- if uint32(metadata.(uint64)) == MkUniPortNum(intfID, onuID, uniID) {
- logger.Infow("ignoring-dl-trap-device-flow-from-core",
+ if uint32(metadata.(uint64)) == MkUniPortNum(ctx, intfID, onuID, uniID) {
+ logger.Infow(ctx, "ignoring-dl-trap-device-flow-from-core",
log.Fields{
"flow": logicalFlow,
"device-id": f.deviceHandler.device.Id,
@@ -935,7 +933,7 @@
takes priority over flow_cookie to find any available HSIA_FLOW
id for the ONU.
*/
- logger.Infow("adding-hsia-flow",
+ logger.Infow(ctx, "adding-hsia-flow",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -951,14 +949,14 @@
var vlanVid uint32
if _, ok := classifier[VlanPcp]; ok {
vlanPbit = classifier[VlanPcp].(uint32)
- logger.Debugw("found-pbit-in-flow",
+ logger.Debugw(ctx, "found-pbit-in-flow",
log.Fields{
"vlan-pbit": vlanPbit,
"intf-id": intfID,
"onu-id": onuID,
"device-id": f.deviceHandler.device.Id})
} else {
- logger.Debugw("pbit-not-found-in-flow",
+ logger.Debugw(ctx, "pbit-not-found-in-flow",
log.Fields{
"vlan-pcp": VlanPcp,
"intf-id": intfID,
@@ -974,9 +972,9 @@
"onu-id": onuID,
"device-id": f.deviceHandler.device.Id})
}
- flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+ flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("flow-already-exists",
+ logger.Infow(ctx, "flow-already-exists",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID,
@@ -997,7 +995,7 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Debugw("created-classifier-proto",
+ logger.Debugw(ctx, "created-classifier-proto",
log.Fields{
"classifier": *classifierProto,
"device-id": f.deviceHandler.device.Id})
@@ -1005,11 +1003,11 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"action": action, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Debugw("created-action-proto",
+ logger.Debugw(ctx, "created-action-proto",
log.Fields{
"action": *actionProto,
"device-id": f.deviceHandler.device.Id})
- networkIntfID, err := getNniIntfID(classifier, action)
+ networkIntfID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-interface-id",
log.Fields{
@@ -1036,7 +1034,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, nil, err).Log()
}
- logger.Infow("hsia-flow-added-to-device-successfully",
+ logger.Infow(ctx, "hsia-flow-added-to-device-successfully",
log.Fields{"direction": direction,
"device-id": f.deviceHandler.device.Id,
"flow": flow,
@@ -1061,7 +1059,7 @@
classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32,
gemPortID uint32, tpID uint32) error {
- networkIntfID, err := getNniIntfID(classifier, action)
+ networkIntfID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
"classifier": classifier,
@@ -1081,9 +1079,9 @@
classifier[PacketTagType] = SingleTag
delete(classifier, VlanVid)
- flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+ flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("flow-exists--not-re-adding",
+ logger.Infow(ctx, "flow-exists--not-re-adding",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID,
@@ -1103,7 +1101,7 @@
err).Log()
}
- logger.Debugw("creating-ul-dhcp-flow",
+ logger.Debugw(ctx, "creating-ul-dhcp-flow",
log.Fields{
"ul_classifier": classifier,
"ul_action": action,
@@ -1116,7 +1114,7 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err).Log()
}
- logger.Debugw("created-classifier-proto", log.Fields{"classifier": *classifierProto})
+ logger.Debugw(ctx, "created-classifier-proto", log.Fields{"classifier": *classifierProto})
actionProto, err := makeOpenOltActionField(action, classifier)
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err).Log()
@@ -1140,7 +1138,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &dhcpFlow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, log.Fields{"dhcp-flow": dhcpFlow}, err).Log()
}
- logger.Infow("dhcp-ul-flow-added-to-device-successfully",
+ logger.Infow(ctx, "dhcp-ul-flow-added-to-device-successfully",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"flow-id": flowID,
@@ -1170,7 +1168,7 @@
func (f *OpenOltFlowMgr) addUpstreamTrapFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32, classifier map[string]interface{},
action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32, gemPortID uint32, flowType string, tpID uint32) error {
- networkIntfID, err := getNniIntfID(classifier, action)
+ networkIntfID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-interface-id",
log.Fields{
@@ -1189,9 +1187,9 @@
classifier[PacketTagType] = SingleTag
delete(classifier, VlanVid)
- flowStoreCookie := getFlowStoreCookie(classifier, gemPortID)
+ flowStoreCookie := getFlowStoreCookie(ctx, classifier, gemPortID)
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkIntfID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("flow-exists-not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "flow-exists-not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
return nil
}
@@ -1209,7 +1207,7 @@
err).Log()
}
- logger.Debugw("creating-upstream-trap-flow",
+ logger.Debugw(ctx, "creating-upstream-trap-flow",
log.Fields{
"ul_classifier": classifier,
"ul_action": action,
@@ -1223,7 +1221,7 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Debugw("created-classifier-proto",
+ logger.Debugw(ctx, "created-classifier-proto",
log.Fields{
"classifier": *classifierProto,
"device-id": f.deviceHandler.device.Id})
@@ -1251,7 +1249,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": flow, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Infof("%s ul-flow-added-to-device-successfully", flowType)
+ logger.Infof(ctx, "%s ul-flow-added-to-device-successfully", flowType)
flowsToKVStore := f.getUpdatedFlowInfo(ctx, &flow, flowStoreCookie, flowType, flowID, logicalFlow.Id)
if err := f.updateFlowInfoToKVStore(ctx, flow.AccessIntfId,
@@ -1268,7 +1266,7 @@
func (f *OpenOltFlowMgr) addEAPOLFlow(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, portNo uint32,
classifier map[string]interface{}, action map[string]interface{}, logicalFlow *ofp.OfpFlowStats, allocID uint32,
gemPortID uint32, vlanID uint32, tpID uint32) error {
- logger.Infow("adding-eapol-to-device",
+ logger.Infow(ctx, "adding-eapol-to-device",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -1288,9 +1286,9 @@
uplinkClassifier[VlanPcp] = classifier[VlanPcp]
// Fill action
uplinkAction[TrapToHost] = true
- flowStoreCookie := getFlowStoreCookie(uplinkClassifier, gemPortID)
+ flowStoreCookie := getFlowStoreCookie(ctx, uplinkClassifier, gemPortID)
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(intfID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("flow-exists-not-re-adding", log.Fields{
+ logger.Infow(ctx, "flow-exists-not-re-adding", log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
"intf-id": intfID})
@@ -1307,7 +1305,7 @@
"device-id": f.deviceHandler.device.Id},
err).Log()
}
- logger.Debugw("creating-ul-eapol-flow",
+ logger.Debugw(ctx, "creating-ul-eapol-flow",
log.Fields{
"ul_classifier": uplinkClassifier,
"ul_action": uplinkAction,
@@ -1322,7 +1320,7 @@
"classifier": uplinkClassifier,
"device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Debugw("created-classifier-proto",
+ logger.Debugw(ctx, "created-classifier-proto",
log.Fields{
"classifier": *classifierProto,
"device-id": f.deviceHandler.device.Id})
@@ -1330,11 +1328,11 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"action": uplinkAction, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- logger.Debugw("created-action-proto",
+ logger.Debugw(ctx, "created-action-proto",
log.Fields{
"action": *actionProto,
"device-id": f.deviceHandler.device.Id})
- networkIntfID, err := getNniIntfID(classifier, action)
+ networkIntfID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
"classifier": classifier,
@@ -1361,7 +1359,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &upstreamFlow); err != nil {
return olterrors.NewErrFlowOp("add", uplinkFlowID, log.Fields{"flow": upstreamFlow}, err).Log()
}
- logger.Infow("eapol-ul-flow-added-to-device-successfully",
+ logger.Infow(ctx, "eapol-ul-flow-added-to-device-successfully",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
@@ -1459,8 +1457,8 @@
}
// getTPpath return the ETCD path for a given UNI port
-func (f *OpenOltFlowMgr) getTPpath(intfID uint32, uniPath string, TpID uint32) string {
- return f.techprofile[intfID].GetTechProfileInstanceKVPath(TpID, uniPath)
+func (f *OpenOltFlowMgr) getTPpath(ctx context.Context, intfID uint32, uniPath string, TpID uint32) string {
+ return f.techprofile[intfID].GetTechProfileInstanceKVPath(ctx, TpID, uniPath)
}
// DeleteTechProfileInstances removes the tech profile instances from persistent storage
@@ -1494,12 +1492,12 @@
return nil
}
-func getFlowStoreCookie(classifier map[string]interface{}, gemPortID uint32) uint64 {
+func getFlowStoreCookie(ctx context.Context, classifier map[string]interface{}, gemPortID uint32) uint64 {
if len(classifier) == 0 { // should never happen
- logger.Error("invalid-classfier-object")
+ logger.Error(ctx, "invalid-classfier-object")
return 0
}
- logger.Debugw("generating-flow-store-cookie",
+ logger.Debugw(ctx, "generating-flow-store-cookie",
log.Fields{
"classifier": classifier,
"gemport-id": gemPortID})
@@ -1508,7 +1506,7 @@
var err error
// TODO: Do we need to marshall ??
if jsonData, err = json.Marshal(classifier); err != nil {
- logger.Error("failed-to-encode-classifier")
+ logger.Error(ctx, "failed-to-encode-classifier")
return 0
}
flowString = string(jsonData)
@@ -1520,7 +1518,7 @@
hash := big.NewInt(0)
hash.SetBytes(h.Sum(nil))
generatedHash := hash.Uint64()
- logger.Debugw("hash-generated", log.Fields{"hash": generatedHash})
+ logger.Debugw(ctx, "hash-generated", log.Fields{"hash": generatedHash})
return generatedHash
}
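Illustrative sketch, not part of the patch: getFlowStoreCookie above derives a deduplication cookie by JSON-encoding the classifier, combining it with the gem port ID, hashing the result and folding the digest into a uint64 via math/big. The hash constructor and the exact way gemPortID is appended fall outside this hunk, so md5 and the string concatenation below are assumptions for illustration only.

package main

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
	"math/big"
)

// flowStoreCookie is a hypothetical standalone mirror of the derivation above.
func flowStoreCookie(classifier map[string]interface{}, gemPortID uint32) uint64 {
	if len(classifier) == 0 { // should never happen
		return 0
	}
	jsonData, err := json.Marshal(classifier)
	if err != nil {
		return 0
	}
	// assumed combination of classifier and gem port ID; the real layout is not shown in this hunk
	flowString := string(jsonData) + fmt.Sprint(gemPortID)
	h := md5.New() // assumed hash; the adapter's choice of h is outside this hunk
	_, _ = h.Write([]byte(flowString))
	hash := big.NewInt(0)
	hash.SetBytes(h.Sum(nil))
	// fold the digest into a uint64, mirroring hash.Uint64() in the adapter code above
	return hash.Uint64()
}

func main() {
	c := map[string]interface{}{"eth_type": 0x888e, "pkt_tag_type": "single_tag"}
	fmt.Println(flowStoreCookie(c, 1024))
}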
@@ -1538,7 +1536,7 @@
// Get existing flows matching flowid for given subscriber from KV store
existingFlows := f.resourceMgr.GetFlowIDInfo(ctx, intfID, flow.OnuId, flow.UniId, flow.FlowId)
if existingFlows != nil {
- logger.Debugw("flow-exists-for-given-flowID--appending-it-to-current-flow",
+ logger.Debugw(ctx, "flow-exists-for-given-flowID--appending-it-to-current-flow",
log.Fields{
"flow-id": flow.FlowId,
"device-id": f.deviceHandler.device.Id,
@@ -1549,7 +1547,7 @@
//}
flows = append(flows, *existingFlows...)
}
- logger.Debugw("updated-flows-for-given-flowID-and-onuid",
+ logger.Debugw(ctx, "updated-flows-for-given-flowID-and-onuid",
log.Fields{
"updatedflow": flows,
"flow-id": flow.FlowId,
@@ -1572,30 +1570,30 @@
// // Get existing flows matching flowid for given subscriber from KV store
// existingFlows := f.resourceMgr.GetFlowIDInfo(intfId, uint32(flow.OnuId), uint32(flow.UniId), flow.FlowId)
// if existingFlows != nil {
-// logger.Debugw("Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
+// logger.Debugw(ctx, "Flow exists for given flowID, appending it to current flow", log.Fields{"flowID": flow.FlowId})
// for _, f := range *existingFlows {
// flows = append(flows, f)
// }
// }
-// logger.Debugw("Updated flows for given flowID and onuid", log.Fields{"updatedflow": flows, "flowid": flow.FlowId, "onu": flow.OnuId})
+// logger.Debugw(ctx, "Updated flows for given flowID and onuid", log.Fields{"updatedflow": flows, "flowid": flow.FlowId, "onu": flow.OnuId})
// return &flows
//}
func (f *OpenOltFlowMgr) updateFlowInfoToKVStore(ctx context.Context, intfID int32, onuID int32, uniID int32, flowID uint32, flows *[]rsrcMgr.FlowInfo) error {
- logger.Debugw("storing-flow(s)-into-kv-store", log.Fields{
+ logger.Debugw(ctx, "storing-flow(s)-into-kv-store", log.Fields{
"flow-id": flowID,
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID,
"onu-id": onuID})
if err := f.resourceMgr.UpdateFlowIDInfo(ctx, intfID, onuID, uniID, flowID, flows); err != nil {
- logger.Warnw("error-while-storing-flow-into-kv-store", log.Fields{
+ logger.Warnw(ctx, "error-while-storing-flow-into-kv-store", log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
"intf-id": intfID,
"flow-id": flowID})
return err
}
- logger.Infow("stored-flow(s)-into-kv-store-successfully!", log.Fields{
+ logger.Infow(ctx, "stored-flow(s)-into-kv-store-successfully!", log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
"intf-id": intfID,
@@ -1616,7 +1614,7 @@
intfID = uint32(deviceFlow.NetworkIntfId)
}
- logger.Debugw("sending-flow-to-device-via-grpc", log.Fields{
+ logger.Debugw(ctx, "sending-flow-to-device-via-grpc", log.Fields{
"flow": *deviceFlow,
"device-id": f.deviceHandler.device.Id,
"intf-id": intfID})
@@ -1624,7 +1622,7 @@
st, _ := status.FromError(err)
if st.Code() == codes.AlreadyExists {
- logger.Debug("flow-already-exists", log.Fields{
+ logger.Debug(ctx, "flow-already-exists", log.Fields{
"err": err,
"deviceFlow": deviceFlow,
"device-id": f.deviceHandler.device.Id,
@@ -1633,7 +1631,7 @@
}
if err != nil {
- logger.Errorw("failed-to-add-flow-to-device",
+ logger.Errorw(ctx, "failed-to-add-flow-to-device",
log.Fields{"err": err,
"device-flow": deviceFlow,
"device-id": f.deviceHandler.device.Id,
@@ -1645,7 +1643,7 @@
// No need to register the flow if it is a trap on nni flow.
f.registerFlow(ctx, logicalFlow, deviceFlow)
}
- logger.Infow("flow-added-to-device-successfully ",
+ logger.Infow(ctx, "flow-added-to-device-successfully ",
log.Fields{
"flow": *deviceFlow,
"device-id": f.deviceHandler.device.Id,
@@ -1653,15 +1651,15 @@
return nil
}
-func (f *OpenOltFlowMgr) removeFlowFromDevice(deviceFlow *openoltpb2.Flow, ofFlowID uint64) error {
- logger.Debugw("sending-flow-to-device-via-grpc",
+func (f *OpenOltFlowMgr) removeFlowFromDevice(ctx context.Context, deviceFlow *openoltpb2.Flow, ofFlowID uint64) error {
+ logger.Debugw(ctx, "sending-flow-to-device-via-grpc",
log.Fields{
"flow": *deviceFlow,
"device-id": f.deviceHandler.device.Id})
_, err := f.deviceHandler.Client.FlowRemove(context.Background(), deviceFlow)
if err != nil {
if f.deviceHandler.device.ConnectStatus == common.ConnectStatus_UNREACHABLE {
- logger.Warnw("can-not-remove-flow-from-device--unreachable",
+ logger.Warnw(ctx, "can-not-remove-flow-from-device--unreachable",
log.Fields{
"err": err,
"deviceFlow": deviceFlow,
@@ -1672,7 +1670,7 @@
return olterrors.NewErrFlowOp("remove", deviceFlow.FlowId, log.Fields{"deviceFlow": deviceFlow}, err)
}
- logger.Infow("flow-removed-from-device-successfully", log.Fields{
+ logger.Infow(ctx, "flow-removed-from-device-successfully", log.Fields{
"of-flow-id": ofFlowID,
"flow": *deviceFlow,
"device-id": f.deviceHandler.device.Id,
@@ -1687,13 +1685,13 @@
func generateStoredId(flowId uint32, direction string)uint32{
if direction == Upstream{
- logger.Debug("Upstream flow shifting flowid")
+ logger.Debug(ctx, "Upstream flow shifting flowid")
return ((0x1 << 15) | flowId)
}else if direction == Downstream{
- logger.Debug("Downstream flow not shifting flowid")
+ logger.Debug(ctx, "Downstream flow not shifting flowid")
return flowId
}else{
- logger.Errorw("Unrecognized direction",log.Fields{"direction": direction})
+ logger.Errorw(ctx, "Unrecognized direction",log.Fields{"direction": direction})
return flowId
}
}
@@ -1725,13 +1723,13 @@
var uniID = -1
var gemPortID = -1
- networkInterfaceID, err := IntfIDFromNniPortNum(portNo)
+ networkInterfaceID, err := IntfIDFromNniPortNum(ctx, portNo)
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"nni-port-number": portNo}, err).Log()
}
- var flowStoreCookie = getFlowStoreCookie(classifierInfo, uint32(0))
+ var flowStoreCookie = getFlowStoreCookie(ctx, classifierInfo, uint32(0))
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("flow-exists--not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
+ logger.Infow(ctx, "flow-exists--not-re-adding", log.Fields{"device-id": f.deviceHandler.device.Id})
return nil
}
flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0)
@@ -1754,7 +1752,7 @@
"classifier": classifierInfo,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Debugw("created-classifier-proto",
+ logger.Debugw(ctx, "created-classifier-proto",
log.Fields{
"classifier": *classifierProto,
"device-id": f.deviceHandler.device.Id})
@@ -1765,7 +1763,7 @@
"action": actionInfo,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Debugw("created-action-proto",
+ logger.Debugw(ctx, "created-action-proto",
log.Fields{
"action": *actionProto,
"device-id": f.deviceHandler.device.Id})
@@ -1788,7 +1786,7 @@
"flow": downstreamflow,
"device-id": f.deviceHandler.device.Id}, err)
}
- logger.Infow("lldp-trap-on-nni-flow-added-to-device-successfully",
+ logger.Infow(ctx, "lldp-trap-on-nni-flow-added-to-device-successfully",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
@@ -1811,16 +1809,16 @@
}
//getOnuDevice to fetch onu from cache or core.
-func (f *OpenOltFlowMgr) getOnuDevice(intfID uint32, onuID uint32) (*OnuDevice, error) {
+func (f *OpenOltFlowMgr) getOnuDevice(ctx context.Context, intfID uint32, onuID uint32) (*OnuDevice, error) {
onuKey := f.deviceHandler.formOnuKey(intfID, onuID)
onuDev, ok := f.deviceHandler.onus.Load(onuKey)
if !ok {
- logger.Debugw("couldnt-find-onu-in-cache",
+ logger.Debugw(ctx, "couldnt-find-onu-in-cache",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"device-id": f.deviceHandler.device.Id})
- onuDevice, err := f.getChildDevice(intfID, onuID)
+ onuDevice, err := f.getChildDevice(ctx, intfID, onuID)
if err != nil {
return nil, olterrors.NewErrNotFound("onu-child-device",
log.Fields{
@@ -1832,7 +1830,7 @@
//better to add the device to the cache here.
f.deviceHandler.StoreOnuDevice(onuDev.(*OnuDevice))
} else {
- logger.Debugw("found-onu-in-cache",
+ logger.Debugw(ctx, "found-onu-in-cache",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -1843,14 +1841,14 @@
}
//getChildDevice to fetch onu
-func (f *OpenOltFlowMgr) getChildDevice(intfID uint32, onuID uint32) (*voltha.Device, error) {
- logger.Infow("GetChildDevice",
+func (f *OpenOltFlowMgr) getChildDevice(ctx context.Context, intfID uint32, onuID uint32) (*voltha.Device, error) {
+ logger.Infow(ctx, "GetChildDevice",
log.Fields{
"pon-port": intfID,
"onu-id": onuID,
"device-id": f.deviceHandler.device.Id})
parentPortNo := IntfIDToPortNo(intfID, voltha.Port_PON_OLT)
- onuDevice, err := f.deviceHandler.GetChildDevice(parentPortNo, onuID)
+ onuDevice, err := f.deviceHandler.GetChildDevice(ctx, parentPortNo, onuID)
if err != nil {
return nil, olterrors.NewErrNotFound("onu",
log.Fields{
@@ -1859,7 +1857,7 @@
"device-id": f.deviceHandler.device.Id},
err)
}
- logger.Infow("successfully-received-child-device-from-core",
+ logger.Infow(ctx, "successfully-received-child-device-from-core",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"child_device_id": onuDevice.Id,
@@ -1867,13 +1865,13 @@
return onuDevice, nil
}
-func findNextFlow(flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
- logger.Info("unimplemented-flow %v", flow)
+func findNextFlow(ctx context.Context, flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
+ logger.Info(ctx, "unimplemented-flow %v", flow)
return nil
}
-func (f *OpenOltFlowMgr) clearFlowsAndSchedulerForLogicalPort(childDevice *voltha.Device, logicalPort *voltha.LogicalPort) {
- logger.Info("unimplemented-device %v, logicalport %v", childDevice, logicalPort)
+func (f *OpenOltFlowMgr) clearFlowsAndSchedulerForLogicalPort(ctx context.Context, childDevice *voltha.Device, logicalPort *voltha.LogicalPort) {
+ logger.Info(ctx, "unimplemented-device %v, logicalport %v", childDevice, logicalPort)
}
func (f *OpenOltFlowMgr) decodeStoredID(id uint64) (uint64, string) {
@@ -1883,10 +1881,10 @@
return id, Downstream
}
-func (f *OpenOltFlowMgr) sendDeleteGemPortToChild(intfID uint32, onuID uint32, uniID uint32, gemPortID uint32, tpPath string) error {
- onuDev, err := f.getOnuDevice(intfID, onuID)
+func (f *OpenOltFlowMgr) sendDeleteGemPortToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, gemPortID uint32, tpPath string) error {
+ onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
if err != nil {
- logger.Debugw("couldnt-find-onu-child-device",
+ logger.Debugw(ctx, "couldnt-find-onu-child-device",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -1896,7 +1894,7 @@
}
delGemPortMsg := &ic.InterAdapterDeleteGemPortMessage{UniId: uniID, TpPath: tpPath, GemPortId: gemPortID}
- logger.Debugw("sending-gem-port-delete-to-openonu-adapter",
+ logger.Debugw(ctx, "sending-gem-port-delete-to-openonu-adapter",
log.Fields{
"msg": *delGemPortMsg,
"device-id": f.deviceHandler.device.Id})
@@ -1915,7 +1913,7 @@
"proxyDeviceID": onuDev.proxyDeviceID,
"device-id": f.deviceHandler.device.Id}, sendErr)
}
- logger.Infow("success-sending-del-gem-port-to-onu-adapter",
+ logger.Infow(ctx, "success-sending-del-gem-port-to-onu-adapter",
log.Fields{
"msg": delGemPortMsg,
"from-adapter": f.deviceHandler.device.Type,
@@ -1924,10 +1922,10 @@
return nil
}
-func (f *OpenOltFlowMgr) sendDeleteTcontToChild(intfID uint32, onuID uint32, uniID uint32, allocID uint32, tpPath string) error {
- onuDev, err := f.getOnuDevice(intfID, onuID)
+func (f *OpenOltFlowMgr) sendDeleteTcontToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID uint32, tpPath string) error {
+ onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
if err != nil {
- logger.Warnw("couldnt-find-onu-child-device",
+ logger.Warnw(ctx, "couldnt-find-onu-child-device",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -1937,7 +1935,7 @@
}
delTcontMsg := &ic.InterAdapterDeleteTcontMessage{UniId: uniID, TpPath: tpPath, AllocId: allocID}
- logger.Debugw("sending-tcont-delete-to-openonu-adapter",
+ logger.Debugw(ctx, "sending-tcont-delete-to-openonu-adapter",
log.Fields{
"msg": *delTcontMsg,
"device-id": f.deviceHandler.device.Id})
@@ -1955,20 +1953,20 @@
"proxyDeviceID": onuDev.proxyDeviceID,
"device-id": f.deviceHandler.device.Id}, sendErr)
}
- logger.Infow("success-sending-del-tcont-to-onu-adapter",
+ logger.Infow(ctx, "success-sending-del-tcont-to-onu-adapter",
log.Fields{
"msg": delTcontMsg,
"device-id": f.deviceHandler.device.Id})
return nil
}
-func (f *OpenOltFlowMgr) deletePendingFlows(Intf uint32, onuID int32, uniID int32) {
+func (f *OpenOltFlowMgr) deletePendingFlows(ctx context.Context, Intf uint32, onuID int32, uniID int32) {
pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); ok {
if val.(int) > 0 {
pnFlDels := val.(int) - 1
if pnFlDels > 0 {
- logger.Debugw("flow-delete-succeeded--more-pending",
+ logger.Debugw(ctx, "flow-delete-succeeded--more-pending",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -1977,7 +1975,7 @@
"device-id": f.deviceHandler.device.Id})
f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
} else {
- logger.Debugw("all-pending-flow-deletes-handled--removing-entry-from-map",
+ logger.Debugw(ctx, "all-pending-flow-deletes-handled--removing-entry-from-map",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -1987,7 +1985,7 @@
}
}
} else {
- logger.Debugw("no-pending-delete-flows-found",
+ logger.Debugw(ctx, "no-pending-delete-flows-found",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -2002,12 +2000,12 @@
// which was used for deriving the gemport->logicalPortNo during packet-in.
// Otherwise stale info continues to exist after gemport is freed and wrong logicalPortNo
// is conveyed to ONOS during packet-in OF message.
-func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(intfID uint32, onuID uint32, gemPortID uint32) {
+func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(ctx context.Context, intfID uint32, onuID uint32, gemPortID uint32) {
f.onuGemInfoLock[intfID].Lock()
defer f.onuGemInfoLock[intfID].Unlock()
- logger.Infow("deleting-gem-from-local-cache",
+ logger.Infow(ctx, "deleting-gem-from-local-cache",
log.Fields{
"gem": gemPortID,
"intf-id": intfID,
@@ -2022,7 +2020,7 @@
if gem == gemPortID {
onu.GemPorts = append(onu.GemPorts[:j], onu.GemPorts[j+1:]...)
onugem[i] = onu
- logger.Infow("removed-gemport-from-local-cache",
+ logger.Infow(ctx, "removed-gemport-from-local-cache",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -2043,7 +2041,7 @@
gemPortID int32, flowID uint32, flowDirection string,
portNum uint32, updatedFlows []rsrcMgr.FlowInfo) error {
- tpID, err := getTpIDFromFlow(flow)
+ tpID, err := getTpIDFromFlow(ctx, flow)
if err != nil {
return olterrors.NewErrNotFound("tp-id",
log.Fields{
@@ -2070,7 +2068,7 @@
if onuID != -1 && uniID != -1 {
pnFlDelKey := pendingFlowDeleteKey{Intf, uint32(onuID), uint32(uniID)}
if val, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
- logger.Debugw("creating-entry-for-pending-flow-delete",
+ logger.Debugw(ctx, "creating-entry-for-pending-flow-delete",
log.Fields{
"flow-id": flowID,
"intf": Intf,
@@ -2080,7 +2078,7 @@
f.pendingFlowDelete.Store(pnFlDelKey, 1)
} else {
pnFlDels := val.(int) + 1
- logger.Debugw("updating-flow-delete-entry",
+ logger.Debugw(ctx, "updating-flow-delete-entry",
log.Fields{
"flow-id": flowID,
"intf": Intf,
@@ -2091,10 +2089,10 @@
f.pendingFlowDelete.Store(pnFlDelKey, pnFlDels)
}
- defer f.deletePendingFlows(Intf, onuID, uniID)
+ defer f.deletePendingFlows(ctx, Intf, onuID, uniID)
}
- logger.Debugw("releasing-flow-id-to-resource-manager",
+ logger.Debugw(ctx, "releasing-flow-id-to-resource-manager",
log.Fields{
"Intf": Intf,
"onu-id": onuID,
@@ -2104,8 +2102,8 @@
f.resourceMgr.FreeFlowID(ctx, Intf, int32(onuID), int32(uniID), flowID)
uni := getUniPortPath(f.deviceHandler.device.Id, Intf, onuID, uniID)
- tpPath := f.getTPpath(Intf, uni, tpID)
- logger.Debugw("getting-techprofile-instance-for-subscriber",
+ tpPath := f.getTPpath(ctx, Intf, uni, tpID)
+ logger.Debugw(ctx, "getting-techprofile-instance-for-subscriber",
log.Fields{
"TP-PATH": tpPath,
"device-id": f.deviceHandler.device.Id})
@@ -2130,19 +2128,19 @@
break
}
}
- logger.Debugw("gem-port-id-is-still-used-by-other-flows",
+ logger.Debugw(ctx, "gem-port-id-is-still-used-by-other-flows",
log.Fields{
"gemport-id": gemPortID,
"usedByFlows": flowIDs,
"device-id": f.deviceHandler.device.Id})
return nil
}
- logger.Debugf("gem-port-id %d is-not-used-by-another-flow--releasing-the-gem-port", gemPortID)
+ logger.Debugf(ctx, "gem-port-id %d is-not-used-by-another-flow--releasing-the-gem-port", gemPortID)
f.resourceMgr.RemoveGemPortIDForOnu(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
// TODO: The TrafficQueue corresponding to this gem-port also should be removed immediately.
// But it is anyway eventually removed later when the TechProfile is freed, so not a big issue for now.
f.resourceMgr.RemoveGEMportPonportToOnuMapOnKVStore(ctx, uint32(gemPortID), Intf)
- f.deleteGemPortFromLocalCache(Intf, uint32(onuID), uint32(gemPortID))
+ f.deleteGemPortFromLocalCache(ctx, Intf, uint32(onuID), uint32(gemPortID))
f.onuIdsLock.Lock()
//everytime an entry is deleted from flowsUsedByGemPort cache, the same should be updated in kv as well
// by calling DeleteFlowIDsForGem
@@ -2151,8 +2149,8 @@
f.resourceMgr.FreeGemPortID(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
f.onuIdsLock.Unlock()
// Delete the gem port on the ONU.
- if err := f.sendDeleteGemPortToChild(Intf, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
- logger.Errorw("error-processing-delete-gem-port-towards-onu",
+ if err := f.sendDeleteGemPortToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
+ logger.Errorw(ctx, "error-processing-delete-gem-port-towards-onu",
log.Fields{
"err": err,
"intf": Intf,
@@ -2171,8 +2169,8 @@
f.RemoveSchedulerQueues(ctx, schedQueue{direction: tp_pb.Direction_DOWNSTREAM, intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), tpID: tpID, uniPort: portNum, tpInst: techprofileInst})
f.resourceMgr.FreeAllocID(ctx, Intf, uint32(onuID), uint32(uniID), techprofileInst.UsScheduler.AllocID)
// Delete the TCONT on the ONU.
- if err := f.sendDeleteTcontToChild(Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.UsScheduler.AllocID), tpPath); err != nil {
- logger.Errorw("error-processing-delete-tcont-towards-onu",
+ if err := f.sendDeleteTcontToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.UsScheduler.AllocID), tpPath); err != nil {
+ logger.Errorw(ctx, "error-processing-delete-tcont-towards-onu",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -2186,8 +2184,8 @@
f.DeleteTechProfileInstance(ctx, Intf, uint32(onuID), uint32(uniID), "", tpID)
f.resourceMgr.FreeAllocID(ctx, Intf, uint32(onuID), uint32(uniID), techprofileInst.AllocID)
// Delete the TCONT on the ONU.
- if err := f.sendDeleteTcontToChild(Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.AllocID), tpPath); err != nil {
- logger.Errorw("error-processing-delete-tcont-towards-onu",
+ if err := f.sendDeleteTcontToChild(ctx, Intf, uint32(onuID), uint32(uniID), uint32(techprofileInst.AllocID), tpPath); err != nil {
+ logger.Errorw(ctx, "error-processing-delete-tcont-towards-onu",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -2196,7 +2194,7 @@
"alloc-id": techprofileInst.AllocID})
}
default:
- logger.Errorw("error-unknown-tech",
+ logger.Errorw(ctx, "error-unknown-tech",
log.Fields{
"techprofileInst": techprofileInst})
}
@@ -2208,7 +2206,7 @@
// nolint: gocyclo
func (f *OpenOltFlowMgr) clearFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) {
- logger.Infow("clear-flow-from-resource-manager",
+ logger.Infow(ctx, "clear-flow-from-resource-manager",
log.Fields{
"flowDirection": flowDirection,
"flow": *flow,
@@ -2222,9 +2220,9 @@
var updatedFlows []rsrcMgr.FlowInfo
classifierInfo := make(map[string]interface{})
- portNum, Intf, onu, uni, inPort, ethType, err := FlowExtractInfo(flow, flowDirection)
+ portNum, Intf, onu, uni, inPort, ethType, err := FlowExtractInfo(ctx, flow, flowDirection)
if err != nil {
- logger.Error(err)
+ logger.Error(ctx, err)
return
}
@@ -2234,10 +2232,10 @@
for _, field := range flows.GetOfbFields(flow) {
if field.Type == flows.IP_PROTO {
classifierInfo[IPProto] = field.GetIpProto()
- logger.Debugw("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+ logger.Debugw(ctx, "field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
}
}
- logger.Infow("extracted-access-info-from-flow-to-be-deleted",
+ logger.Infow(ctx, "extracted-access-info-from-flow-to-be-deleted",
log.Fields{
"flow-id": flow.Id,
"intf-id": Intf,
@@ -2247,10 +2245,10 @@
if ethType == LldpEthType || ((classifierInfo[IPProto] == IPProtoDhcp) && (flowDirection == "downstream")) {
onuID = -1
uniID = -1
- logger.Debug("trap-on-nni-flow-set-oni--uni-to- -1")
- Intf, err = IntfIDFromNniPortNum(inPort)
+ logger.Debug(ctx, "trap-on-nni-flow-set-oni--uni-to- -1")
+ Intf, err = IntfIDFromNniPortNum(ctx, inPort)
if err != nil {
- logger.Errorw("invalid-in-port-number",
+ logger.Errorw(ctx, "invalid-in-port-number",
log.Fields{
"port-number": inPort,
"error": err})
@@ -2261,7 +2259,7 @@
for _, flowID := range flowIds {
flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, Intf, onuID, uniID, flowID)
if flowInfo == nil {
- logger.Debugw("no-flowinfo-found-in-kv-store",
+ logger.Debugw(ctx, "no-flowinfo-found-in-kv-store",
log.Fields{
"intf": Intf,
"onu-id": onuID,
@@ -2277,13 +2275,13 @@
for i, storedFlow := range updatedFlows {
if flow.Id == storedFlow.LogicalFlowID {
removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
- logger.Debugw("flow-to-be-deleted", log.Fields{"flow": storedFlow})
+ logger.Debugw(ctx, "flow-to-be-deleted", log.Fields{"flow": storedFlow})
// DKB
- if err = f.removeFlowFromDevice(&removeFlowMessage, flow.Id); err != nil {
- logger.Errorw("failed-to-remove-flow", log.Fields{"error": err})
+ if err = f.removeFlowFromDevice(ctx, &removeFlowMessage, flow.Id); err != nil {
+ logger.Errorw(ctx, "failed-to-remove-flow", log.Fields{"error": err})
return
}
- logger.Info("flow-removed-from-device-successfully", log.Fields{
+ logger.Info(ctx, "flow-removed-from-device-successfully", log.Fields{
"flow-id": flow.Id,
"stored-flow": storedFlow,
"device-id": f.deviceHandler.device.Id,
@@ -2295,7 +2293,7 @@
updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
if err = f.clearResources(ctx, flow, Intf, onuID, uniID, storedFlow.Flow.GemportId,
flowID, flowDirection, portNum, updatedFlows); err != nil {
- logger.Error("failed-to-clear-resources-for-flow", log.Fields{
+ logger.Error(ctx, "failed-to-clear-resources-for-flow", log.Fields{
"flow-id": flow.Id,
"stored-flow": storedFlow,
"device-id": f.deviceHandler.device.Id,
@@ -2314,11 +2312,11 @@
// clears resources reserved for this multicast flow
func (f *OpenOltFlowMgr) clearMulticastFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats) {
classifierInfo := make(map[string]interface{})
- formulateClassifierInfoFromFlow(classifierInfo, flow)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
networkInterfaceID, err := f.getNNIInterfaceIDOfMulticastFlow(ctx, classifierInfo)
if err != nil {
- logger.Warnw("no-inport-found--cannot-release-resources-of-the-multicast-flow", log.Fields{"flowId:": flow.Id})
+ logger.Warnw(ctx, "no-inport-found--cannot-release-resources-of-the-multicast-flow", log.Fields{"flowId:": flow.Id})
return
}
@@ -2332,7 +2330,7 @@
for _, flowID = range flowIds {
flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, networkInterfaceID, onuID, uniID, flowID)
if flowInfo == nil {
- logger.Debugw("no-multicast-flowinfo-found-in-the-kv-store",
+ logger.Debugw(ctx, "no-multicast-flowinfo-found-in-the-kv-store",
log.Fields{
"intf": networkInterfaceID,
"onu-id": onuID,
@@ -2347,31 +2345,31 @@
for i, storedFlow := range updatedFlows {
if flow.Id == storedFlow.LogicalFlowID {
removeFlowMessage := openoltpb2.Flow{FlowId: storedFlow.Flow.FlowId, FlowType: storedFlow.Flow.FlowType}
- logger.Debugw("multicast-flow-to-be-deleted",
+ logger.Debugw(ctx, "multicast-flow-to-be-deleted",
log.Fields{
"flow": storedFlow,
"flow-id": flow.Id,
"device-id": f.deviceHandler.device.Id})
//remove from device
- if err := f.removeFlowFromDevice(&removeFlowMessage, flow.Id); err != nil {
+ if err := f.removeFlowFromDevice(ctx, &removeFlowMessage, flow.Id); err != nil {
// DKB
- logger.Errorw("failed-to-remove-multicast-flow",
+ logger.Errorw(ctx, "failed-to-remove-multicast-flow",
log.Fields{
"flow-id": flow.Id,
"error": err})
return
}
- logger.Infow("multicast-flow-removed-from-device-successfully", log.Fields{"flow-id": flow.Id})
+ logger.Infow(ctx, "multicast-flow-removed-from-device-successfully", log.Fields{"flow-id": flow.Id})
//Remove the Flow from FlowInfo
updatedFlows = append(updatedFlows[:i], updatedFlows[i+1:]...)
if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID), NoneOnuID, NoneUniID, flowID, &updatedFlows); err != nil {
- logger.Errorw("failed-to-delete-multicast-flow-from-the-kv-store",
+ logger.Errorw(ctx, "failed-to-delete-multicast-flow-from-the-kv-store",
log.Fields{"flow": storedFlow,
"err": err})
return
}
//release flow id
- logger.Debugw("releasing-multicast-flow-id",
+ logger.Debugw(ctx, "releasing-multicast-flow-id",
log.Fields{"flow-id": flowID,
"interfaceID": networkInterfaceID})
f.resourceMgr.FreeFlowID(ctx, uint32(networkInterfaceID), NoneOnuID, NoneUniID, flowID)
@@ -2382,7 +2380,7 @@
//RemoveFlow removes the flow from the device
func (f *OpenOltFlowMgr) RemoveFlow(ctx context.Context, flow *ofp.OfpFlowStats) error {
- logger.Infow("removing-flow", log.Fields{"flow": *flow})
+ logger.Infow(ctx, "removing-flow", log.Fields{"flow": *flow})
var direction string
actionInfo := make(map[string]interface{})
@@ -2390,9 +2388,9 @@
if action.Type == flows.OUTPUT {
if out := action.GetOutput(); out != nil {
actionInfo[Output] = out.GetPort()
- logger.Debugw("action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
+ logger.Debugw(ctx, "action-type-output", log.Fields{"out_port": actionInfo[Output].(uint32)})
} else {
- logger.Error("invalid-output-port-in-action")
+ logger.Error(ctx, "invalid-output-port-in-action")
return olterrors.NewErrInvalidValue(log.Fields{"invalid-out-port-action": 0}, nil)
}
}
@@ -2408,7 +2406,7 @@
direction = Downstream
}
- _, intfID, onuID, uniID, _, _, err := FlowExtractInfo(flow, direction)
+ _, intfID, onuID, uniID, _, _, err := FlowExtractInfo(ctx, flow, direction)
if err != nil {
return err
}
@@ -2421,7 +2419,7 @@
f.perUserFlowHandleLock.Unlock(userKey)
} else {
// Ideally this should never happen
- logger.Errorw("failed-to-acquire-lock-to-remove-flow--remove-aborted", log.Fields{"flow": flow})
+ logger.Errorw(ctx, "failed-to-acquire-lock-to-remove-flow--remove-aborted", log.Fields{"flow": flow})
return errors.New("failed-to-acquire-per-user-lock")
}
@@ -2435,12 +2433,12 @@
select {
case <-time.After(20 * time.Millisecond):
if flowDelRefCnt, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok || flowDelRefCnt == 0 {
- logger.Debug("pending-flow-deletes-completed")
+ logger.Debug(ctx, "pending-flow-deletes-completed")
ch <- true
return
}
case <-ctx.Done():
- logger.Error("flow-delete-wait-handler-routine-canceled")
+ logger.Error(ctx, "flow-delete-wait-handler-routine-canceled")
return
}
}
@@ -2470,13 +2468,13 @@
var UsMeterID uint32
var DsMeterID uint32
- logger.Infow("adding-flow",
+ logger.Infow(ctx, "adding-flow",
log.Fields{
"flow": flow,
"flowmetadata": flowMetadata})
- formulateClassifierInfoFromFlow(classifierInfo, flow)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
- err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flow)
+ err := formulateActionInfoFromFlow(ctx, actionInfo, classifierInfo, flow)
if err != nil {
// Error logging is already done in the called function
// So just return in case of error
@@ -2489,13 +2487,13 @@
}
/* Controller bound trap flows */
- err = formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo, flow)
+ err = formulateControllerBoundTrapFlowInfo(ctx, actionInfo, classifierInfo, flow)
if err != nil {
// error if any, already logged in the called function
return err
}
- logger.Debugw("flow-ports",
+ logger.Debugw(ctx, "flow-ports",
log.Fields{
"classifierinfo_inport": classifierInfo[InPort],
"action_output": actionInfo[Output]})
@@ -2503,7 +2501,7 @@
if ethType, ok := classifierInfo[EthType]; ok {
if ethType.(uint32) == LldpEthType {
- logger.Info("adding-lldp-flow")
+ logger.Info(ctx, "adding-lldp-flow")
return f.addLLDPFlow(ctx, flow, portNo)
}
}
@@ -2511,21 +2509,21 @@
if ipProto.(uint32) == IPProtoDhcp {
if udpSrc, ok := classifierInfo[UDPSrc]; ok {
if udpSrc.(uint32) == uint32(67) || udpSrc.(uint32) == uint32(546) {
- logger.Debug("trap-dhcp-from-nni-flow")
+ logger.Debug(ctx, "trap-dhcp-from-nni-flow")
return f.addDHCPTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
}
}
}
}
if isIgmpTrapDownstreamFlow(classifierInfo) {
- logger.Debug("trap-igmp-from-nni-flow")
+ logger.Debug(ctx, "trap-igmp-from-nni-flow")
return f.addIgmpTrapFlowOnNNI(ctx, flow, classifierInfo, portNo)
}
- f.deviceHandler.AddUniPortToOnu(intfID, onuID, portNo)
+ f.deviceHandler.AddUniPortToOnu(ctx, intfID, onuID, portNo)
f.resourceMgr.AddUniPortToOnuInfo(ctx, intfID, onuID, portNo)
- TpID, err := getTpIDFromFlow(flow)
+ TpID, err := getTpIDFromFlow(ctx, flow)
if err != nil {
return olterrors.NewErrNotFound("tpid-for-flow",
log.Fields{
@@ -2534,7 +2532,7 @@
"onu-id": onuID,
"uni-id": uniID}, err)
}
- logger.Debugw("tpid-for-this-subcriber",
+ logger.Debugw(ctx, "tpid-for-this-subcriber",
log.Fields{
"tp-id": TpID,
"intf-id": intfID,
@@ -2542,16 +2540,16 @@
"uni-id": uniID})
if IsUpstream(actionInfo[Output].(uint32)) {
UsMeterID = flows.GetMeterIdFromFlow(flow)
- logger.Debugw("upstream-flow-meter-id", log.Fields{"us-meter-id": UsMeterID})
+ logger.Debugw(ctx, "upstream-flow-meter-id", log.Fields{"us-meter-id": UsMeterID})
} else {
DsMeterID = flows.GetMeterIdFromFlow(flow)
- logger.Debugw("downstream-flow-meter-id", log.Fields{"ds-meter-id": DsMeterID})
+ logger.Debugw(ctx, "downstream-flow-meter-id", log.Fields{"ds-meter-id": DsMeterID})
}
pnFlDelKey := pendingFlowDeleteKey{intfID, onuID, uniID}
if _, ok := f.pendingFlowDelete.Load(pnFlDelKey); !ok {
- logger.Debugw("no-pending-flows-found--going-ahead-with-flow-install",
+ logger.Debugw(ctx, "no-pending-flows-found--going-ahead-with-flow-install",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -2562,7 +2560,7 @@
go f.waitForFlowDeletesToCompleteForOnu(ctx, intfID, onuID, uniID, pendingFlowDelComplete)
select {
case <-pendingFlowDelComplete:
- logger.Debugw("all-pending-flow-deletes-completed",
+ logger.Debugw(ctx, "all-pending-flow-deletes-completed",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -2583,7 +2581,7 @@
// handleFlowWithGroup adds multicast flow to the device.
func (f *OpenOltFlowMgr) handleFlowWithGroup(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
classifierInfo[PacketTagType] = DoubleTag
- logger.Debugw("add-multicast-flow", log.Fields{
+ logger.Debugw(ctx, "add-multicast-flow", log.Fields{
"classifier-info": classifierInfo,
"actionInfo": actionInfo})
@@ -2604,7 +2602,7 @@
multicastMac := flows.ConvertToMulticastMacBytes(ipv4Dst.(uint32))
delete(classifierInfo, Ipv4Dst)
classifierInfo[EthDst] = multicastMac
- logger.Debugw("multicast-ip-to-mac-conversion-success",
+ logger.Debugw(ctx, "multicast-ip-to-mac-conversion-success",
log.Fields{
"ip:": ipv4Dst.(uint32),
"mac:": multicastMac})
@@ -2616,9 +2614,9 @@
uniID := NoneUniID
gemPortID := NoneGemPortID
- flowStoreCookie := getFlowStoreCookie(classifierInfo, uint32(0))
+ flowStoreCookie := getFlowStoreCookie(ctx, classifierInfo, uint32(0))
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Infow("multicast-flow-exists-not-re-adding", log.Fields{"classifier-info": classifierInfo})
+ logger.Infow(ctx, "multicast-flow-exists-not-re-adding", log.Fields{"classifier-info": classifierInfo})
return nil
}
flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -2649,7 +2647,7 @@
if err = f.addFlowToDevice(ctx, flow, &multicastFlow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": multicastFlow}, err)
}
- logger.Info("multicast-flow-added-to-device-successfully")
+ logger.Info(ctx, "multicast-flow-added-to-device-successfully")
//get cached group
group, _, err := f.GetFlowGroupFromKVStore(ctx, groupID, true)
if err == nil {
@@ -2675,7 +2673,7 @@
//getNNIInterfaceIDOfMulticastFlow returns associated NNI interface id of the inPort criterion if exists; returns the first NNI interface of the device otherwise
func (f *OpenOltFlowMgr) getNNIInterfaceIDOfMulticastFlow(ctx context.Context, classifierInfo map[string]interface{}) (uint32, error) {
if inPort, ok := classifierInfo[InPort]; ok {
- nniInterfaceID, err := IntfIDFromNniPortNum(inPort.(uint32))
+ nniInterfaceID, err := IntfIDFromNniPortNum(ctx, inPort.(uint32))
if err != nil {
return 0, olterrors.NewErrInvalidValue(log.Fields{"nni-in-port-number": inPort}, err)
}
@@ -2691,7 +2689,7 @@
// AddGroup add or update the group
func (f *OpenOltFlowMgr) AddGroup(ctx context.Context, group *ofp.OfpGroupEntry) error {
- logger.Infow("add-group", log.Fields{"group": group})
+ logger.Infow(ctx, "add-group", log.Fields{"group": group})
if group == nil {
return olterrors.NewErrInvalidValue(log.Fields{"group": group}, nil)
}
@@ -2702,7 +2700,7 @@
Action: f.buildGroupAction(),
}
- logger.Debugw("sending-group-to-device", log.Fields{"groupToOlt": groupToOlt})
+ logger.Debugw(ctx, "sending-group-to-device", log.Fields{"groupToOlt": groupToOlt})
_, err := f.deviceHandler.Client.PerformGroupOperation(ctx, &groupToOlt)
if err != nil {
return olterrors.NewErrAdapter("add-group-operation-failed", log.Fields{"groupToOlt": groupToOlt}, err)
@@ -2711,7 +2709,7 @@
if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, true); err != nil {
return olterrors.NewErrPersistence("add", "flow-group", group.Desc.GroupId, log.Fields{"group": group}, err)
}
- logger.Infow("add-group-operation-performed-on-the-device-successfully ", log.Fields{"groupToOlt": groupToOlt})
+ logger.Infow(ctx, "add-group-operation-performed-on-the-device-successfully ", log.Fields{"groupToOlt": groupToOlt})
return nil
}
@@ -2727,12 +2725,12 @@
// ModifyGroup updates the group
func (f *OpenOltFlowMgr) ModifyGroup(ctx context.Context, group *ofp.OfpGroupEntry) error {
- logger.Infow("modify-group", log.Fields{"group": group})
+ logger.Infow(ctx, "modify-group", log.Fields{"group": group})
if group == nil || group.Desc == nil {
return olterrors.NewErrInvalidValue(log.Fields{"group": group}, nil)
}
- newGroup := f.buildGroup(group.Desc.GroupId, group.Desc.Buckets)
+ newGroup := f.buildGroup(ctx, group.Desc.GroupId, group.Desc.Buckets)
//get existing members of the group
val, groupExists, err := f.GetFlowGroupFromKVStore(ctx, group.Desc.GroupId, false)
@@ -2743,16 +2741,16 @@
var current *openoltpb2.Group // represents the group on the device
if groupExists {
// group already exists
- current = f.buildGroup(group.Desc.GroupId, val.Desc.GetBuckets())
- logger.Debugw("modify-group--group exists",
+ current = f.buildGroup(ctx, group.Desc.GroupId, val.Desc.GetBuckets())
+ logger.Debugw(ctx, "modify-group--group exists",
log.Fields{
"group on the device": val,
"new": group})
} else {
- current = f.buildGroup(group.Desc.GroupId, nil)
+ current = f.buildGroup(ctx, group.Desc.GroupId, nil)
}
- logger.Debugw("modify-group--comparing-current-and-new",
+ logger.Debugw(ctx, "modify-group--comparing-current-and-new",
log.Fields{
"group on the device": current,
"new": newGroup})
@@ -2761,7 +2759,7 @@
// get members to be removed
membersToBeRemoved := f.findDiff(newGroup, current)
- logger.Infow("modify-group--differences found", log.Fields{
+ logger.Infow(ctx, "modify-group--differences found", log.Fields{
"membersToBeAdded": membersToBeAdded,
"membersToBeRemoved": membersToBeRemoved,
"groupId": group.Desc.GroupId})
@@ -2774,13 +2772,13 @@
groupToOlt.Command = openoltpb2.Group_ADD_MEMBERS
groupToOlt.Members = membersToBeAdded
//execute addMembers
- errAdd = f.callGroupAddRemove(&groupToOlt)
+ errAdd = f.callGroupAddRemove(ctx, &groupToOlt)
}
if membersToBeRemoved != nil && len(membersToBeRemoved) > 0 {
groupToOlt.Command = openoltpb2.Group_REMOVE_MEMBERS
groupToOlt.Members = membersToBeRemoved
//execute removeMembers
- errRemoved = f.callGroupAddRemove(&groupToOlt)
+ errRemoved = f.callGroupAddRemove(ctx, &groupToOlt)
}
//save the modified group
@@ -2788,12 +2786,12 @@
if err := f.resourceMgr.AddFlowGroupToKVStore(ctx, group, false); err != nil {
return olterrors.NewErrPersistence("add", "flow-group", group.Desc.GroupId, log.Fields{"group": group}, err)
}
- logger.Infow("modify-group-was-success--storing-group",
+ logger.Infow(ctx, "modify-group-was-success--storing-group",
log.Fields{
"group": group,
"existingGroup": current})
} else {
- logger.Warnw("one-of-the-group-add/remove-operations-failed--cannot-save-group-modifications",
+ logger.Warnw(ctx, "one-of-the-group-add/remove-operations-failed--cannot-save-group-modifications",
log.Fields{"group": group})
if errAdd != nil {
return errAdd
@@ -2804,8 +2802,8 @@
}
//callGroupAddRemove performs add/remove buckets operation for the indicated group
-func (f *OpenOltFlowMgr) callGroupAddRemove(group *openoltpb2.Group) error {
- if err := f.performGroupOperation(group); err != nil {
+func (f *OpenOltFlowMgr) callGroupAddRemove(ctx context.Context, group *openoltpb2.Group) error {
+ if err := f.performGroupOperation(ctx, group); err != nil {
st, _ := status.FromError(err)
//ignore already exists error code
if st.Code() != codes.AlreadyExists {
@@ -2838,8 +2836,8 @@
}
//performGroupOperation call performGroupOperation operation of openolt proto
-func (f *OpenOltFlowMgr) performGroupOperation(group *openoltpb2.Group) error {
- logger.Debugw("sending-group-to-device",
+func (f *OpenOltFlowMgr) performGroupOperation(ctx context.Context, group *openoltpb2.Group) error {
+ logger.Debugw(ctx, "sending-group-to-device",
log.Fields{
"groupToOlt": group,
"command": group.Command})
@@ -2851,13 +2849,13 @@
}
//buildGroup build openoltpb2.Group from given group id and bucket list
-func (f *OpenOltFlowMgr) buildGroup(groupID uint32, buckets []*ofp.OfpBucket) *openoltpb2.Group {
+func (f *OpenOltFlowMgr) buildGroup(ctx context.Context, groupID uint32, buckets []*ofp.OfpBucket) *openoltpb2.Group {
group := openoltpb2.Group{
GroupId: groupID}
// create members of the group
if buckets != nil {
for _, ofBucket := range buckets {
- member := f.buildMember(ofBucket)
+ member := f.buildMember(ctx, ofBucket)
if member != nil && !f.contains(group.Members, member) {
group.Members = append(group.Members, member)
}
@@ -2867,7 +2865,7 @@
}
//buildMember builds openoltpb2.GroupMember from an OpenFlow bucket
-func (f *OpenOltFlowMgr) buildMember(ofBucket *ofp.OfpBucket) *openoltpb2.GroupMember {
+func (f *OpenOltFlowMgr) buildMember(ctx context.Context, ofBucket *ofp.OfpBucket) *openoltpb2.GroupMember {
var outPort uint32
outPortFound := false
for _, ofAction := range ofBucket.Actions {
@@ -2878,11 +2876,11 @@
}
if !outPortFound {
- logger.Debugw("bucket-skipped-since-no-out-port-found-in-it", log.Fields{"ofBucket": ofBucket})
+ logger.Debugw(ctx, "bucket-skipped-since-no-out-port-found-in-it", log.Fields{"ofBucket": ofBucket})
return nil
}
interfaceID := IntfIDFromUniPortNum(outPort)
- logger.Debugw("got-associated-interface-id-of-the-port",
+ logger.Debugw(ctx, "got-associated-interface-id-of-the-port",
log.Fields{
"portNumber:": outPort,
"interfaceId:": interfaceID})
@@ -2896,27 +2894,27 @@
//add member to the group
return &member
}
- logger.Warnf("bucket-skipped-since-interface-2-gem-mapping-cannot-be-found", log.Fields{"ofBucket": ofBucket})
+ logger.Warnf(ctx, "bucket-skipped-since-interface-2-gem-mapping-cannot-be-found", log.Fields{"ofBucket": ofBucket})
return nil
}
//sendTPDownloadMsgToChild send payload
-func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
+func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
- onuDev, err := f.getOnuDevice(intfID, onuID)
+ onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
if err != nil {
- logger.Errorw("couldnt-find-onu-child-device",
+ logger.Errorw(ctx, "couldnt-find-onu-child-device",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID})
return err
}
- logger.Debugw("got-child-device-from-olt-device-handler", log.Fields{"onu-id": onuDev.deviceID})
+ logger.Debugw(ctx, "got-child-device-from-olt-device-handler", log.Fields{"onu-id": onuDev.deviceID})
- tpPath := f.getTPpath(intfID, uni, TpID)
+ tpPath := f.getTPpath(ctx, intfID, uni, TpID)
tpDownloadMsg := &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID, Path: tpPath}
- logger.Debugw("sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
+ logger.Debugw(ctx, "sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(context.Background(),
tpDownloadMsg,
ic.InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST,
@@ -2932,7 +2930,7 @@
"onu-id": onuDev.deviceID,
"proxyDeviceID": onuDev.proxyDeviceID}, sendErr)
}
- logger.Infow("success-sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
+ logger.Infow(ctx, "success-sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
return nil
}
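As a usage illustration only (the caller below is hypothetical and not part of this change), the context received at an entry point is handed unchanged both to the converted helpers and to every log call, so a single request can be traced across the chain.

// exampleCaller is a hypothetical call site showing how the same ctx flows downward.
func (f *OpenOltFlowMgr) exampleCaller(ctx context.Context, intfID, onuID, uniID uint32, uni string, tpID uint32) {
	if err := f.sendTPDownloadMsgToChild(ctx, intfID, onuID, uniID, uni, tpID); err != nil {
		logger.Errorw(ctx, "example-tp-download-request-failed",
			log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID, "err": err})
		return
	}
	logger.Infow(ctx, "example-tp-download-requested",
		log.Fields{"intf-id": intfID, "onu-id": onuID, "uni-id": uniID})
}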
@@ -2947,7 +2945,7 @@
if err := f.resourceMgr.AddOnuGemInfo(ctx, intfID, onu); err != nil {
return err
}
- logger.Infow("updated-onuinfo",
+ logger.Infow(ctx, "updated-onuinfo",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -2963,7 +2961,7 @@
f.onuGemInfoLock[intfID].Lock()
defer f.onuGemInfoLock[intfID].Unlock()
- logger.Infow("adding-gem-to-onu-info-map",
+ logger.Infow(ctx, "adding-gem-to-onu-info-map",
log.Fields{
"gem": gemPort,
"intf": intfID,
@@ -2977,7 +2975,7 @@
// check if gem already exists , else update the cache and kvstore
for _, gem := range onu.GemPorts {
if gem == gemPort {
- logger.Debugw("gem-already-in-cache-no-need-to-update-cache-and-kv-store",
+ logger.Debugw(ctx, "gem-already-in-cache-no-need-to-update-cache-and-kv-store",
log.Fields{
"gem": gemPort,
"device-id": f.deviceHandler.device.Id})
@@ -2990,7 +2988,7 @@
}
err := f.resourceMgr.AddGemToOnuGemInfo(ctx, intfID, onuID, gemPort)
if err != nil {
- logger.Errorw("failed-to-add-gem-to-onu",
+ logger.Errorw(ctx, "failed-to-add-gem-to-onu",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -2998,7 +2996,7 @@
"device-id": f.deviceHandler.device.Id})
return
}
- logger.Infow("gem-added-to-onu-info-map",
+ logger.Infow(ctx, "gem-added-to-onu-info-map",
log.Fields{
"gem": gemPort,
"intf": intfID,
@@ -3010,12 +3008,12 @@
// This function Lookup maps by serialNumber or (intfId, gemPort)
//getOnuIDfromGemPortMap Returns OnuID,nil if found or set 0,error if no onuId is found for serialNumber or (intfId, gemPort)
-func (f *OpenOltFlowMgr) getOnuIDfromGemPortMap(intfID uint32, gemPortID uint32) (uint32, error) {
+func (f *OpenOltFlowMgr) getOnuIDfromGemPortMap(ctx context.Context, intfID uint32, gemPortID uint32) (uint32, error) {
f.onuGemInfoLock[intfID].Lock()
defer f.onuGemInfoLock[intfID].Unlock()
- logger.Infow("getting-onu-id-from-gem-port-and-pon-port",
+ logger.Infow(ctx, "getting-onu-id-from-gem-port-and-pon-port",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"onu-geminfo": f.onuGemInfo[intfID],
@@ -3046,7 +3044,7 @@
if packetIn.IntfType == "pon" {
// packet indication does not have serial number , so sending as nil
- if onuID, err = f.getOnuIDfromGemPortMap(packetIn.IntfId, packetIn.GemportId); err != nil {
+ if onuID, err = f.getOnuIDfromGemPortMap(ctx, packetIn.IntfId, packetIn.GemportId); err != nil {
// Called method is returning error with all data populated; just return the same
return logicalPortNum, err
}
@@ -3054,14 +3052,14 @@
logicalPortNum = packetIn.PortNo
} else {
uniID := uint32(0) // FIXME - multi-uni support
- logicalPortNum = MkUniPortNum(packetIn.IntfId, onuID, uniID)
+ logicalPortNum = MkUniPortNum(ctx, packetIn.IntfId, onuID, uniID)
}
// Store the gem port through which the packet_in came. Use the same gem port for packet_out
f.UpdateGemPortForPktIn(ctx, packetIn.IntfId, onuID, logicalPortNum, packetIn.GemportId)
} else if packetIn.IntfType == "nni" {
logicalPortNum = IntfIDToPortNo(packetIn.IntfId, voltha.Port_ETHERNET_NNI)
}
- logger.Infow("retrieved-logicalport-from-packet-in",
+ logger.Infow(ctx, "retrieved-logicalport-from-packet-in",
log.Fields{
"logical-port-num": logicalPortNum,
"intf-type": packetIn.IntfType,
@@ -3082,7 +3080,7 @@
gemPortID, ok := f.packetInGemPort[pktInkey]
if ok {
- logger.Debugw("found-gemport-for-pktin-key",
+ logger.Debugw(ctx, "found-gemport-for-pktin-key",
log.Fields{
"pktinkey": pktInkey,
"gem": gemPortID})
@@ -3093,7 +3091,7 @@
if err == nil {
if gemPortID != 0 {
f.packetInGemPort[pktInkey] = gemPortID
- logger.Infow("found-gem-port-from-kv-store-and-updating-cache-with-gemport",
+ logger.Infow(ctx, "found-gem-port-from-kv-store-and-updating-cache-with-gemport",
log.Fields{
"pktinkey": pktInkey,
"gem": gemPortID})
@@ -3124,7 +3122,7 @@
direction string,
tpID uint32,
vlanID ...uint32) {
- logger.Debugw("installing-flow-on-all-gem-ports",
+ logger.Debugw(ctx, "installing-flow-on-all-gem-ports",
log.Fields{
"FlowType": FlowType,
"gemPorts": gemPorts,
@@ -3222,7 +3220,7 @@
}
}
default:
- logger.Errorw("unknown-tech", log.Fields{"tpInst": TpInst})
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tpInst": TpInst})
}
}
@@ -3236,7 +3234,7 @@
}
func (f *OpenOltFlowMgr) addDHCPTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
- logger.Debug("adding-trap-dhcp-of-nni-flow")
+ logger.Debug(ctx, "adding-trap-dhcp-of-nni-flow")
action := make(map[string]interface{})
classifier[PacketTagType] = DoubleTag
action[TrapToHost] = true
@@ -3255,7 +3253,7 @@
uniID := -1
gemPortID := -1
allocID := -1
- networkInterfaceID, err := getNniIntfID(classifier, action)
+ networkInterfaceID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-intreface-id",
log.Fields{
@@ -3264,9 +3262,9 @@
err)
}
- flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+ flowStoreCookie := getFlowStoreCookie(ctx, classifier, uint32(0))
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Info("flow-exists-not-re-adding")
+ logger.Info(ctx, "flow-exists-not-re-adding")
return nil
}
flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -3284,12 +3282,12 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err)
}
- logger.Debugw("created-classifier-proto", log.Fields{"classifier": *classifierProto})
+ logger.Debugw(ctx, "created-classifier-proto", log.Fields{"classifier": *classifierProto})
actionProto, err := makeOpenOltActionField(action, classifier)
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err)
}
- logger.Debugw("created-action-proto", log.Fields{"action": *actionProto})
+ logger.Debugw(ctx, "created-action-proto", log.Fields{"action": *actionProto})
downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
OnuId: int32(onuID), // OnuId not required
UniId: int32(uniID), // UniId not used
@@ -3306,7 +3304,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err)
}
- logger.Info("dhcp-trap-on-nni-flow-added–to-device-successfully")
+ logger.Info(ctx, "dhcp-trap-on-nni-flow-added–to-device-successfully")
flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
int32(onuID),
@@ -3345,7 +3343,7 @@
//addIgmpTrapFlowOnNNI adds a trap-to-host flow on NNI
func (f *OpenOltFlowMgr) addIgmpTrapFlowOnNNI(ctx context.Context, logicalFlow *ofp.OfpFlowStats, classifier map[string]interface{}, portNo uint32) error {
- logger.Infow("adding-igmp-trap-of-nni-flow", log.Fields{"classifier-info": classifier})
+ logger.Infow(ctx, "adding-igmp-trap-of-nni-flow", log.Fields{"classifier-info": classifier})
action := make(map[string]interface{})
classifier[PacketTagType] = getPacketTypeFromClassifiers(classifier)
action[TrapToHost] = true
@@ -3364,16 +3362,16 @@
uniID := -1
gemPortID := -1
allocID := -1
- networkInterfaceID, err := getNniIntfID(classifier, action)
+ networkInterfaceID, err := getNniIntfID(ctx, classifier, action)
if err != nil {
return olterrors.NewErrNotFound("nni-interface-id", log.Fields{
"classifier": classifier,
"action": action},
err)
}
- flowStoreCookie := getFlowStoreCookie(classifier, uint32(0))
+ flowStoreCookie := getFlowStoreCookie(ctx, classifier, uint32(0))
if present := f.resourceMgr.IsFlowCookieOnKVStore(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), flowStoreCookie); present {
- logger.Info("igmp-flow-exists-not-re-adding")
+ logger.Info(ctx, "igmp-flow-exists-not-re-adding")
return nil
}
flowID, err := f.resourceMgr.GetFlowID(ctx, uint32(networkInterfaceID), int32(onuID), int32(uniID), uint32(gemPortID), flowStoreCookie, "", 0, 0)
@@ -3391,12 +3389,12 @@
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"classifier": classifier}, err)
}
- logger.Debugw("created-classifier-proto-for-the-igmp-flow", log.Fields{"classifier": *classifierProto})
+ logger.Debugw(ctx, "created-classifier-proto-for-the-igmp-flow", log.Fields{"classifier": *classifierProto})
actionProto, err := makeOpenOltActionField(action, classifier)
if err != nil {
return olterrors.NewErrInvalidValue(log.Fields{"action": action}, err)
}
- logger.Debugw("created-action-proto-for-the-igmp-flow", log.Fields{"action": *actionProto})
+ logger.Debugw(ctx, "created-action-proto-for-the-igmp-flow", log.Fields{"action": *actionProto})
downstreamflow := openoltpb2.Flow{AccessIntfId: int32(-1), // AccessIntfId not required
OnuId: int32(onuID), // OnuId not required
UniId: int32(uniID), // UniId not used
@@ -3413,7 +3411,7 @@
if err := f.addFlowToDevice(ctx, logicalFlow, &downstreamflow); err != nil {
return olterrors.NewErrFlowOp("add", flowID, log.Fields{"flow": downstreamflow}, err)
}
- logger.Info("igmp-trap-on-nni-flow-added-to-device-successfully")
+ logger.Info(ctx, "igmp-trap-on-nni-flow-added-to-device-successfully")
flowsToKVStore := f.getUpdatedFlowInfo(ctx, &downstreamflow, flowStoreCookie, "", flowID, logicalFlow.Id)
if err := f.updateFlowInfoToKVStore(ctx, int32(networkInterfaceID),
int32(onuID),
@@ -3447,7 +3445,7 @@
allocID := args[AllocID]
if ipProto, ok := classifierInfo[IPProto]; ok {
if ipProto.(uint32) == IPProtoDhcp {
- logger.Infow("adding-dhcp-flow", log.Fields{
+ logger.Infow(ctx, "adding-dhcp-flow", log.Fields{
"tp-id": tpID,
"alloc-id": allocID,
"intf-id": intfID,
@@ -3455,7 +3453,7 @@
"uni-id": uniID,
})
if pcp, ok := classifierInfo[VlanPcp]; ok {
- gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+ gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
tp_pb.Direction_UPSTREAM,
pcp.(uint32))
//Adding DHCP upstream flow
@@ -3467,14 +3465,14 @@
}
} else if ipProto.(uint32) == IgmpProto {
- logger.Infow("adding-us-igmp-flow",
+ logger.Infow(ctx, "adding-us-igmp-flow",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
"classifier-info:": classifierInfo})
if pcp, ok := classifierInfo[VlanPcp]; ok {
- gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+ gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
tp_pb.Direction_UPSTREAM,
pcp.(uint32))
f.addIGMPTrapFlow(ctx, intfID, onuID, uniID, portNo, classifierInfo, actionInfo, flow, allocID, gemPort, tpID)
@@ -3483,12 +3481,12 @@
installFlowOnAllGemports(ctx, f.addIGMPTrapFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, IgmpFlow, Upstream, tpID)
}
} else {
- logger.Errorw("invalid-classifier-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo})
+ logger.Errorw(ctx, "invalid-classifier-to-handle", log.Fields{"classifier": classifierInfo, "action": actionInfo})
return
}
} else if ethType, ok := classifierInfo[EthType]; ok {
if ethType.(uint32) == EapEthType {
- logger.Infow("adding-eapol-flow", log.Fields{
+ logger.Infow(ctx, "adding-eapol-flow", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
@@ -3500,7 +3498,7 @@
vlanID = DefaultMgmtVlan
}
if pcp, ok := classifierInfo[VlanPcp]; ok {
- gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+ gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
tp_pb.Direction_UPSTREAM,
pcp.(uint32))
@@ -3510,13 +3508,13 @@
}
}
} else if _, ok := actionInfo[PushVlan]; ok {
- logger.Infow("adding-upstream-data-rule", log.Fields{
+ logger.Infow(ctx, "adding-upstream-data-rule", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
})
if pcp, ok := classifierInfo[VlanPcp]; ok {
- gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+ gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
tp_pb.Direction_UPSTREAM,
pcp.(uint32))
//Adding HSIA upstream flow
@@ -3526,13 +3524,13 @@
installFlowOnAllGemports(ctx, f.addUpstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow, Upstream, tpID)
}
} else if _, ok := actionInfo[PopVlan]; ok {
- logger.Infow("adding-downstream-data-rule", log.Fields{
+ logger.Infow(ctx, "adding-downstream-data-rule", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
"uni-id": uniID,
})
if pcp, ok := classifierInfo[VlanPcp]; ok {
- gemPort = f.techprofile[intfID].GetGemportIDForPbit(TpInst,
+ gemPort = f.techprofile[intfID].GetGemportIDForPbit(ctx, TpInst,
tp_pb.Direction_DOWNSTREAM,
pcp.(uint32))
//Adding HSIA downstream flow
@@ -3542,7 +3540,7 @@
installFlowOnAllGemports(ctx, f.addDownstreamDataFlow, nil, args, classifierInfo, actionInfo, flow, gemPorts, TpInst, HsiaFlow, Downstream, tpID)
}
} else {
- logger.Errorw("invalid-flow-type-to-handle",
+ logger.Errorw(ctx, "invalid-flow-type-to-handle",
log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -3553,7 +3551,7 @@
return
}
// Send Techprofile download event to child device in go routine as it takes time
- go f.sendTPDownloadMsgToChild(intfID, onuID, uniID, uni, tpID)
+ go f.sendTPDownloadMsgToChild(ctx, intfID, onuID, uniID, uni, tpID)
}
func (f *OpenOltFlowMgr) isGemPortUsedByAnotherFlow(gemPK gemPortKey) bool {
@@ -3584,87 +3582,87 @@
// So, we need to check and make sure that no other gem port is referring to the given TP ID
// on any other uni port.
tpInstances := f.techprofile[ponIntf].FindAllTpInstances(ctx, tpID, ponIntf, onuID).([]tp.TechProfile)
- logger.Debugw("got-single-instance-tp-instances", log.Fields{"tp-instances": tpInstances})
+ logger.Debugw(ctx, "got-single-instance-tp-instances", log.Fields{"tp-instances": tpInstances})
for i := 0; i < len(tpInstances); i++ {
tpI := tpInstances[i]
tpGemPorts := tpI.UpstreamGemPortAttributeList
for _, tpGemPort := range tpGemPorts {
if tpGemPort.GemportID != gemPortID {
- logger.Debugw("single-instance-tp-is-in-use-by-gem", log.Fields{"gemPort": tpGemPort.GemportID})
+ logger.Debugw(ctx, "single-instance-tp-is-in-use-by-gem", log.Fields{"gemPort": tpGemPort.GemportID})
return true, tpGemPort.GemportID
}
}
}
}
- logger.Debug("tech-profile-is-not-in-use-by-any-gem")
+ logger.Debug(ctx, "tech-profile-is-not-in-use-by-any-gem")
return false, 0
}
-func formulateClassifierInfoFromFlow(classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) {
+func formulateClassifierInfoFromFlow(ctx context.Context, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) {
for _, field := range flows.GetOfbFields(flow) {
if field.Type == flows.ETH_TYPE {
classifierInfo[EthType] = field.GetEthType()
- logger.Debug("field-type-eth-type", log.Fields{"classifierInfo[ETH_TYPE]": classifierInfo[EthType].(uint32)})
+ logger.Debug(ctx, "field-type-eth-type", log.Fields{"classifierInfo[ETH_TYPE]": classifierInfo[EthType].(uint32)})
} else if field.Type == flows.ETH_DST {
classifierInfo[EthDst] = field.GetEthDst()
- logger.Debug("field-type-eth-type", log.Fields{"classifierInfo[ETH_DST]": classifierInfo[EthDst].([]uint8)})
+ logger.Debug(ctx, "field-type-eth-type", log.Fields{"classifierInfo[ETH_DST]": classifierInfo[EthDst].([]uint8)})
} else if field.Type == flows.IP_PROTO {
classifierInfo[IPProto] = field.GetIpProto()
- logger.Debug("field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
+ logger.Debug(ctx, "field-type-ip-proto", log.Fields{"classifierInfo[IP_PROTO]": classifierInfo[IPProto].(uint32)})
} else if field.Type == flows.IN_PORT {
classifierInfo[InPort] = field.GetPort()
- logger.Debug("field-type-in-port", log.Fields{"classifierInfo[IN_PORT]": classifierInfo[InPort].(uint32)})
+ logger.Debug(ctx, "field-type-in-port", log.Fields{"classifierInfo[IN_PORT]": classifierInfo[InPort].(uint32)})
} else if field.Type == flows.VLAN_VID {
classifierInfo[VlanVid] = field.GetVlanVid() & 0xfff
- logger.Debug("field-type-vlan-vid", log.Fields{"classifierInfo[VLAN_VID]": classifierInfo[VlanVid].(uint32)})
+ logger.Debug(ctx, "field-type-vlan-vid", log.Fields{"classifierInfo[VLAN_VID]": classifierInfo[VlanVid].(uint32)})
} else if field.Type == flows.VLAN_PCP {
classifierInfo[VlanPcp] = field.GetVlanPcp()
- logger.Debug("field-type-vlan-pcp", log.Fields{"classifierInfo[VLAN_PCP]": classifierInfo[VlanPcp].(uint32)})
+ logger.Debug(ctx, "field-type-vlan-pcp", log.Fields{"classifierInfo[VLAN_PCP]": classifierInfo[VlanPcp].(uint32)})
} else if field.Type == flows.UDP_DST {
classifierInfo[UDPDst] = field.GetUdpDst()
- logger.Debug("field-type-udp-dst", log.Fields{"classifierInfo[UDP_DST]": classifierInfo[UDPDst].(uint32)})
+ logger.Debug(ctx, "field-type-udp-dst", log.Fields{"classifierInfo[UDP_DST]": classifierInfo[UDPDst].(uint32)})
} else if field.Type == flows.UDP_SRC {
classifierInfo[UDPSrc] = field.GetUdpSrc()
- logger.Debug("field-type-udp-src", log.Fields{"classifierInfo[UDP_SRC]": classifierInfo[UDPSrc].(uint32)})
+ logger.Debug(ctx, "field-type-udp-src", log.Fields{"classifierInfo[UDP_SRC]": classifierInfo[UDPSrc].(uint32)})
} else if field.Type == flows.IPV4_DST {
classifierInfo[Ipv4Dst] = field.GetIpv4Dst()
- logger.Debug("field-type-ipv4-dst", log.Fields{"classifierInfo[IPV4_DST]": classifierInfo[Ipv4Dst].(uint32)})
+ logger.Debug(ctx, "field-type-ipv4-dst", log.Fields{"classifierInfo[IPV4_DST]": classifierInfo[Ipv4Dst].(uint32)})
} else if field.Type == flows.IPV4_SRC {
classifierInfo[Ipv4Src] = field.GetIpv4Src()
- logger.Debug("field-type-ipv4-src", log.Fields{"classifierInfo[IPV4_SRC]": classifierInfo[Ipv4Src].(uint32)})
+ logger.Debug(ctx, "field-type-ipv4-src", log.Fields{"classifierInfo[IPV4_SRC]": classifierInfo[Ipv4Src].(uint32)})
} else if field.Type == flows.METADATA {
classifierInfo[Metadata] = field.GetTableMetadata()
- logger.Debug("field-type-metadata", log.Fields{"classifierInfo[Metadata]": classifierInfo[Metadata].(uint64)})
+ logger.Debug(ctx, "field-type-metadata", log.Fields{"classifierInfo[Metadata]": classifierInfo[Metadata].(uint64)})
} else if field.Type == flows.TUNNEL_ID {
classifierInfo[TunnelID] = field.GetTunnelId()
- logger.Debug("field-type-tunnelId", log.Fields{"classifierInfo[TUNNEL_ID]": classifierInfo[TunnelID].(uint64)})
+ logger.Debug(ctx, "field-type-tunnelId", log.Fields{"classifierInfo[TUNNEL_ID]": classifierInfo[TunnelID].(uint64)})
} else {
- logger.Errorw("un-supported-field-type", log.Fields{"type": field.Type})
+ logger.Errorw(ctx, "un-supported-field-type", log.Fields{"type": field.Type})
return
}
}
}
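A short, hypothetical usage sketch of the converted extractor above (flow construction is omitted and the log message name is illustrative only):

// Build the classifier map from an OpenFlow entry, passing ctx so the
// per-field debug logs inside the extractor carry the request context.
classifierInfo := make(map[string]interface{})
formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
logger.Debugw(ctx, "example-extracted-classifier", log.Fields{"classifier": classifierInfo})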
-func formulateActionInfoFromFlow(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+func formulateActionInfoFromFlow(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
for _, action := range flows.GetActions(flow) {
if action.Type == flows.OUTPUT {
if out := action.GetOutput(); out != nil {
actionInfo[Output] = out.GetPort()
- logger.Debugw("action-type-output", log.Fields{"out-port": actionInfo[Output].(uint32)})
+ logger.Debugw(ctx, "action-type-output", log.Fields{"out-port": actionInfo[Output].(uint32)})
} else {
return olterrors.NewErrInvalidValue(log.Fields{"output-port": nil}, nil)
}
} else if action.Type == flows.POP_VLAN {
actionInfo[PopVlan] = true
- logger.Debugw("action-type-pop-vlan", log.Fields{"in_port": classifierInfo[InPort].(uint32)})
+ logger.Debugw(ctx, "action-type-pop-vlan", log.Fields{"in_port": classifierInfo[InPort].(uint32)})
} else if action.Type == flows.PUSH_VLAN {
if out := action.GetPush(); out != nil {
if tpid := out.GetEthertype(); tpid != 0x8100 {
- logger.Errorw("invalid ethertype in push action", log.Fields{"ethertype": actionInfo[PushVlan].(int32)})
+ logger.Errorw(ctx, "invalid ethertype in push action", log.Fields{"ethertype": actionInfo[PushVlan].(int32)})
} else {
actionInfo[PushVlan] = true
actionInfo[TPID] = tpid
- logger.Debugw("action-type-push-vlan",
+ logger.Debugw(ctx, "action-type-push-vlan",
log.Fields{
"push-tpid": actionInfo[TPID].(uint32),
"in-port": classifierInfo[InPort].(uint32)})
@@ -3676,12 +3674,12 @@
if ofClass := field.GetOxmClass(); ofClass != ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
return olterrors.NewErrInvalidValue(log.Fields{"openflow-class": ofClass}, nil)
}
- /*logger.Debugw("action-type-set-field",log.Fields{"field": field, "in_port": classifierInfo[IN_PORT].(uint32)})*/
- formulateSetFieldActionInfoFromFlow(field, actionInfo)
+ /*logger.Debugw(ctx, "action-type-set-field",log.Fields{"field": field, "in_port": classifierInfo[IN_PORT].(uint32)})*/
+ formulateSetFieldActionInfoFromFlow(ctx, field, actionInfo)
}
}
} else if action.Type == flows.GROUP {
- formulateGroupActionInfoFromFlow(action, actionInfo)
+ formulateGroupActionInfoFromFlow(ctx, action, actionInfo)
} else {
return olterrors.NewErrInvalidValue(log.Fields{"action-type": action.Type}, nil)
}
@@ -3689,43 +3687,43 @@
return nil
}
-func formulateSetFieldActionInfoFromFlow(field *ofp.OfpOxmField, actionInfo map[string]interface{}) {
+func formulateSetFieldActionInfoFromFlow(ctx context.Context, field *ofp.OfpOxmField, actionInfo map[string]interface{}) {
if ofbField := field.GetOfbField(); ofbField != nil {
fieldtype := ofbField.GetType()
if fieldtype == ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID {
if vlan := ofbField.GetVlanVid(); vlan != 0 {
actionInfo[VlanVid] = vlan & 0xfff
- logger.Debugw("action-set-vlan-vid", log.Fields{"actionInfo[VLAN_VID]": actionInfo[VlanVid].(uint32)})
+ logger.Debugw(ctx, "action-set-vlan-vid", log.Fields{"actionInfo[VLAN_VID]": actionInfo[VlanVid].(uint32)})
} else {
- logger.Error("no-invalid-vlan-id-in-set-vlan-vid-action")
+ logger.Error(ctx, "no-invalid-vlan-id-in-set-vlan-vid-action")
}
} else if fieldtype == ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP {
pcp := ofbField.GetVlanPcp()
actionInfo[VlanPcp] = pcp
log.Debugw("action-set-vlan-pcp", log.Fields{"actionInfo[VLAN_PCP]": actionInfo[VlanPcp].(uint32)})
} else {
- logger.Errorw("unsupported-action-set-field-type", log.Fields{"type": fieldtype})
+ logger.Errorw(ctx, "unsupported-action-set-field-type", log.Fields{"type": fieldtype})
}
}
}
-func formulateGroupActionInfoFromFlow(action *ofp.OfpAction, actionInfo map[string]interface{}) {
+func formulateGroupActionInfoFromFlow(ctx context.Context, action *ofp.OfpAction, actionInfo map[string]interface{}) {
if action.GetGroup() == nil {
- logger.Warn("no-group-entry-found-in-the-group-action")
+ logger.Warn(ctx, "no-group-entry-found-in-the-group-action")
} else {
actionInfo[GroupID] = action.GetGroup().GroupId
- logger.Debugw("action-group-id", log.Fields{"actionInfo[GroupID]": actionInfo[GroupID].(uint32)})
+ logger.Debugw(ctx, "action-group-id", log.Fields{"actionInfo[GroupID]": actionInfo[GroupID].(uint32)})
}
}
-func formulateControllerBoundTrapFlowInfo(actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
+func formulateControllerBoundTrapFlowInfo(ctx context.Context, actionInfo, classifierInfo map[string]interface{}, flow *ofp.OfpFlowStats) error {
if isControllerFlow := IsControllerBoundFlow(actionInfo[Output].(uint32)); isControllerFlow {
- logger.Debug("controller-bound-trap-flows--getting-inport-from-tunnelid")
+ logger.Debug(ctx, "controller-bound-trap-flows--getting-inport-from-tunnelid")
/* Get UNI port/ IN Port from tunnel ID field for upstream controller bound flows */
if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
classifierInfo[InPort] = uniPort
- logger.Debugw("upstream-pon-to-controller-flow--inport-in-tunnelid",
+ logger.Debugw(ctx, "upstream-pon-to-controller-flow--inport-in-tunnelid",
log.Fields{
"newinport": classifierInfo[InPort].(uint32),
"outport": actionInfo[Output].(uint32)})
@@ -3737,12 +3735,12 @@
}
}
} else {
- logger.Debug("non-controller-flows--getting-uniport-from-tunnelid")
+ logger.Debug(ctx, "non-controller-flows--getting-uniport-from-tunnelid")
// Downstream flow from NNI to PON port , Use tunnel ID as new OUT port / UNI port
if portType := IntfIDToPortTypeName(actionInfo[Output].(uint32)); portType == voltha.Port_PON_OLT {
if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
actionInfo[Output] = uniPort
- logger.Debugw("downstream-nni-to-pon-port-flow, outport-in-tunnelid",
+ logger.Debugw(ctx, "downstream-nni-to-pon-port-flow, outport-in-tunnelid",
log.Fields{
"newoutport": actionInfo[Output].(uint32),
"outport": actionInfo[Output].(uint32)})
@@ -3756,7 +3754,7 @@
} else if portType := IntfIDToPortTypeName(classifierInfo[InPort].(uint32)); portType == voltha.Port_PON_OLT {
if uniPort := flows.GetChildPortFromTunnelId(flow); uniPort != 0 {
classifierInfo[InPort] = uniPort
- logger.Debugw("upstream-pon-to-nni-port-flow, inport-in-tunnelid",
+ logger.Debugw(ctx, "upstream-pon-to-nni-port-flow, inport-in-tunnelid",
log.Fields{
"newinport": actionInfo[Output].(uint32),
"outport": actionInfo[Output].(uint32)})
@@ -3773,7 +3771,7 @@
return nil
}
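The tunnel-id recovery described in the comments above can be exercised in isolation with the voltha-lib-go flow utilities. A hedged sketch follows; the port numbers are placeholders and `fu` aliases the same flows package referred to as `flows` in this file.
// Sketch only: an upstream trap flow whose classifier in-port is a PON port;
// the real UNI/in-port is carried in the flow's tunnel-id field.
fa := &fu.FlowArgs{
	MatchFields: []*ofp.OfpOxmOfbField{fu.InPort(536870912), fu.TunnelId(uint64(16))},
	Actions:     []*ofp.OfpAction{fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER))},
}
flow, _ := fu.MkFlowStat(fa)
// Recovers the UNI port encoded in the tunnel id (16 here, assuming a direct
// encoding); formulateControllerBoundTrapFlowInfo uses it as the new in-port.
uniPort := fu.GetChildPortFromTunnelId(flow)
_ = uniPort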
-func getTpIDFromFlow(flow *ofp.OfpFlowStats) (uint32, error) {
+func getTpIDFromFlow(ctx context.Context, flow *ofp.OfpFlowStats) (uint32, error) {
/* Metadata 8 bytes:
Most Significant 2 Bytes = Inner VLAN
Next 2 Bytes = Tech Profile ID(TPID)
@@ -3781,11 +3779,11 @@
Flow Metadata carries Tech-Profile (TP) ID and is mandatory in all
subscriber related flows.
*/
- metadata := flows.GetMetadataFromWriteMetadataAction(flow)
+ metadata := flows.GetMetadataFromWriteMetadataAction(ctx, flow)
if metadata == 0 {
return 0, olterrors.NewErrNotFound("metadata", log.Fields{"flow": flow}, nil)
}
- TpID := flows.GetTechProfileIDFromWriteMetaData(metadata)
+ TpID := flows.GetTechProfileIDFromWriteMetaData(ctx, metadata)
return uint32(TpID), nil
}
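The write-metadata layout documented above can be illustrated with plain bit arithmetic. This is a standalone sketch only; the adapter relies on the voltha-lib-go helpers called above, which remain the source of truth for the decoding.
package main

import "fmt"

// Layout per the comment in getTpIDFromFlow:
// [inner VLAN: 2 bytes][TP ID: 2 bytes][port ID: 4 bytes].
func tpIDFromWriteMetadata(metadata uint64) uint16 {
	return uint16((metadata >> 32) & 0xFFFF)
}

func main() {
	md := uint64(0x0021)<<48 | uint64(64)<<32 | uint64(0x100) // VLAN 0x21, TP ID 64, port 0x100
	fmt.Println(tpIDFromWriteMetadata(md)) // 64
}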
@@ -3799,30 +3797,30 @@
}
// getNniIntfID gets nni intf id from the flow classifier/action
-func getNniIntfID(classifier map[string]interface{}, action map[string]interface{}) (uint32, error) {
+func getNniIntfID(ctx context.Context, classifier map[string]interface{}, action map[string]interface{}) (uint32, error) {
portType := IntfIDToPortTypeName(classifier[InPort].(uint32))
if portType == voltha.Port_PON_OLT {
- intfID, err := IntfIDFromNniPortNum(action[Output].(uint32))
+ intfID, err := IntfIDFromNniPortNum(ctx, action[Output].(uint32))
if err != nil {
- logger.Debugw("invalid-action-port-number",
+ logger.Debugw(ctx, "invalid-action-port-number",
log.Fields{
"port-number": action[Output].(uint32),
"error": err})
return uint32(0), err
}
- logger.Infow("output-nni-intfId-is", log.Fields{"intf-id": intfID})
+ logger.Infow(ctx, "output-nni-intfId-is", log.Fields{"intf-id": intfID})
return intfID, nil
} else if portType == voltha.Port_ETHERNET_NNI {
- intfID, err := IntfIDFromNniPortNum(classifier[InPort].(uint32))
+ intfID, err := IntfIDFromNniPortNum(ctx, classifier[InPort].(uint32))
if err != nil {
- logger.Debugw("invalid-classifier-port-number",
+ logger.Debugw(ctx, "invalid-classifier-port-number",
log.Fields{
"port-number": action[Output].(uint32),
"error": err})
return uint32(0), err
}
- logger.Infow("input-nni-intfId-is", log.Fields{"intf-id": intfID})
+ logger.Infow(ctx, "input-nni-intfId-is", log.Fields{"intf-id": intfID})
return intfID, nil
}
return uint32(0), nil
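A hedged usage sketch for getNniIntfID; the port numbers below follow the adapter's usual encodings only as an illustration and would normally come from the parsed flow.
// Upstream case: the classifier in-port is a PON port, so the NNI interface id
// is derived from the action's output (NNI) port number.
ctx := context.Background()
classifier := map[string]interface{}{InPort: uint32(536870912)}
action := map[string]interface{}{Output: uint32(1048576)}
nniIntfID, err := getNniIntfID(ctx, classifier, action)
_, _ = nniIntfID, err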
@@ -3838,7 +3836,7 @@
lookupGemPort, ok := f.packetInGemPort[pktInkey]
if ok {
if lookupGemPort == gemPort {
- logger.Infow("pktin-key/value-found-in-cache--no-need-to-update-kv--assume-both-in-sync",
+ logger.Infow(ctx, "pktin-key/value-found-in-cache--no-need-to-update-kv--assume-both-in-sync",
log.Fields{
"pktinkey": pktInkey,
"gem": gemPort})
@@ -3848,7 +3846,7 @@
f.packetInGemPort[pktInkey] = gemPort
f.resourceMgr.UpdateGemPortForPktIn(ctx, pktInkey, gemPort)
- logger.Infow("pktin-key-not-found-in-local-cache-value-is-different--updating-cache-and-kv-store",
+ logger.Infow(ctx, "pktin-key-not-found-in-local-cache-value-is-different--updating-cache-and-kv-store",
log.Fields{
"pktinkey": pktInkey,
"gem": gemPort})
@@ -3866,7 +3864,7 @@
if onu.OnuID == onuID {
for _, uni := range onu.UniPorts {
if uni == portNum {
- logger.Infow("uni-already-in-cache--no-need-to-update-cache-and-kv-store", log.Fields{"uni": portNum})
+ logger.Infow(ctx, "uni-already-in-cache--no-need-to-update-cache-and-kv-store", log.Fields{"uni": portNum})
return
}
}
@@ -3880,7 +3878,7 @@
func (f *OpenOltFlowMgr) loadFlowIDlistForGem(ctx context.Context, intf uint32) {
flowIDsList, err := f.resourceMgr.GetFlowIDsGemMapForInterface(ctx, intf)
if err != nil {
- logger.Error("failed-to-get-flowid-list-per-gem", log.Fields{"intf": intf})
+ logger.Error(ctx, "failed-to-get-flowid-list-per-gem", log.Fields{"intf": intf})
return
}
for gem, FlowIDs := range flowIDsList {
@@ -3895,7 +3893,7 @@
func (f *OpenOltFlowMgr) loadInterfaceToMulticastQueueMap(ctx context.Context) {
storedMulticastQueueMap, err := f.resourceMgr.GetMcastQueuePerInterfaceMap(ctx)
if err != nil {
- logger.Error("failed-to-get-pon-interface-to-multicast-queue-map")
+ logger.Error(ctx, "failed-to-get-pon-interface-to-multicast-queue-map")
return
}
for intf, queueInfo := range storedMulticastQueueMap {
diff --git a/internal/pkg/core/openolt_flowmgr_test.go b/internal/pkg/core/openolt_flowmgr_test.go
index 4a74e9a..26697aa 100644
--- a/internal/pkg/core/openolt_flowmgr_test.go
+++ b/internal/pkg/core/openolt_flowmgr_test.go
@@ -265,8 +265,9 @@
}
func TestOpenOltFlowMgr_RemoveFlow(t *testing.T) {
+ ctx := context.Background()
// flowMgr := newMockFlowmgr()
- logger.Debug("Info Warning Error: Starting RemoveFlow() test")
+ logger.Debug(ctx, "Info Warning Error: Starting RemoveFlow() test")
fa := &fu.FlowArgs{
MatchFields: []*ofp.OfpOxmOfbField{
fu.InPort(2),
@@ -652,7 +653,7 @@
flowMgr.addGemPortToOnuInfoMap(ctx, tt.args.intfID, tt.args.onuID, gemPort)
}
for _, gemPortDeleted := range tt.args.gemPortIDsToBeDeleted {
- flowMgr.deleteGemPortFromLocalCache(tt.args.intfID, tt.args.onuID, gemPortDeleted)
+ flowMgr.deleteGemPortFromLocalCache(context.Background(), tt.args.intfID, tt.args.onuID, gemPortDeleted)
}
lenofGemPorts := len(flowMgr.onuGemInfo[tt.args.intfID][0].GemPorts)
if lenofGemPorts != tt.args.finalLength {
@@ -764,6 +765,7 @@
}
func TestOpenOltFlowMgr_checkAndAddFlow(t *testing.T) {
+ ctx := context.Background()
// flowMgr := newMockFlowmgr()
kw := make(map[string]uint64)
kw["table_id"] = 1
@@ -851,33 +853,33 @@
flowState2, _ := fu.MkFlowStat(fa2)
flowState3, _ := fu.MkFlowStat(fa3)
flowState4, _ := fu.MkFlowStat(fa4)
- formulateClassifierInfoFromFlow(classifierInfo, flowState)
- formulateClassifierInfoFromFlow(classifierInfo2, flowState2)
- formulateClassifierInfoFromFlow(classifierInfo3, flowState3)
- formulateClassifierInfoFromFlow(classifierInfo4, flowState4)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo, flowState)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo2, flowState2)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo3, flowState3)
+ formulateClassifierInfoFromFlow(ctx, classifierInfo4, flowState4)
- err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flowState)
+ err := formulateActionInfoFromFlow(ctx, actionInfo, classifierInfo, flowState)
if err != nil {
// Error logging is already done in the called function
// So just return in case of error
return
}
- err = formulateActionInfoFromFlow(actionInfo2, classifierInfo2, flowState2)
+ err = formulateActionInfoFromFlow(ctx, actionInfo2, classifierInfo2, flowState2)
if err != nil {
// Error logging is already done in the called function
// So just return in case of error
return
}
- err = formulateActionInfoFromFlow(actionInfo3, classifierInfo3, flowState3)
+ err = formulateActionInfoFromFlow(ctx, actionInfo3, classifierInfo3, flowState3)
if err != nil {
// Error logging is already done in the called function
// So just return in case of error
return
}
- err = formulateActionInfoFromFlow(actionInfo4, classifierInfo4, flowState4)
+ err = formulateActionInfoFromFlow(ctx, actionInfo4, classifierInfo4, flowState4)
if err != nil {
// Error logging is already done in the called function
// So just return in case of error
diff --git a/internal/pkg/core/openolt_test.go b/internal/pkg/core/openolt_test.go
index 22a61c2..764cb20 100644
--- a/internal/pkg/core/openolt_test.go
+++ b/internal/pkg/core/openolt_test.go
@@ -141,7 +141,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Abandon_device(tt.args.device); err != tt.wantErr {
+ if err := oo.Abandon_device(context.Background(), tt.args.device); err != tt.wantErr {
t.Errorf("Abandon_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -170,7 +170,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Activate_image_update(tt.args.device, tt.args.request)
+ got, err := oo.Activate_image_update(context.Background(), tt.args.device, tt.args.request)
if err != tt.wantErr && got == nil {
t.Errorf("Activate_image_update() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -191,7 +191,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Adapter_descriptor(); err != tt.wantErr {
+ if err := oo.Adapter_descriptor(context.Background()); err != tt.wantErr {
t.Errorf("Adapter_descriptor() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -218,7 +218,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- err := oo.Adopt_device(tt.args.device)
+ err := oo.Adopt_device(context.Background(), tt.args.device)
if (err != nil) && (reflect.TypeOf(err) !=
reflect.TypeOf(tt.wantErr)) && (tt.args.device == nil) {
t.Errorf("Adopt_device() error = %v, wantErr %v", err, tt.wantErr)
@@ -252,7 +252,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Cancel_image_download(tt.args.device, tt.args.request)
+ got, err := oo.Cancel_image_download(context.Background(), tt.args.device, tt.args.request)
if err != tt.wantErr && got == nil {
t.Errorf("Cancel_image_download() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -276,7 +276,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Delete_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Delete_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Delete_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -300,7 +300,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Device_types()
+ got, err := oo.Device_types(context.Background())
if err != tt.wantErr && got == nil {
t.Errorf("Device_types() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -325,7 +325,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Disable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Disable_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Disable_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -354,7 +354,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Download_image(tt.args.device, tt.args.request)
+ got, err := oo.Download_image(context.Background(), tt.args.device, tt.args.request)
if err != tt.wantErr && got == nil {
t.Errorf("Download_image() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -379,7 +379,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Get_device_details(tt.args.device); err != tt.wantErr {
+ if err := oo.Get_device_details(context.Background(), tt.args.device); err != tt.wantErr {
t.Errorf("Get_device_details() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -408,7 +408,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Get_image_download_status(tt.args.device, tt.args.request)
+ got, err := oo.Get_image_download_status(context.Background(), tt.args.device, tt.args.request)
if err != tt.wantErr && got == nil {
t.Errorf("Get_image_download_status() got = %v want = %v error = %v, wantErr %v",
got, tt.want, err, tt.wantErr)
@@ -446,7 +446,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Get_ofp_device_info(tt.args.device)
+ got, err := oo.Get_ofp_device_info(context.Background(), tt.args.device)
if !reflect.DeepEqual(err, tt.wantErr) || !reflect.DeepEqual(got, tt.want) {
t.Errorf("Get_ofp_device_info() got = %v want = %v error = %v, wantErr = %v",
got, tt.want, err, tt.wantErr)
@@ -469,7 +469,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Health()
+ got, err := oo.Health(context.Background())
if err != tt.wantErr && got == nil {
t.Errorf("Get_ofp_port_info() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -526,7 +526,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Process_inter_adapter_message(tt.args.msg); reflect.TypeOf(err) != tt.wantErrType {
+ if err := oo.Process_inter_adapter_message(context.Background(), tt.args.msg); reflect.TypeOf(err) != tt.wantErrType {
t.Errorf("Process_inter_adapter_message() error = %v, wantErr %v",
reflect.TypeOf(err), tt.wantErrType)
}
@@ -551,7 +551,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Reboot_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Reboot_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Reboot_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -585,7 +585,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Receive_packet_out(tt.args.deviceID, tt.args.egressPortNo, tt.args.packet); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Receive_packet_out(context.Background(), tt.args.deviceID, tt.args.egressPortNo, tt.args.packet); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Receive_packet_out() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -610,7 +610,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Reconcile_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Reconcile_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Reconcile_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -634,7 +634,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Reenable_device(tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Reenable_device(context.Background(), tt.args.device); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Reenable_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -663,7 +663,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- got, err := oo.Revert_image_update(tt.args.device, tt.args.request)
+ got, err := oo.Revert_image_update(context.Background(), tt.args.device, tt.args.request)
if err != tt.wantErr && got == nil {
t.Log("error :", err)
}
@@ -688,7 +688,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Self_test_device(tt.args.device); err != tt.wantErr {
+ if err := oo.Self_test_device(context.Background(), tt.args.device); err != tt.wantErr {
t.Errorf("Self_test_device() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -757,7 +757,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Suppress_event(tt.args.filter); err != tt.wantErr {
+ if err := oo.Suppress_event(context.Background(), tt.args.filter); err != tt.wantErr {
t.Errorf("Suppress_event() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -781,7 +781,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Unsuppress_event(tt.args.filter); err != tt.wantErr {
+ if err := oo.Unsuppress_event(context.Background(), tt.args.filter); err != tt.wantErr {
t.Errorf("Unsuppress_event() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -808,7 +808,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Update_flows_bulk(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); err != tt.wantErr {
+ if err := oo.Update_flows_bulk(context.Background(), tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); err != tt.wantErr {
t.Errorf("Update_flows_bulk() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -836,7 +836,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Update_flows_incrementally(tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); !reflect.DeepEqual(err, tt.wantErr) {
+ if err := oo.Update_flows_incrementally(context.Background(), tt.args.device, tt.args.flows, tt.args.groups, tt.args.flowMetadata); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Update_flows_incrementally() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -860,7 +860,8 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Update_pm_config(tt.args.device, tt.args.pmConfigs); !reflect.DeepEqual(err, tt.wantErr) {
+
+ if err := oo.Update_pm_config(context.Background(), tt.args.device, tt.args.pmConfigs); !reflect.DeepEqual(err, tt.wantErr) {
t.Errorf("Update_pm_config() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -908,7 +909,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Enable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+ if err := oo.Enable_port(context.Background(), tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
t.Errorf("OpenOLT.Enable_port() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -933,7 +934,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
oo := testOltObject(tt.fields)
- if err := oo.Disable_port(tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
+ if err := oo.Disable_port(context.Background(), tt.args.deviceID, tt.args.port); (err != nil) != tt.wantErr {
t.Errorf("OpenOLT.Disable_port() error = %v, wantErr %v", err, tt.wantErr)
}
})
diff --git a/internal/pkg/core/statsmanager.go b/internal/pkg/core/statsmanager.go
index c7bd546..6427a0c 100755
--- a/internal/pkg/core/statsmanager.go
+++ b/internal/pkg/core/statsmanager.go
@@ -18,12 +18,12 @@
package core
import (
+ "context"
"fmt"
+ "strconv"
"sync"
"time"
- "strconv"
-
"github.com/opencord/voltha-lib-go/v3/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
"github.com/opencord/voltha-protos/v3/go/openolt"
@@ -190,7 +190,7 @@
}
// NewOpenOltStatsMgr returns a new instance of the OpenOltStatisticsMgr
-func NewOpenOltStatsMgr(Dev *DeviceHandler) *OpenOltStatisticsMgr {
+func NewOpenOltStatsMgr(ctx context.Context, Dev *DeviceHandler) *OpenOltStatisticsMgr {
var StatMgr OpenOltStatisticsMgr
@@ -199,16 +199,16 @@
// Northbound and Southbound ports
// added to initialize the pm_metrics
var Ports interface{}
- Ports, _ = InitPorts("nni", Dev.device.Id, 1)
+ Ports, _ = InitPorts(ctx, "nni", Dev.device.Id, 1)
StatMgr.NorthBoundPort, _ = Ports.(map[uint32]*NniPort)
NumPonPorts := Dev.resourceMgr.DevInfo.GetPonPorts()
- Ports, _ = InitPorts("pon", Dev.device.Id, NumPonPorts)
+ Ports, _ = InitPorts(ctx, "pon", Dev.device.Id, NumPonPorts)
StatMgr.SouthBoundPort, _ = Ports.(map[uint32]*PonPort)
return &StatMgr
}
// InitPorts collects the port objects: nni and pon that are updated with the current data from the OLT
-func InitPorts(Intftype string, DeviceID string, numOfPorts uint32) (interface{}, error) {
+func InitPorts(ctx context.Context, Intftype string, DeviceID string, numOfPorts uint32) (interface{}, error) {
/*
This method collects the port objects: nni and pon that are updated with the
current data from the OLT
@@ -224,25 +224,25 @@
if Intftype == "nni" {
NniPorts := make(map[uint32]*NniPort)
for i = 0; i < numOfPorts; i++ {
- Port := BuildPortObject(i, "nni", DeviceID).(*NniPort)
+ Port := BuildPortObject(ctx, i, "nni", DeviceID).(*NniPort)
NniPorts[Port.IntfID] = Port
}
return NniPorts, nil
} else if Intftype == "pon" {
PONPorts := make(map[uint32]*PonPort)
for i = 0; i < numOfPorts; i++ {
- PONPort := BuildPortObject(i, "pon", DeviceID).(*PonPort)
+ PONPort := BuildPortObject(ctx, i, "pon", DeviceID).(*PonPort)
PONPorts[PortNoToIntfID(PONPort.IntfID, voltha.Port_PON_OLT)] = PONPort
}
return PONPorts, nil
} else {
- logger.Errorw("invalid-type-of-interface", log.Fields{"interface-type": Intftype})
+ logger.Errorw(ctx, "invalid-type-of-interface", log.Fields{"interface-type": Intftype})
return nil, olterrors.NewErrInvalidValue(log.Fields{"interface-type": Intftype}, nil)
}
}
// BuildPortObject allows for updating north and southbound ports, newly discovered ports, and devices
-func BuildPortObject(PortNum uint32, IntfType string, DeviceID string) interface{} {
+func BuildPortObject(ctx context.Context, PortNum uint32, IntfType string, DeviceID string) interface{} {
/*
Separate method to allow for updating north and southbound ports
newly discovered ports and devices
@@ -257,7 +257,7 @@
if IntfType == "nni" {
IntfID := IntfIDToPortNo(PortNum, voltha.Port_ETHERNET_NNI)
nniID := PortNoToIntfID(IntfID, voltha.Port_ETHERNET_NNI)
- logger.Debugw("interface-type-nni",
+ logger.Debugw(ctx, "interface-type-nni",
log.Fields{
"nni-id": nniID,
"intf-type": IntfType})
@@ -267,13 +267,13 @@
// intf_id and pon_id are currently equal.
IntfID := IntfIDToPortNo(PortNum, voltha.Port_PON_OLT)
PONID := PortNoToIntfID(IntfID, voltha.Port_PON_OLT)
- logger.Debugw("interface-type-pon",
+ logger.Debugw(ctx, "interface-type-pon",
log.Fields{
"pon-id": PONID,
"intf-type": IntfType})
return NewPONPort(PONID, DeviceID, IntfID, PortNum)
} else {
- logger.Errorw("invalid-type-of-interface", log.Fields{"intf-type": IntfType})
+ logger.Errorw(ctx, "invalid-type-of-interface", log.Fields{"intf-type": IntfType})
return nil
}
}
@@ -368,9 +368,9 @@
}
// publishMetrics will publish the pon port metrics
-func (StatMgr OpenOltStatisticsMgr) publishMetrics(val map[string]float32,
+func (StatMgr OpenOltStatisticsMgr) publishMetrics(ctx context.Context, val map[string]float32,
port *voltha.Port, devID string, devType string) {
- logger.Debugw("publish-metrics",
+ logger.Debugw(ctx, "publish-metrics",
log.Fields{
"port": port.Label,
"metrics": val})
@@ -405,26 +405,26 @@
ke.Type = voltha.KpiEventType_slice
ke.Ts = float64(time.Now().UnixNano())
- if err := StatMgr.Device.EventProxy.SendKpiEvent("STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, volthaEventSubCatgry, raisedTs); err != nil {
- logger.Errorw("failed-to-send-pon-stats", log.Fields{"err": err})
+ if err := StatMgr.Device.EventProxy.SendKpiEvent(ctx, "STATS_EVENT", &ke, voltha.EventCategory_EQUIPMENT, volthaEventSubCatgry, raisedTs); err != nil {
+ logger.Errorw(ctx, "failed-to-send-pon-stats", log.Fields{"err": err})
}
}
// PortStatisticsIndication handles the port statistics indication
-func (StatMgr *OpenOltStatisticsMgr) PortStatisticsIndication(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
- StatMgr.PortsStatisticsKpis(PortStats, NumPonPorts)
- logger.Debugw("received-port-stats-indication", log.Fields{"port-stats": PortStats})
+func (StatMgr *OpenOltStatisticsMgr) PortStatisticsIndication(ctx context.Context, PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+ StatMgr.PortsStatisticsKpis(ctx, PortStats, NumPonPorts)
+ logger.Debugw(ctx, "received-port-stats-indication", log.Fields{"port-stats": PortStats})
// TODO send stats to core topic to the voltha kafka or a different kafka ?
}
// FlowStatisticsIndication to be implemented
-func FlowStatisticsIndication(self, FlowStats *openolt.FlowStatistics) {
- logger.Debugw("flow-stats-collected", log.Fields{"flow-stats": FlowStats})
+func FlowStatisticsIndication(ctx context.Context, self, FlowStats *openolt.FlowStatistics) {
+ logger.Debugw(ctx, "flow-stats-collected", log.Fields{"flow-stats": FlowStats})
//TODO send to kafka ?
}
// PortsStatisticsKpis map the port stats values into a dictionary, creates the kpiEvent and then publish to Kafka
-func (StatMgr *OpenOltStatisticsMgr) PortsStatisticsKpis(PortStats *openolt.PortStatistics, NumPonPorts uint32) {
+func (StatMgr *OpenOltStatisticsMgr) PortsStatisticsKpis(ctx context.Context, PortStats *openolt.PortStatistics, NumPonPorts uint32) {
/*map the port stats values into a dictionary
Create a kpiEvent and publish to Kafka
@@ -461,7 +461,7 @@
mutex.Lock()
StatMgr.NorthBoundPort[0] = &portNNIStat
mutex.Unlock()
- logger.Debugw("received-nni-stats", log.Fields{"nni-stats": StatMgr.NorthBoundPort})
+ logger.Debugw(ctx, "received-nni-stats", log.Fields{"nni-stats": StatMgr.NorthBoundPort})
}
for i := uint32(0); i < NumPonPorts; i++ {
@@ -483,7 +483,7 @@
mutex.Lock()
StatMgr.SouthBoundPort[i] = &portPonStat
mutex.Unlock()
- logger.Debugw("received-pon-stats-for-port", log.Fields{"port-pon-stats": portPonStat})
+ logger.Debugw(ctx, "received-pon-stats-for-port", log.Fields{"port-pon-stats": portPonStat})
}
}
@@ -506,7 +506,7 @@
err = UpdatePortObjectKpiData(SouthboundPorts[PortStats.IntfID], PMData)
}
if (err != nil) {
- logger.Error("Error publishing statistics data")
+ logger.Error(ctx, "Error publishing statistics data")
}
*/
diff --git a/internal/pkg/core/statsmanager_test.go b/internal/pkg/core/statsmanager_test.go
index f7f8246..67b9a6a 100644
--- a/internal/pkg/core/statsmanager_test.go
+++ b/internal/pkg/core/statsmanager_test.go
@@ -18,6 +18,7 @@
package core
import (
+ "context"
"fmt"
"github.com/opencord/voltha-protos/v3/go/openolt"
"github.com/opencord/voltha-protos/v3/go/voltha"
@@ -44,7 +45,7 @@
}
dh := newMockDeviceHandler()
dh.device = device
- StatMgr := NewOpenOltStatsMgr(dh)
+ StatMgr := NewOpenOltStatsMgr(context.Background(), dh)
type args struct {
PortStats *openolt.PortStatistics
@@ -59,7 +60,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- StatMgr.PortStatisticsIndication(tt.args.PortStats, 16)
+ StatMgr.PortStatisticsIndication(context.Background(), tt.args.PortStats, 16)
})
}
}
@@ -168,7 +169,7 @@
NorthBoundPort: tt.fields.NorthBoundPort,
SouthBoundPort: tt.fields.SouthBoundPort,
}
- StatMgr.publishMetrics(tt.args.val, tt.args.port, "onu1", "openolt")
+ StatMgr.publishMetrics(context.Background(), tt.args.val, tt.args.port, "onu1", "openolt")
})
}
}
diff --git a/internal/pkg/resourcemanager/common.go b/internal/pkg/resourcemanager/common.go
index b2a4112..3703526 100644
--- a/internal/pkg/resourcemanager/common.go
+++ b/internal/pkg/resourcemanager/common.go
@@ -21,12 +21,12 @@
"github.com/opencord/voltha-lib-go/v3/pkg/log"
)
-var logger log.Logger
+var logger log.CLogger
func init() {
// Setup this package so that it's log level can be modified at run time
var err error
- logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "resourcemanager"})
+ logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "resourcemanager"})
if err != nil {
panic(err)
}
diff --git a/internal/pkg/resourcemanager/resourcemanager.go b/internal/pkg/resourcemanager/resourcemanager.go
index 8ab03f0..3557bbf 100755
--- a/internal/pkg/resourcemanager/resourcemanager.go
+++ b/internal/pkg/resourcemanager/resourcemanager.go
@@ -122,24 +122,24 @@
flowIDToGemInfoLock sync.RWMutex
}
-func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
- logger.Infow("kv-store-type", log.Fields{"store": storeType})
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+ logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
switch storeType {
case "consul":
- return kvstore.NewConsulClient(address, timeout)
+ return kvstore.NewConsulClient(ctx, address, timeout)
case "etcd":
- return kvstore.NewEtcdClient(address, timeout, log.FatalLevel)
+ return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
}
return nil, errors.New("unsupported-kv-store")
}
// SetKVClient sets the KV client and return a kv backend
-func SetKVClient(backend string, addr string, DeviceID string) *db.Backend {
+func SetKVClient(ctx context.Context, backend string, addr string, DeviceID string) *db.Backend {
// TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
// issue between kv store and backend , core is not calling NewBackend directly
- kvClient, err := newKVClient(backend, addr, KvstoreTimeout)
+ kvClient, err := newKVClient(ctx, backend, addr, KvstoreTimeout)
if err != nil {
- logger.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+ logger.Fatalw(ctx, "Failed to init KV client\n", log.Fields{"err": err})
return nil
}
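For reference, a hedged sketch of how the context-aware KV setup above is typically invoked; the store type, address, and device id are placeholders.
// Sketch only: create a per-device KV backend, passing the caller's context so
// KV-client and logger calls can carry its correlation data.
ctx := context.Background()
kvBackend := SetKVClient(ctx, "etcd", "etcd.default.svc:2379", "device-1234")
if kvBackend == nil {
	// SetKVClient logs the client-init failure and returns nil.
}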
@@ -158,16 +158,16 @@
// the resources.
func NewResourceMgr(ctx context.Context, deviceID string, KVStoreAddress string, kvStoreType string, deviceType string, devInfo *openolt.DeviceInfo) *OpenOltResourceMgr {
var ResourceMgr OpenOltResourceMgr
- logger.Debugf("Init new resource manager , address: %s, deviceid: %s", KVStoreAddress, deviceID)
+ logger.Debugf(ctx, "Init new resource manager , address: %s, deviceid: %s", KVStoreAddress, deviceID)
ResourceMgr.Address = KVStoreAddress
ResourceMgr.DeviceType = deviceType
ResourceMgr.DevInfo = devInfo
NumPONPorts := devInfo.GetPonPorts()
Backend := kvStoreType
- ResourceMgr.KVStore = SetKVClient(Backend, ResourceMgr.Address, deviceID)
+ ResourceMgr.KVStore = SetKVClient(ctx, Backend, ResourceMgr.Address, deviceID)
if ResourceMgr.KVStore == nil {
- logger.Error("Failed to setup KV store")
+ logger.Error(ctx, "Failed to setup KV store")
}
Ranges := make(map[string]*openolt.DeviceInfo_DeviceResourceRanges)
RsrcMgrsByTech := make(map[string]*ponrmgr.PONResourceManager)
@@ -230,12 +230,13 @@
var err error
for _, TechRange := range devInfo.Ranges {
technology := TechRange.Technology
- logger.Debugf("Device info technology %s", technology)
+ logger.Debugf(ctx, "Device info technology %s", technology)
Ranges[technology] = TechRange
- RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(technology, deviceType, deviceID,
+
+ RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(ctx, technology, deviceType, deviceID,
Backend, ResourceMgr.Address)
if err != nil {
- logger.Errorf("Failed to create pon resource manager instance for technology %s", technology)
+ logger.Errorf(ctx, "Failed to create pon resource manager instance for technology %s", technology)
return nil
}
// resource_mgrs_by_tech[technology] = resource_mgr
@@ -254,7 +255,7 @@
for _, PONRMgr := range RsrcMgrsByTech {
_ = PONRMgr.InitDeviceResourcePool(ctx)
}
- logger.Info("Initialization of resource manager success!")
+ logger.Info(ctx, "Initialization of resource manager success!")
return &ResourceMgr
}
@@ -267,11 +268,11 @@
// init the resource range pool according to the sharing type
- logger.Debugf("Resource range pool init for technology %s", ponRMgr.Technology)
+ logger.Debugf(ctx, "Resource range pool init for technology %s", ponRMgr.Technology)
// first load from KV profiles
status := ponRMgr.InitResourceRangesFromKVStore(ctx)
if !status {
- logger.Debugf("Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
+ logger.Debugf(ctx, "Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
}
/*
@@ -279,7 +280,7 @@
or is broader than the device, the device's information will
dictate the range limits
*/
- logger.Debugw("Using device info to init pon resource ranges", log.Fields{"Tech": ponRMgr.Technology})
+ logger.Debugw(ctx, "Using device info to init pon resource ranges", log.Fields{"Tech": ponRMgr.Technology})
ONUIDStart := devInfo.OnuIdStart
ONUIDEnd := devInfo.OnuIdEnd
@@ -344,7 +345,7 @@
}
}
- logger.Debugw("Device info init", log.Fields{"technology": techRange.Technology,
+ logger.Debugw(ctx, "Device info init", log.Fields{"technology": techRange.Technology,
"onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd, "onu_id_shared_pool_id": ONUIDSharedPoolID,
"alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
"alloc_id_shared_pool_id": AllocIDSharedPoolID,
@@ -358,7 +359,7 @@
"uni_id_end_idx": 1, /*MaxUNIIDperONU()*/
})
- ponRMgr.InitDefaultPONResourceRanges(ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
+ ponRMgr.InitDefaultPONResourceRanges(ctx, ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
AllocIDStart, AllocIDEnd, AllocIDSharedPoolID,
GEMPortIDStart, GEMPortIDEnd, GEMPortIDSharedPoolID,
FlowIDStart, FlowIDEnd, FlowIDSharedPoolID, 0, 1,
@@ -367,33 +368,33 @@
// For global sharing, make sure to refresh both local and global resource manager instances' range
if ONUIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
+ globalPONRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
"", 0, nil)
- ponRMgr.UpdateRanges(ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
+ ponRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
"", 0, globalPONRMgr)
}
if AllocIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
+ globalPONRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
"", 0, nil)
- ponRMgr.UpdateRanges(ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
+ ponRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
"", 0, globalPONRMgr)
}
if GEMPortIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
+ globalPONRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
"", 0, nil)
- ponRMgr.UpdateRanges(ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
+ ponRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
"", 0, globalPONRMgr)
}
if FlowIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
+ globalPONRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
"", 0, nil)
- ponRMgr.UpdateRanges(ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
+ ponRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
"", 0, globalPONRMgr)
}
// Make sure loaded range fits the platform bit encoding ranges
- ponRMgr.UpdateRanges(ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
+ ponRMgr.UpdateRanges(ctx, ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
}
// Delete clears used resources for the particular olt device being deleted
@@ -421,11 +422,11 @@
*/
for _, rsrcMgr := range RsrcMgr.ResourceMgrs {
if err := rsrcMgr.ClearDeviceResourcePool(ctx); err != nil {
- logger.Debug("Failed to clear device resource pool")
+ logger.Debug(ctx, "Failed to clear device resource pool")
return err
}
}
- logger.Debug("Cleared device resource pool")
+ logger.Debug(ctx, "Cleared device resource pool")
return nil
}
@@ -443,7 +444,7 @@
ONUID, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(ctx, ponIntfID,
ponrmgr.ONU_ID, 1)
if err != nil {
- logger.Errorf("Failed to get resource for interface %d for type %s",
+ logger.Errorf(ctx, "Failed to get resource for interface %d for type %s",
ponIntfID, ponrmgr.ONU_ID)
return 0, err
}
@@ -463,11 +464,11 @@
FlowPath := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
if err := RsrcMgr.ResourceMgrs[ponIntfID].GetFlowIDInfo(ctx, FlowPath, flowID, &flows); err != nil {
- logger.Errorw("Error while getting flows from KV store", log.Fields{"flowId": flowID})
+ logger.Errorw(ctx, "Error while getting flows from KV store", log.Fields{"flowId": flowID})
return nil
}
if len(flows) == 0 {
- logger.Debugw("No flowInfo found in KV store", log.Fields{"flowPath": FlowPath})
+ logger.Debugw(ctx, "No flowInfo found in KV store", log.Fields{"flowPath": FlowPath})
return nil
}
return &flows
@@ -508,10 +509,10 @@
FlowIDs := RsrcMgr.ResourceMgrs[ponIntfID].GetCurrentFlowIDsForOnu(ctx, FlowPath)
if FlowIDs != nil {
- logger.Debugw("Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "ONUID": ONUID, "uniID": uniID, "KVpath": FlowPath})
+ logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "ONUID": ONUID, "uniID": uniID, "KVpath": FlowPath})
for _, flowID := range FlowIDs {
FlowInfo := RsrcMgr.GetFlowIDInfo(ctx, ponIntfID, int32(ONUID), int32(uniID), uint32(flowID))
- er := getFlowIDFromFlowInfo(FlowInfo, flowID, gemportID, flowStoreCookie, flowCategory, vlanVid, vlanPcp...)
+ er := getFlowIDFromFlowInfo(ctx, FlowInfo, flowID, gemportID, flowStoreCookie, flowCategory, vlanVid, vlanPcp...)
if er == nil {
log.Debugw("Found flowid for the vlan, pcp, and gem",
log.Fields{"flowID": flowID, "vlanVid": vlanVid, "vlanPcp": vlanPcp, "gemPortID": gemportID})
@@ -519,11 +520,11 @@
}
}
}
- logger.Debug("No matching flows with flow cookie or flow category, allocating new flowid")
+ logger.Debug(ctx, "No matching flows with flow cookie or flow category, allocating new flowid")
FlowIDs, err = RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(ctx, ponIntfID,
ponrmgr.FLOW_ID, 1)
if err != nil {
- logger.Errorf("Failed to get resource for interface %d for type %s",
+ logger.Errorf(ctx, "Failed to get resource for interface %d for type %s",
ponIntfID, ponrmgr.FLOW_ID)
return uint32(0), err
}
@@ -551,24 +552,24 @@
// Since we support only one alloc_id for the ONU at the moment,
// return the first alloc_id in the list, if available, for that
// ONU.
- logger.Debugw("Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
+ logger.Debugw(ctx, "Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
return AllocID[0]
}
AllocID, err = RsrcMgr.ResourceMgrs[intfID].GetResourceID(ctx, intfID,
ponrmgr.ALLOC_ID, 1)
if AllocID == nil || err != nil {
- logger.Error("Failed to allocate alloc id")
+ logger.Error(ctx, "Failed to allocate alloc id")
return 0
}
// update the resource map on KV store with the list of alloc_id
// allocated for the pon_intf_onu_id tuple
err = RsrcMgr.ResourceMgrs[intfID].UpdateAllocIdsForOnu(ctx, IntfOnuIDUniID, AllocID)
if err != nil {
- logger.Error("Failed to update Alloc ID")
+ logger.Error(ctx, "Failed to update Alloc ID")
return 0
}
- logger.Debugw("Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
+ logger.Debugw(ctx, "Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
return AllocID[0]
}
@@ -612,7 +613,7 @@
}
err := RsrcMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocIDs)
if err != nil {
- logger.Errorf("Failed to Remove Alloc Id For Onu. IntfID %d onuID %d uniID %d allocID %d",
+ logger.Errorf(ctx, "Failed to Remove Alloc Id For Onu. IntfID %d onuID %d uniID %d allocID %d",
intfID, onuID, uniID, allocID)
}
}
@@ -628,7 +629,7 @@
}
err := RsrcMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs)
if err != nil {
- logger.Errorf("Failed to Remove Gem Id For Onu. IntfID %d onuID %d uniID %d gemPortId %d",
+ logger.Errorf(ctx, "Failed to Remove Gem Id For Onu. IntfID %d onuID %d uniID %d gemPortId %d",
intfID, onuID, uniID, gemPortID)
}
}
@@ -646,12 +647,12 @@
IntfGEMPortPath = fmt.Sprintf("%d,%d", PonPort, GEM)
Val, err := json.Marshal(Data)
if err != nil {
- logger.Error("failed to Marshal")
+ logger.Error(ctx, "failed to Marshal")
return err
}
if err = RsrcMgr.KVStore.Put(ctx, IntfGEMPortPath, Val); err != nil {
- logger.Errorf("Failed to update resource %s", IntfGEMPortPath)
+ logger.Errorf(ctx, "Failed to update resource %s", IntfGEMPortPath)
return err
}
}
@@ -663,7 +664,7 @@
IntfGEMPortPath := fmt.Sprintf("%d,%d", PonPort, GemPort)
err := RsrcMgr.KVStore.Delete(ctx, IntfGEMPortPath)
if err != nil {
- logger.Errorf("Failed to Remove Gem port-Pon port to onu map on kv store. Gem %d PonPort %d", GemPort, PonPort)
+ logger.Errorf(ctx, "Failed to Remove Gem port-Pon port to onu map on kv store. Gem %d PonPort %d", GemPort, PonPort)
}
}
@@ -690,7 +691,7 @@
GEMPortList, err = RsrcMgr.ResourceMgrs[ponPort].GetResourceID(ctx, ponPort,
ponrmgr.GEMPORT_ID, NumOfPorts)
if err != nil && GEMPortList == nil {
- logger.Errorf("Failed to get gem port id for %s", IntfOnuIDUniID)
+ logger.Errorf(ctx, "Failed to get gem port id for %s", IntfOnuIDUniID)
return nil, err
}
@@ -699,7 +700,7 @@
err = RsrcMgr.ResourceMgrs[ponPort].UpdateGEMPortIDsForOnu(ctx, IntfOnuIDUniID,
GEMPortList)
if err != nil {
- logger.Errorf("Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
+ logger.Errorf(ctx, "Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
return nil, err
}
_ = RsrcMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, GEMPortList, ponPort,
@@ -746,7 +747,7 @@
IntfONUID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(ctx, IntfONUID, FlowID, false)
if err != nil {
- logger.Errorw("Failed to Update flow id for", log.Fields{"intf": IntfONUID})
+ logger.Errorw(ctx, "Failed to Update flow id for", log.Fields{"intf": IntfONUID})
}
RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(ctx, IntfONUID, FlowID)
@@ -767,7 +768,7 @@
IntfOnuIDUniID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(ctx, IntfOnuIDUniID, flow, false)
if err != nil {
- logger.Errorw("Failed to Update flow id for", log.Fields{"intf": IntfOnuIDUniID})
+ logger.Errorw(ctx, "Failed to Update flow id for", log.Fields{"intf": IntfOnuIDUniID})
}
RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(ctx, IntfOnuIDUniID, flow)
}
@@ -843,14 +844,14 @@
FlowPath := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
FlowIDs := RsrcMgr.ResourceMgrs[ponIntfID].GetCurrentFlowIDsForOnu(ctx, FlowPath)
if FlowIDs != nil {
- logger.Debugw("Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "onuID": onuID, "uniID": uniID, "KVpath": FlowPath})
+ logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "onuID": onuID, "uniID": uniID, "KVpath": FlowPath})
for _, flowID := range FlowIDs {
FlowInfo := RsrcMgr.GetFlowIDInfo(ctx, ponIntfID, int32(onuID), int32(uniID), uint32(flowID))
if FlowInfo != nil {
- logger.Debugw("Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowID})
+ logger.Debugw(ctx, "Found flows", log.Fields{"flows": *FlowInfo, "flowId": flowID})
for _, Info := range *FlowInfo {
if Info.FlowStoreCookie == flowStoreCookie {
- logger.Debug("Found flow matching with flowStore cookie", log.Fields{"flowId": flowID, "flowStoreCookie": flowStoreCookie})
+ logger.Debug(ctx, "Found flow matching with flowStore cookie", log.Fields{"flowId": flowID, "flowStoreCookie": flowStoreCookie})
return true
}
}
@@ -870,18 +871,18 @@
if Value != nil {
Val, err := kvstore.ToByte(Value.Value)
if err != nil {
- logger.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
return Data
}
if err = json.Unmarshal(Val, &Data); err != nil {
- logger.Error("Failed to unmarshal", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
return Data
}
}
} else {
- logger.Errorf("Failed to get TP id from kvstore for path %s", Path)
+ logger.Errorf(ctx, "Failed to get TP id from kvstore for path %s", Path)
}
- logger.Debugf("Getting TP id %d from path %s", Data, Path)
+ logger.Debugf(ctx, "Getting TP id %d from path %s", Data, Path)
return Data
}
@@ -891,7 +892,7 @@
func (RsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDsForOnu(ctx context.Context, IntfID uint32, OnuID uint32, UniID uint32) error {
IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
if err := RsrcMgr.KVStore.Delete(ctx, IntfOnuUniID); err != nil {
- logger.Errorw("Failed to delete techprofile id resource in KV store", log.Fields{"path": IntfOnuUniID})
+ logger.Errorw(ctx, "Failed to delete techprofile id resource in KV store", log.Fields{"path": IntfOnuUniID})
return err
}
return nil
@@ -909,11 +910,11 @@
IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
Value, err := json.Marshal(tpIDList)
if err != nil {
- logger.Error("failed to Marshal")
+ logger.Error(ctx, "failed to Marshal")
return err
}
if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf("Failed to update resource %s", IntfOnuUniID)
+ logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
return err
}
return err
@@ -931,19 +932,19 @@
tpIDList := RsrcMgr.GetTechProfileIDForOnu(ctx, IntfID, OnuID, UniID)
for _, value := range tpIDList {
if value == TpID {
- logger.Debugf("TpID %d is already in tpIdList for the path %s", TpID, IntfOnuUniID)
+ logger.Debugf(ctx, "TpID %d is already in tpIdList for the path %s", TpID, IntfOnuUniID)
return err
}
}
- logger.Debugf("updating tp id %d on path %s", TpID, IntfOnuUniID)
+ logger.Debugf(ctx, "updating tp id %d on path %s", TpID, IntfOnuUniID)
tpIDList = append(tpIDList, TpID)
Value, err = json.Marshal(tpIDList)
if err != nil {
- logger.Error("failed to Marshal")
+ logger.Error(ctx, "failed to Marshal")
return err
}
if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf("Failed to update resource %s", IntfOnuUniID)
+ logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
return err
}
return err
@@ -959,11 +960,11 @@
IntfOnuUniID := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
Value, err = json.Marshal(*MeterConfig)
if err != nil {
- logger.Error("failed to Marshal meter config")
+ logger.Error(ctx, "failed to Marshal meter config")
return err
}
if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf("Failed to store meter into KV store %s", IntfOnuUniID)
+ logger.Errorf(ctx, "Failed to store meter into KV store %s", IntfOnuUniID)
return err
}
return err
@@ -978,22 +979,22 @@
Value, err := RsrcMgr.KVStore.Get(ctx, Path)
if err == nil {
if Value != nil {
- logger.Debug("Found meter in KV store", log.Fields{"Direction": Direction})
+ logger.Debug(ctx, "Found meter in KV store", log.Fields{"Direction": Direction})
Val, er := kvstore.ToByte(Value.Value)
if er != nil {
- logger.Errorw("Failed to convert into byte array", log.Fields{"error": er})
+ logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": er})
return nil, er
}
if er = json.Unmarshal(Val, &meterConfig); er != nil {
- logger.Error("Failed to unmarshal meterconfig", log.Fields{"error": er})
+ logger.Error(ctx, "Failed to unmarshal meterconfig", log.Fields{"error": er})
return nil, er
}
} else {
- logger.Debug("meter-does-not-exists-in-KVStore")
+ logger.Debug(ctx, "meter-does-not-exists-in-KVStore")
return nil, err
}
} else {
- logger.Errorf("Failed to get Meter config from kvstore for path %s", Path)
+ logger.Errorf(ctx, "Failed to get Meter config from kvstore for path %s", Path)
}
return &meterConfig, err
@@ -1005,18 +1006,18 @@
UniID uint32, TpID uint32) error {
Path := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
if err := RsrcMgr.KVStore.Delete(ctx, Path); err != nil {
- logger.Errorf("Failed to delete meter id %s from kvstore ", Path)
+ logger.Errorf(ctx, "Failed to delete meter id %s from kvstore ", Path)
return err
}
return nil
}
-func getFlowIDFromFlowInfo(FlowInfo *[]FlowInfo, flowID, gemportID uint32, flowStoreCookie uint64, flowCategory string,
+func getFlowIDFromFlowInfo(ctx context.Context, FlowInfo *[]FlowInfo, flowID, gemportID uint32, flowStoreCookie uint64, flowCategory string,
vlanVid uint32, vlanPcp ...uint32) error {
if FlowInfo != nil {
for _, Info := range *FlowInfo {
if int32(gemportID) == Info.Flow.GemportId && flowCategory != "" && Info.FlowCategory == flowCategory {
- logger.Debug("Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
+ logger.Debug(ctx, "Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
if Info.FlowCategory == "HSIA_FLOW" {
if err := checkVlanAndPbitEqualityForFlows(vlanVid, Info, vlanPcp[0]); err == nil {
return nil
@@ -1025,13 +1026,13 @@
}
if int32(gemportID) == Info.Flow.GemportId && flowStoreCookie != 0 && Info.FlowStoreCookie == flowStoreCookie {
if flowCategory != "" && Info.FlowCategory == flowCategory {
- logger.Debug("Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
+ logger.Debug(ctx, "Found flow matching with flow category", log.Fields{"flowId": flowID, "FlowCategory": flowCategory})
return nil
}
}
}
}
- logger.Debugw("the flow can be related to a different service", log.Fields{"flow_info": FlowInfo})
+ logger.Debugw(ctx, "the flow can be related to a different service", log.Fields{"flow_info": FlowInfo})
return errors.New("invalid flow-info")
}
@@ -1068,11 +1069,11 @@
var err error
if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
- logger.Errorf("failed to get onuifo for intfid %d", intfID)
+ logger.Errorf(ctx, "failed to get onuifo for intfid %d", intfID)
return err
}
if len(onuGemData) == 0 {
- logger.Errorw("failed to ger Onuid info ", log.Fields{"intfid": intfID, "onuid": onuID})
+ logger.Errorw(ctx, "failed to ger Onuid info ", log.Fields{"intfid": intfID, "onuid": onuID})
return err
}
@@ -1080,18 +1081,18 @@
if onugem.OnuID == onuID {
for _, gem := range onuGemData[idx].GemPorts {
if gem == gemPort {
- logger.Debugw("Gem already present in onugem info, skpping addition", log.Fields{"gem": gem})
+ logger.Debugw(ctx, "Gem already present in onugem info, skpping addition", log.Fields{"gem": gem})
return nil
}
}
- logger.Debugw("Added gem to onugem info", log.Fields{"gem": gemPort})
+ logger.Debugw(ctx, "Added gem to onugem info", log.Fields{"gem": gemPort})
onuGemData[idx].GemPorts = append(onuGemData[idx].GemPorts, gemPort)
break
}
}
err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
if err != nil {
- logger.Error("Failed to add onugem to kv store")
+ logger.Error(ctx, "Failed to add onugem to kv store")
return err
}
return err
@@ -1102,7 +1103,7 @@
var onuGemData []OnuGemInfo
if err := RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
- logger.Errorf("failed to get onuifo for intfid %d", IntfID)
+ logger.Errorf(ctx, "failed to get onuifo for intfid %d", IntfID)
return nil, err
}
@@ -1115,19 +1116,19 @@
var err error
if err = RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
- logger.Errorf("failed to get onuifo for intfid %d", IntfID)
+ logger.Errorf(ctx, "failed to get onuifo for intfid %d", IntfID)
return olterrors.NewErrPersistence("get", "OnuGemInfo", IntfID,
log.Fields{"onuGem": onuGem, "intfID": IntfID}, err)
}
onuGemData = append(onuGemData, onuGem)
err = RsrcMgr.ResourceMgrs[IntfID].AddOnuGemInfo(ctx, IntfID, onuGemData)
if err != nil {
- logger.Error("Failed to add onugem to kv store")
+ logger.Error(ctx, "Failed to add onugem to kv store")
return olterrors.NewErrPersistence("set", "OnuGemInfo", IntfID,
log.Fields{"onuGemData": onuGemData, "intfID": IntfID}, err)
}
- logger.Debugw("added onu to onugeminfo", log.Fields{"intf": IntfID, "onugem": onuGem})
+ logger.Debugw(ctx, "added onu to onugeminfo", log.Fields{"intf": IntfID, "onugem": onuGem})
return nil
}
@@ -1137,14 +1138,14 @@
var err error
if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
- logger.Errorf("failed to get onuifo for intfid %d", intfID)
+ logger.Errorf(ctx, "failed to get onuifo for intfid %d", intfID)
return
}
for idx, onu := range onuGemData {
if onu.OnuID == onuID {
for _, uni := range onu.UniPorts {
if uni == portNo {
- logger.Debugw("uni already present in onugem info", log.Fields{"uni": portNo})
+ logger.Debugw(ctx, "uni already present in onugem info", log.Fields{"uni": portNo})
return
}
}
@@ -1154,7 +1155,7 @@
}
err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
if err != nil {
- logger.Errorw("Failed to add uin port in onugem to kv store", log.Fields{"uni": portNo})
+ logger.Errorw(ctx, "Failed to add uin port in onugem to kv store", log.Fields{"uni": portNo})
return
}
return
@@ -1166,14 +1167,14 @@
path := fmt.Sprintf(OnuPacketINPath, pktIn.IntfID, pktIn.OnuID, pktIn.LogicalPort)
Value, err := json.Marshal(gemPort)
if err != nil {
- logger.Error("Failed to marshal data")
+ logger.Error(ctx, "Failed to marshal data")
return
}
if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
- logger.Errorw("Failed to put to kvstore", log.Fields{"path": path, "value": gemPort})
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": gemPort})
return
}
- logger.Debugw("added gem packet in successfully", log.Fields{"path": path, "gem": gemPort})
+ logger.Debugw(ctx, "added gem packet in successfully", log.Fields{"path": path, "gem": gemPort})
return
}
@@ -1188,22 +1189,22 @@
value, err := RsrcMgr.KVStore.Get(ctx, path)
if err != nil {
- logger.Errorw("Failed to get from kv store", log.Fields{"path": path})
+ logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
return uint32(0), err
} else if value == nil {
- logger.Debugw("No pkt in gem found", log.Fields{"path": path})
+ logger.Debugw(ctx, "No pkt in gem found", log.Fields{"path": path})
return uint32(0), nil
}
if Val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error("Failed to convert to byte array")
+ logger.Error(ctx, "Failed to convert to byte array")
return uint32(0), err
}
if err = json.Unmarshal(Val, &gemPort); err != nil {
- logger.Error("Failed to unmarshall")
+ logger.Error(ctx, "Failed to unmarshall")
return uint32(0), err
}
- logger.Debugw("found packein gemport from path", log.Fields{"path": path, "gem": gemPort})
+ logger.Debugw(ctx, "found packein gemport from path", log.Fields{"path": path, "gem": gemPort})
return gemPort, nil
}
@@ -1213,7 +1214,7 @@
path := fmt.Sprintf(OnuPacketINPath, intfID, onuID, logicalPort)
if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorf("Falied to remove resource %s", path)
+ logger.Errorf(ctx, "Falied to remove resource %s", path)
return err
}
return nil
@@ -1222,7 +1223,7 @@
// DelOnuGemInfoForIntf deletes the onugem info from kvstore per interface
func (RsrcMgr *OpenOltResourceMgr) DelOnuGemInfoForIntf(ctx context.Context, intfID uint32) error {
if err := RsrcMgr.ResourceMgrs[intfID].DelOnuGemInfoForIntf(ctx, intfID); err != nil {
- logger.Errorw("failed to delete onu gem info for", log.Fields{"intfid": intfID})
+ logger.Errorw(ctx, "failed to delete onu gem info for", log.Fields{"intfid": intfID})
return err
}
return nil
@@ -1237,16 +1238,16 @@
path := fmt.Sprintf(NnniIntfID)
value, err := RsrcMgr.KVStore.Get(ctx, path)
if err != nil {
- logger.Error("failed to get data from kv store")
+ logger.Error(ctx, "failed to get data from kv store")
return nil, err
}
if value != nil {
if Val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error("Failed to convert to byte array")
+ logger.Error(ctx, "Failed to convert to byte array")
return nil, err
}
if err = json.Unmarshal(Val, &nni); err != nil {
- logger.Error("Failed to unmarshall")
+ logger.Error(ctx, "Failed to unmarshall")
return nil, err
}
}
@@ -1259,7 +1260,7 @@
nni, err := RsrcMgr.GetNNIFromKVStore(ctx)
if err != nil {
- logger.Error("failed to fetch nni interfaces from kv store")
+ logger.Error(ctx, "failed to fetch nni interfaces from kv store")
return err
}
@@ -1267,13 +1268,13 @@
nni = append(nni, nniIntf)
Value, err = json.Marshal(nni)
if err != nil {
- logger.Error("Failed to marshal data")
+ logger.Error(ctx, "Failed to marshal data")
}
if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
- logger.Errorw("Failed to put to kvstore", log.Fields{"path": path, "value": Value})
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": Value})
return err
}
- logger.Debugw("added nni to kv successfully", log.Fields{"path": path, "nni": nniIntf})
+ logger.Debugw(ctx, "added nni to kv successfully", log.Fields{"path": path, "nni": nniIntf})
return nil
}
@@ -1283,7 +1284,7 @@
path := fmt.Sprintf(NnniIntfID)
if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorw("Failed to delete nni interfaces from kv store", log.Fields{"path": path})
+ logger.Errorw(ctx, "Failed to delete nni interfaces from kv store", log.Fields{"path": path})
return err
}
return nil
@@ -1296,7 +1297,7 @@
flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
if err != nil {
- logger.Error("Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
+ logger.Error(ctx, "Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
return err
}
if flowsForGem == nil {
@@ -1305,17 +1306,17 @@
flowsForGem[gem] = flowIDs
val, err = json.Marshal(flowsForGem)
if err != nil {
- logger.Error("Failed to marshal data", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
return err
}
RsrcMgr.flowIDToGemInfoLock.Lock()
defer RsrcMgr.flowIDToGemInfoLock.Unlock()
if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
return err
}
- logger.Debugw("added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowsForGem[gem]})
+ logger.Debugw(ctx, "added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowsForGem[gem]})
return nil
}
@@ -1326,11 +1327,11 @@
flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
if err != nil {
- logger.Error("Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
+ logger.Error(ctx, "Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
return
}
if flowsForGem == nil {
- logger.Error("No flowids found ", log.Fields{"intf": intf, "gemport": gem})
+ logger.Error(ctx, "No flowids found ", log.Fields{"intf": intf, "gemport": gem})
return
}
// once we get the flows per gem map from kv , just delete the gem entry from the map
@@ -1338,14 +1339,14 @@
// once gem entry is deleted update the kv store.
val, err = json.Marshal(flowsForGem)
if err != nil {
- logger.Error("Failed to marshal data", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
return
}
RsrcMgr.flowIDToGemInfoLock.Lock()
defer RsrcMgr.flowIDToGemInfoLock.Unlock()
if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
return
}
return
@@ -1360,16 +1361,16 @@
value, err := RsrcMgr.KVStore.Get(ctx, path)
RsrcMgr.flowIDToGemInfoLock.RUnlock()
if err != nil {
- logger.Error("failed to get data from kv store")
+ logger.Error(ctx, "failed to get data from kv store")
return nil, err
}
if value != nil && value.Value != nil {
if val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error("Failed to convert to byte array ", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
return nil, err
}
if err = json.Unmarshal(val, &flowsForGem); err != nil {
- logger.Error("Failed to unmarshall", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to unmarshall", log.Fields{"error": err})
return nil, err
}
}
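The read path is the mirror image: Get, convert, Unmarshal, with each failure logged against the same context. A hedged sketch under the same assumptions (KV access injected rather than going through the real backend):

package sketch

import (
	"context"
	"encoding/json"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// getJSON reads path through the injected get callback and decodes the payload
// into out; a nil payload is treated as "not found" rather than an error.
func getJSON(ctx context.Context, logger log.CLogger, get func(context.Context, string) ([]byte, error), path string, out interface{}) error {
	blob, err := get(ctx, path)
	if err != nil {
		logger.Errorw(ctx, "failed-to-get-from-kvstore", log.Fields{"path": path, "error": err})
		return err
	}
	if blob == nil {
		logger.Debugw(ctx, "no-value-found", log.Fields{"path": path})
		return nil
	}
	if err := json.Unmarshal(blob, out); err != nil {
		logger.Errorw(ctx, "failed-to-unmarshal-value", log.Fields{"path": path, "error": err})
		return err
	}
	return nil
}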
@@ -1382,7 +1383,7 @@
RsrcMgr.flowIDToGemInfoLock.Lock()
defer RsrcMgr.flowIDToGemInfoLock.Unlock()
if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorw("Failed to delete nni interfaces from kv store", log.Fields{"path": path})
+ logger.Errorw(ctx, "Failed to delete nni interfaces from kv store", log.Fields{"path": path})
return
}
return
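Deletes against shared maps additionally take the per-map lock before touching the store, as flowIDToGemInfoLock does above. A small illustration of that locking discipline (the lockedStore type and its del callback are hypothetical):

package sketch

import (
	"context"
	"sync"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// lockedStore serialises writers against concurrent readers of one KV-backed
// map; del stands in for the KV-store delete call.
type lockedStore struct {
	mu  sync.RWMutex
	del func(context.Context, string) error
}

// deleteEntry removes path under the write lock and logs any failure against
// the caller's context.
func (s *lockedStore) deleteEntry(ctx context.Context, logger log.CLogger, path string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.del(ctx, path); err != nil {
		logger.Errorw(ctx, "failed-to-delete-from-kvstore", log.Fields{"path": path, "error": err})
	}
}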
@@ -1402,16 +1403,16 @@
kvPair, err := RsrcMgr.KVStore.Get(ctx, path)
if err != nil {
- logger.Error("failed to get data from kv store")
+ logger.Error(ctx, "failed to get data from kv store")
return nil, err
}
if kvPair != nil && kvPair.Value != nil {
if val, err = kvstore.ToByte(kvPair.Value); err != nil {
- logger.Error("Failed to convert to byte array ", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
return nil, err
}
if err = json.Unmarshal(val, &mcastQueueToIntfMap); err != nil {
- logger.Error("Failed to unmarshall ", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to unmarshall ", log.Fields{"error": err})
return nil, err
}
}
@@ -1425,7 +1426,7 @@
mcastQueues, err := RsrcMgr.GetMcastQueuePerInterfaceMap(ctx)
if err != nil {
- logger.Errorw("Failed to get multicast queue info for interface", log.Fields{"error": err, "intf": intf})
+ logger.Errorw(ctx, "Failed to get multicast queue info for interface", log.Fields{"error": err, "intf": intf})
return err
}
if mcastQueues == nil {
@@ -1433,14 +1434,14 @@
}
mcastQueues[intf] = []uint32{gem, servicePriority}
if val, err = json.Marshal(mcastQueues); err != nil {
- logger.Errorw("Failed to marshal data", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to marshal data", log.Fields{"error": err})
return err
}
if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw("Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
return err
}
- logger.Debugw("added multicast queue info to KV store successfully", log.Fields{"path": path, "mcastQueueInfo": mcastQueues[intf], "interfaceId": intf})
+ logger.Debugw(ctx, "added multicast queue info to KV store successfully", log.Fields{"path": path, "mcastQueueInfo": mcastQueues[intf], "interfaceId": intf})
return nil
}
@@ -1471,12 +1472,12 @@
Value, err = json.Marshal(groupInfo)
if err != nil {
- logger.Error("failed to Marshal flow group object")
+ logger.Error(ctx, "failed to Marshal flow group object")
return err
}
if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
- logger.Errorf("Failed to update resource %s", path)
+ logger.Errorf(ctx, "Failed to update resource %s", path)
return err
}
return nil
@@ -1491,7 +1492,7 @@
path = fmt.Sprintf(FlowGroup, groupID)
}
if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorf("Failed to remove resource %s due to %s", path, err)
+ logger.Errorf(ctx, "Failed to remove resource %s due to %s", path, err)
return false
}
return true
@@ -1515,11 +1516,11 @@
if kvPair != nil && kvPair.Value != nil {
Val, err := kvstore.ToByte(kvPair.Value)
if err != nil {
- logger.Errorw("Failed to convert flow group into byte array", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to convert flow group into byte array", log.Fields{"error": err})
return false, groupInfo, err
}
if err = json.Unmarshal(Val, &groupInfo); err != nil {
- logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
return false, groupInfo, err
}
return true, groupInfo, nil
diff --git a/internal/pkg/resourcemanager/resourcemanager_test.go b/internal/pkg/resourcemanager/resourcemanager_test.go
index 5b2ee29..817825d 100644
--- a/internal/pkg/resourcemanager/resourcemanager_test.go
+++ b/internal/pkg/resourcemanager/resourcemanager_test.go
@@ -127,7 +127,7 @@
// Get mock function implementation for KVClient
func (kvclient *MockResKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
- logger.Debugw("Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
+ logger.Debugw(ctx, "Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
if key != "" {
if strings.Contains(key, MeterConfig) {
var bands []*ofp.OfpMeterBandHeader
@@ -148,7 +148,7 @@
return nil, errors.New("invalid meter")
}
if strings.Contains(key, FlowIDpool) || strings.Contains(key, GemportIDPool) || strings.Contains(key, AllocIDPool) {
- logger.Debug("Error Error Error Key:", FlowIDpool, GemportIDPool, AllocIDPool)
+ logger.Debug(ctx, "Error Error Error Key:", FlowIDpool, GemportIDPool, AllocIDPool)
data := make(map[string]interface{})
data["pool"] = "1024"
data["start_idx"] = 1
@@ -157,17 +157,17 @@
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
if strings.Contains(key, FlowIDInfo) || strings.Contains(key, FlowIDs) {
- logger.Debug("Error Error Error Key:", FlowIDs, FlowIDInfo)
+ logger.Debug(ctx, "Error Error Error Key:", FlowIDs, FlowIDInfo)
str, _ := json.Marshal([]uint32{1, 2})
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
if strings.Contains(key, AllocIDs) || strings.Contains(key, GemportIDs) {
- logger.Debug("Error Error Error Key:", AllocIDs, GemportIDs)
+ logger.Debug(ctx, "Error Error Error Key:", AllocIDs, GemportIDs)
str, _ := json.Marshal(1)
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
if strings.Contains(key, McastQueuesForIntf) {
- logger.Debug("Error Error Error Key:", McastQueuesForIntf)
+ logger.Debug(ctx, "Error Error Error Key:", McastQueuesForIntf)
mcastQueues := make(map[uint32][]uint32)
mcastQueues[10] = []uint32{4000, 0}
str, _ := json.Marshal(mcastQueues)
@@ -240,11 +240,11 @@
}
// CloseWatch mock function implementation for KVClient
-func (kvclient *MockResKVClient) CloseWatch(key string, ch chan *kvstore.Event) {
+func (kvclient *MockResKVClient) CloseWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
}
// Close mock function implementation for KVClient
-func (kvclient *MockResKVClient) Close() {
+func (kvclient *MockResKVClient) Close(ctx context.Context) {
}
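With the mock client's methods now taking a context, test call sites supply context.Background() at the top of the chain, as the hunks below do for SetKVClient, getFlowIDFromFlowInfo and newKVClient. A hypothetical test in the same spirit (the test name, the key string and its placement alongside the mock in the same package are assumptions):

package resourcemanager

import (
	"context"
	"testing"
)

// TestMockKVClientLifecycle only exercises the no-op mock methods whose
// signatures gained a context; the point is that context.Background() is
// supplied once at the top of the call chain.
func TestMockKVClientLifecycle(t *testing.T) {
	ctx := context.Background()
	kv := &MockResKVClient{}
	kv.CloseWatch(ctx, "service/voltha/openolt/test", nil)
	kv.Close(ctx)
}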
// testResMgrObject maps fields type to OpenOltResourceMgr type.
@@ -951,7 +951,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := SetKVClient(tt.args.backend, tt.args.address, tt.args.DeviceID); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+ if got := SetKVClient(context.Background(), tt.args.backend, tt.args.address, tt.args.DeviceID); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
t.Errorf("SetKVClient() = %v, want %v", got, tt.want)
}
})
@@ -1018,7 +1018,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := getFlowIDFromFlowInfo(tt.args.FlowInfo, tt.args.flowID, tt.args.gemportID, tt.args.flowStoreCookie, tt.args.flowCategory, tt.args.vlanVid, tt.args.vlanPcp...)
+ err := getFlowIDFromFlowInfo(context.Background(), tt.args.FlowInfo, tt.args.flowID, tt.args.gemportID, tt.args.flowStoreCookie, tt.args.flowCategory, tt.args.vlanVid, tt.args.vlanPcp...)
if reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) && err != nil {
t.Errorf("getFlowIDFromFlowInfo() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -1046,7 +1046,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := newKVClient(tt.args.storeType, tt.args.address, tt.args.timeout)
+ got, err := newKVClient(context.Background(), tt.args.storeType, tt.args.address, tt.args.timeout)
if got != nil && reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
t.Errorf("newKVClient() got = %v, want %v", got, tt.want)
}