VOL-3822: Support for EthernetFramePerformanceMonitoringHistoryData PM counters
VOL-3826: Support for EthernetPerformanceMonitoringHistoryData PM counters
Change-Id: Ib2d28881a8e80fee997d4e2ea6fa17dbb8d50955
diff --git a/VERSION b/VERSION
index 23aa839..0495c4a 100755
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.2.2
+1.2.3
diff --git a/internal/pkg/onuadaptercore/device_handler.go b/internal/pkg/onuadaptercore/device_handler.go
index c014ae4..7d2d226 100644
--- a/internal/pkg/onuadaptercore/device_handler.go
+++ b/internal/pkg/onuadaptercore/device_handler.go
@@ -101,6 +101,7 @@
cUniUnLockFsm
cAniConfigFsm
cUniVlanConfigFsm
+ cL2PmFsm
)
type idleCheckStruct struct {
@@ -115,6 +116,7 @@
cUniUnLockFsm: {(*deviceHandler).devUniUnlockFsmInIdleState, cUniFsmIdleState},
cAniConfigFsm: {(*deviceHandler).devAniConfigFsmInIdleState, cAniFsmIdleState},
cUniVlanConfigFsm: {(*deviceHandler).devUniVlanConfigFsmInIdleState, cVlanFsmIdleState},
+ cL2PmFsm: {(*deviceHandler).l2PmFsmInIdleState, cL2PmFsmIdleState},
}
const (
@@ -275,7 +277,7 @@
// ##########################################################################################
// deviceHandler methods that implement the adapters interface requests ##### begin #########
-//adoptOrReconcileDevice adopts the OLT device
+//adoptOrReconcileDevice adopts the ONU device
func (dh *deviceHandler) adoptOrReconcileDevice(ctx context.Context, device *voltha.Device) {
logger.Debugw(ctx, "Adopt_or_reconcile_device", log.Fields{"device-id": device.Id, "Address": device.GetHostAndPort()})
@@ -1703,6 +1705,14 @@
log.Fields{"device-id": dh.deviceID})
}
_ = dh.deviceReasonUpdate(ctx, drInitialMibDownloaded, !dh.reconciling)
+
+ // Initialize classical L2 PM Interval Counters
+ if err := dh.pOnuMetricsMgr.pAdaptFsm.pFsm.Event(l2PmEventInit); err != nil {
+ // There is no way we should be landing here, but if we do then
+ // there is nothing much we can do about this other than log error
+ logger.Errorw(ctx, "error starting l2 pm fsm", log.Fields{"device-id": dh.device.Id, "err": err})
+ }
+
dh.ReadyForSpecificOmciConfig = true
// *** should generate UniUnlockStateDone event *****
if dh.pUnlockStateFsm == nil {
@@ -2685,7 +2695,20 @@
case <-dh.stopCollector:
dh.setCollectorIsRunning(false)
logger.Debugw(ctx, "stopping-collector-for-onu", log.Fields{"device-id": dh.device.Id})
+ // Stop the L2 PM FSM
+ go func() {
+ if dh.pOnuMetricsMgr.pAdaptFsm != nil && dh.pOnuMetricsMgr.pAdaptFsm.pFsm != nil {
+ if err := dh.pOnuMetricsMgr.pAdaptFsm.pFsm.Event(l2PmEventStop); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": dh.deviceID, "err": err})
+ }
+ } else {
+ logger.Errorw(ctx, "metrics manager fsm not initialized", log.Fields{"device-id": dh.deviceID})
+ }
+ }()
+
dh.pOnuMetricsMgr.stopProcessingOmciResponses <- true // Stop the OMCI GET response processing routine
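+ // Stop the tick generation routine that drives periodic L2 PM collection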
+ dh.pOnuMetricsMgr.stopTicks <- true
+
return
case <-time.After(time.Duration(FrequencyGranularity) * time.Second): // Check every FrequencyGranularity to see if it is time for collecting metrics
if !dh.pmConfigs.FreqOverride { // If FreqOverride is false, then nextGlobalMetricCollectionTime applies
@@ -2702,7 +2725,8 @@
for n, g := range dh.pOnuMetricsMgr.groupMetricMap {
// If the group is enabled AND (current time is equal to OR after nextCollectionInterval, collect the group metric)
- if g.enabled && (time.Now().Equal(g.nextCollectionInterval) || time.Now().After(g.nextCollectionInterval)) {
+ // Since the L2 PM counters are collected in a separate FSM, we should avoid those counters in the check.
+ if g.enabled && !g.isL2PMCounter && (time.Now().Equal(g.nextCollectionInterval) || time.Now().After(g.nextCollectionInterval)) {
go dh.pOnuMetricsMgr.collectGroupMetric(ctx, n)
}
}
@@ -2718,7 +2742,8 @@
dh.pOnuMetricsMgr.onuMetricsManagerLock.Lock() // Lock as we are writing the next metric collection time
for _, g := range dh.pOnuMetricsMgr.groupMetricMap {
// If group enabled, and the nextCollectionInterval is old (before or equal to current time), update the next collection time stamp
- if g.enabled && (g.nextCollectionInterval.Before(time.Now()) || g.nextCollectionInterval.Equal(time.Now())) {
+ // Since the L2 PM counters are collected and managed in a separate FSM, we should avoid those counters in the check.
+ if g.enabled && !g.isL2PMCounter && (g.nextCollectionInterval.Before(time.Now()) || g.nextCollectionInterval.Equal(time.Now())) {
g.nextCollectionInterval = time.Now().Add(time.Duration(g.frequency) * time.Second)
}
}
@@ -2799,6 +2824,14 @@
return false
}
+func (dh *deviceHandler) l2PmFsmInIdleState(ctx context.Context, idleState string) bool {
+ if dh.pOnuMetricsMgr != nil && dh.pOnuMetricsMgr.pAdaptFsm != nil && dh.pOnuMetricsMgr.pAdaptFsm.pFsm != nil {
+ return dh.isFsmInState(ctx, dh.pOnuMetricsMgr.pAdaptFsm.pFsm, idleState)
+ }
+ logger.Warnw(ctx, "L2 PM FSM not defined!", log.Fields{"device-id": dh.deviceID})
+ return false
+}
+
func (dh *deviceHandler) allButCallingFsmInIdleState(ctx context.Context, callingFsm usedOmciConfigFsms) bool {
for fsmName, fsmStruct := range fsmIdleStateFuncMap {
if fsmName != callingFsm && !fsmStruct.idleCheckFunc(dh, ctx, fsmStruct.idleState) {
diff --git a/internal/pkg/onuadaptercore/omci_cc.go b/internal/pkg/onuadaptercore/omci_cc.go
index f916180..73b4901 100644
--- a/internal/pkg/onuadaptercore/omci_cc.go
+++ b/internal/pkg/onuadaptercore/omci_cc.go
@@ -26,6 +26,7 @@
"fmt"
"strconv"
"sync"
+ "time"
//"time"
@@ -2186,6 +2187,157 @@
return nil
}
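+// sendSyncTime sends an OMCI Synchronize Time request (ONU-G ME) carrying the current UTC time and registers rxChan to receive the response.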
+func (oo *omciCC) sendSyncTime(ctx context.Context, timeout int, highPrio bool, rxChan chan Message) error {
+ tid := oo.getNextTid(highPrio)
+ logger.Debugw(ctx, "send synchronize time request:", log.Fields{"device-id": oo.deviceID,
+ "SequNo": strconv.FormatInt(int64(tid), 16)})
+
+ omciLayer := &omci.OMCI{
+ TransactionID: tid,
+ MessageType: omci.SynchronizeTimeRequestType,
+ // DeviceIdentifier: omci.BaselineIdent, // Optional, defaults to Baseline
+ // Length: 0x28, // Optional, defaults to 40 octets
+ }
+ utcTime := time.Now().UTC()
+ request := &omci.SynchronizeTimeRequest{
+ MeBasePacket: omci.MeBasePacket{
+ EntityClass: me.OnuGClassID,
+ // Default Instance ID is 0
+ },
+ Year: uint16(utcTime.Year()),
+ Month: uint8(utcTime.Month()),
+ Day: uint8(utcTime.Day()),
+ Hour: uint8(utcTime.Hour()),
+ Minute: uint8(utcTime.Minute()),
+ Second: uint8(utcTime.Second()),
+ }
+
+ pkt, err := serializeOmciLayer(ctx, omciLayer, request)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot serialize synchronize time request", log.Fields{"Err": err,
+ "device-id": oo.deviceID})
+ return err
+ }
+
+ omciRxCallbackPair := callbackPair{cbKey: tid,
+ cbEntry: callbackPairEntry{rxChan, oo.receiveOmciResponse},
+ }
+ err = oo.send(ctx, pkt, timeout, 0, highPrio, omciRxCallbackPair)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot send synchronize time request", log.Fields{"Err": err,
+ "device-id": oo.deviceID})
+ return err
+ }
+ logger.Debug(ctx, "send synchronize time request done")
+ return nil
+}
+
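+// sendCreateOrDeleteEthernetPerformanceMonitoringHistoryME sends a Create (create==true) or Delete request for the
+// Ethernet Frame PM History Data Upstream/Downstream ME with the given entityID; it returns the ME instance on a successful send, nil otherwise.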
+func (oo *omciCC) sendCreateOrDeleteEthernetPerformanceMonitoringHistoryME(ctx context.Context, timeout int, highPrio bool,
+ upstream bool, create bool, rxChan chan Message, entityID uint16) *me.ManagedEntity {
+ tid := oo.getNextTid(highPrio)
+ logger.Debugw(ctx, "send ethernet-performance-monitoring-history-me-msg:", log.Fields{"device-id": oo.deviceID,
+ "SequNo": strconv.FormatInt(int64(tid), 16), "InstId": strconv.FormatInt(int64(entityID), 16), "create": create, "upstream": upstream})
+ meParam := me.ParamData{EntityID: entityID}
+ var meInstance *me.ManagedEntity
+ var omciErr me.OmciErrors
+ if upstream {
+ meInstance, omciErr = me.NewEthernetFramePerformanceMonitoringHistoryDataUpstream(meParam)
+ } else {
+ meInstance, omciErr = me.NewEthernetFramePerformanceMonitoringHistoryDataDownstream(meParam)
+ }
+ if omciErr.GetError() == nil {
+ var omciLayer *omci.OMCI
+ var msgLayer gopacket.SerializableLayer
+ var err error
+ if create {
+ omciLayer, msgLayer, err = omci.EncodeFrame(meInstance, omci.CreateRequestType, omci.TransactionID(tid),
+ omci.AddDefaults(true))
+ } else {
+ omciLayer, msgLayer, err = omci.EncodeFrame(meInstance, omci.DeleteRequestType, omci.TransactionID(tid),
+ omci.AddDefaults(true))
+ }
+ if err != nil {
+ logger.Errorw(ctx, "Cannot encode ethernet frame performance monitoring history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "upstream": upstream, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+
+ pkt, err := serializeOmciLayer(ctx, omciLayer, msgLayer)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot serialize ethernet frame performance monitoring history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "upstream": upstream, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+
+ omciRxCallbackPair := callbackPair{cbKey: tid,
+ cbEntry: callbackPairEntry{rxChan, oo.receiveOmciResponse},
+ }
+ err = oo.send(ctx, pkt, timeout, 0, highPrio, omciRxCallbackPair)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot send ethernet frame performance monitoring history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "upstream": upstream, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+ logger.Debugw(ctx, "send ethernet frame performance monitoring history data ME done",
+ log.Fields{"device-id": oo.deviceID, "upstream": upstream, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return meInstance
+ }
+ logger.Errorw(ctx, "Cannot generate ethernet frame performance monitoring history data ME Instance",
+ log.Fields{"Err": omciErr.GetError(), "device-id": oo.deviceID, "upstream": upstream, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+}
+
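+// sendCreateOrDeleteEthernetUniHistoryME sends a Create (create==true) or Delete request for the
+// Ethernet PM History Data ME with the given entityID; it returns the ME instance on a successful send, nil otherwise.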
+func (oo *omciCC) sendCreateOrDeleteEthernetUniHistoryME(ctx context.Context, timeout int, highPrio bool,
+ create bool, rxChan chan Message, entityID uint16) *me.ManagedEntity {
+ tid := oo.getNextTid(highPrio)
+ logger.Debugw(ctx, "send ethernet-uni-history-me-msg:", log.Fields{"device-id": oo.deviceID,
+ "SequNo": strconv.FormatInt(int64(tid), 16), "InstId": strconv.FormatInt(int64(entityID), 16), "create": create})
+ meParam := me.ParamData{EntityID: entityID}
+ var meInstance *me.ManagedEntity
+ var omciErr me.OmciErrors
+ meInstance, omciErr = me.NewEthernetPerformanceMonitoringHistoryData(meParam)
+
+ if omciErr.GetError() == nil {
+ var omciLayer *omci.OMCI
+ var msgLayer gopacket.SerializableLayer
+ var err error
+ if create {
+ omciLayer, msgLayer, err = omci.EncodeFrame(meInstance, omci.CreateRequestType, omci.TransactionID(tid),
+ omci.AddDefaults(true))
+ } else {
+ omciLayer, msgLayer, err = omci.EncodeFrame(meInstance, omci.DeleteRequestType, omci.TransactionID(tid),
+ omci.AddDefaults(true))
+ }
+ if err != nil {
+ logger.Errorw(ctx, "Cannot encode ethernet uni history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+
+ pkt, err := serializeOmciLayer(ctx, omciLayer, msgLayer)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot serialize ethernet uni history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+
+ omciRxCallbackPair := callbackPair{cbKey: tid,
+ cbEntry: callbackPairEntry{rxChan, oo.receiveOmciResponse},
+ }
+ err = oo.send(ctx, pkt, timeout, 0, highPrio, omciRxCallbackPair)
+ if err != nil {
+ logger.Errorw(ctx, "Cannot send ethernet uni history data ME",
+ log.Fields{"Err": err, "device-id": oo.deviceID, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+ }
+ logger.Debugw(ctx, "send ethernet uni history data ME done",
+ log.Fields{"device-id": oo.deviceID, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return meInstance
+ }
+ logger.Errorw(ctx, "Cannot generate ethernet uni history data ME Instance",
+ log.Fields{"Err": omciErr.GetError(), "device-id": oo.deviceID, "create": create, "InstId": strconv.FormatInt(int64(entityID), 16)})
+ return nil
+}
func isResponseWithMibDataSync(msgType omci.MessageType) bool {
for _, v := range responsesWithMibDataSync {
if v == msgType {
diff --git a/internal/pkg/onuadaptercore/onu_metrics_manager.go b/internal/pkg/onuadaptercore/onu_metrics_manager.go
index 68c628c..3110501 100644
--- a/internal/pkg/onuadaptercore/onu_metrics_manager.go
+++ b/internal/pkg/onuadaptercore/onu_metrics_manager.go
@@ -19,8 +19,8 @@
import (
"context"
- "errors"
"fmt"
+ "github.com/looplab/fsm"
"github.com/opencord/omci-lib-go"
me "github.com/opencord/omci-lib-go/generated"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
@@ -29,6 +29,29 @@
"time"
)
+const (
+ // events of L2 PM FSM
+ l2PmEventInit = "l2PmEventInit"
+ l2PmEventTick = "l2PmEventTick"
+ l2PmEventSuccess = "l2PmEventSuccess"
+ l2PmEventFailure = "l2PmEventFailure"
+ l2PmEventAddMe = "l2PmEventAddMe"
+ l2PmEventDeleteMe = "l2PmEventDeleteMe"
+ l2PmEventStop = "l2PmEventStop"
+)
+const (
+ // states of L2 PM FSM
+ l2PmStNull = "l2PmStNull"
+ l2PmStStarting = "l2PmStStarting"
+ l2PmStSyncTime = "l2PmStSyncTime"
+ l2PmStIdle = "l2PmStIdle"
+ l2PmStCreatePmMe = "l2PmStCreatePm"
+ l2PmStDeletePmMe = "l2PmStDeletePmMe"
+ l2PmStCollectData = "l2PmStCollectData"
+)
+
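+// cL2PmFsmIdleState is the idle state of the L2 PM FSM, referenced by the deviceHandler idle-state checks (fsmIdleStateFuncMap).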
+const cL2PmFsmIdleState = l2PmStIdle
+
// general constants used for overall Metric Collection management
const (
DefaultMetricCollectionFrequency = 15 * 60 // unit in seconds. This setting can be changed from voltha NBI PmConfig configuration
@@ -46,7 +69,7 @@
// OpticalPowerGroupMetrics specific constants
const (
- OpticalPowerGroupMetricName = "OpticalPower"
+ OpticalPowerGroupMetricName = "PON_Optical"
OpticalPowerGroupMetricEnabled = true // This setting can be changed from voltha NBI PmConfig configuration
OpticalPowerMetricGroupCollectionFrequency = 5 * 60 // unit in seconds. This setting can be changed from voltha NBI PmConfig configuration
)
@@ -61,17 +84,96 @@
// UniStatusGroupMetrics specific constants
const (
- UniStatusGroupMetricName = "UniStatus"
+ UniStatusGroupMetricName = "UNI_Status"
UniStatusGroupMetricEnabled = true // This setting can be changed from voltha NBI PmConfig configuration
UniStatusMetricGroupCollectionFrequency = 5 * 60 // unit in seconds. This setting can be changed from voltha NBI PmConfig configuration
)
+// *** Classical L2 PM Counters begin ***
+
+// EthernetBridgeHistory lists the supported Ethernet bridge history counters, fetched from the
+// Ethernet Frame Performance Monitoring History Data Downstream and Ethernet Frame Performance Monitoring History Data Upstream MEs.
+var EthernetBridgeHistory = map[string]voltha.PmConfig_PmType{
+ "class_id": voltha.PmConfig_CONTEXT,
+ "entity_id": voltha.PmConfig_CONTEXT,
+ "interval_end_time": voltha.PmConfig_CONTEXT,
+ "parent_class_id": voltha.PmConfig_CONTEXT,
+ "parent_entity_id": voltha.PmConfig_CONTEXT,
+ "upstream": voltha.PmConfig_CONTEXT,
+
+ "drop_events": voltha.PmConfig_COUNTER,
+ "octets": voltha.PmConfig_COUNTER,
+ "packets": voltha.PmConfig_COUNTER,
+ "broadcast_packets": voltha.PmConfig_COUNTER,
+ "multicast_packets": voltha.PmConfig_COUNTER,
+ "crc_errored_packets": voltha.PmConfig_COUNTER,
+ "undersize_packets": voltha.PmConfig_COUNTER,
+ "oversize_packets": voltha.PmConfig_COUNTER,
+ "64_octets": voltha.PmConfig_COUNTER,
+ "65_to_127_octets": voltha.PmConfig_COUNTER,
+ "128_to_255_octets": voltha.PmConfig_COUNTER,
+ "256_to_511_octets": voltha.PmConfig_COUNTER,
+ "512_to_1023_octets": voltha.PmConfig_COUNTER,
+ "1024_to_1518_octets": voltha.PmConfig_COUNTER,
+}
+
+// EthernetUniHistory lists the supported Ethernet UNI history counters, fetched from the
+// Ethernet Performance Monitoring History Data ME.
+var EthernetUniHistory = map[string]voltha.PmConfig_PmType{
+ "class_id": voltha.PmConfig_CONTEXT,
+ "entity_id": voltha.PmConfig_CONTEXT,
+ "interval_end_time": voltha.PmConfig_CONTEXT,
+
+ "fcs_errors": voltha.PmConfig_COUNTER,
+ "excessive_collision_counter": voltha.PmConfig_COUNTER,
+ "late_collision_counter": voltha.PmConfig_COUNTER,
+ "frames_too_long": voltha.PmConfig_COUNTER,
+ "buffer_overflows_on_rx": voltha.PmConfig_COUNTER,
+ "buffer_overflows_on_tx": voltha.PmConfig_COUNTER,
+ "single_collision_frame_counter": voltha.PmConfig_COUNTER,
+ "multiple_collisions_frame_counter": voltha.PmConfig_COUNTER,
+ "sqe_counter": voltha.PmConfig_COUNTER,
+ "deferred_tx_counter": voltha.PmConfig_COUNTER,
+ "internal_mac_tx_error_counter": voltha.PmConfig_COUNTER,
+ "carrier_sense_error_counter": voltha.PmConfig_COUNTER,
+ "alignment_error_counter": voltha.PmConfig_COUNTER,
+ "internal_mac_rx_error_counter": voltha.PmConfig_COUNTER,
+}
+
+// Constants specific for L2 PM collection
+const (
+ L2PmCollectionInterval = 15 * 60 // Unit in seconds. Do not change this as it is fixed at 15 minutes by the OMCI specification for L2 PM counters
+ SyncTimeRetryInterval = 15 // Unit seconds
+ L2PmCreateAttempts = 3
+ L2PmCollectAttempts = 3
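+ // MaxL2PMGetPayLoadSize is the maximum total size (in bytes) of attributes requested in a single OMCI GET for L2 PM MEs; larger attribute sets are split across multiple GET requests.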
+ MaxL2PMGetPayLoadSize = 29
+)
+
+// EthernetBridgeHistory specific constants
+const (
+ EthernetBridgeHistoryName = "Ethernet_Bridge_Port_History"
+ EthernetBridgeHistoryEnabled = true // This setting can be changed from voltha NBI PmConfig configuration
+ EthernetBridgeHistoryFrequency = L2PmCollectionInterval
+)
+
+// EthernetUniHistory specific constants
+const (
+ EthernetUniHistoryName = "Ethernet_UNI_History"
+ EthernetUniHistoryEnabled = true // This setting can be changed from voltha NBI PmConfig configuration
+ EthernetUniHistoryFrequency = L2PmCollectionInterval
+)
+
+// *** Classical L2 PM Counters end ***
+
type groupMetric struct {
groupName string
enabled bool
frequency uint32 // valid only if FrequencyOverride is enabled.
metricMap map[string]voltha.PmConfig_PmType
nextCollectionInterval time.Time // valid only if FrequencyOverride is enabled.
+ isL2PMCounter bool // true for only L2 PM counters
+ collectAttempts uint32 // number of attempts to collect L2 PM data
+ createRetryAttempts uint32 // number of attempts to try creating the L2 PM ME
}
type standaloneMetric struct {
@@ -83,16 +185,25 @@
type onuMetricsManager struct {
pDeviceHandler *deviceHandler
+ pAdaptFsm *AdapterFsm
- commMetricsChan chan Message
- opticalMetricsChan chan me.AttributeValueMap
- uniStatusMetricsChan chan me.AttributeValueMap
+ opticalMetricsChan chan me.AttributeValueMap
+ uniStatusMetricsChan chan me.AttributeValueMap
+ l2PmChan chan me.AttributeValueMap
+ syncTimeResponseChan chan bool // true is success, false is fail
+ l2PmCreateOrDeleteResponseChan chan bool // true is success, false is fail
+
+ activeL2Pms []string // list of active l2 pm MEs created on the ONU.
+ l2PmToDelete []string // list of L2 PMs to delete
+ l2PmToAdd []string // list of L2 PM to add
groupMetricMap map[string]*groupMetric
standaloneMetricMap map[string]*standaloneMetric
stopProcessingOmciResponses chan bool
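+ // stopTicks terminates the L2 PM tick generation routine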
+ stopTicks chan bool
+
nextGlobalMetricCollectionTime time.Time // valid only if pmConfig.FreqOverride is set to false.
onuMetricsManagerLock sync.RWMutex
@@ -109,10 +220,16 @@
logger.Debugw(ctx, "init-onuMetricsManager", log.Fields{"device-id": dh.deviceID})
metricsManager.pDeviceHandler = dh
- metricsManager.commMetricsChan = make(chan Message)
+ commMetricsChan := make(chan Message)
metricsManager.opticalMetricsChan = make(chan me.AttributeValueMap)
metricsManager.uniStatusMetricsChan = make(chan me.AttributeValueMap)
+ metricsManager.l2PmChan = make(chan me.AttributeValueMap)
+
+ metricsManager.syncTimeResponseChan = make(chan bool)
+ metricsManager.l2PmCreateOrDeleteResponseChan = make(chan bool)
+
metricsManager.stopProcessingOmciResponses = make(chan bool)
+ metricsManager.stopTicks = make(chan bool)
metricsManager.groupMetricMap = make(map[string]*groupMetric)
metricsManager.standaloneMetricMap = make(map[string]*standaloneMetric)
@@ -154,6 +271,36 @@
}
dh.pmConfigs.Groups = append(dh.pmConfigs.Groups, &uniStatusGroupMetric)
+ // classical l2 pm counter start
+
+ // Populate ethernet bridge history group metrics
+ var ethBridgeHistoryConfigSlice []*voltha.PmConfig
+ for k, v := range EthernetBridgeHistory {
+ ethBridgeHistoryConfigSlice = append(ethBridgeHistoryConfigSlice, &voltha.PmConfig{Name: k, Type: v})
+ }
+ ethBridgeHistoryGroupMetric := voltha.PmGroupConfig{
+ GroupName: EthernetBridgeHistoryName,
+ Enabled: EthernetBridgeHistoryEnabled && dh.pOpenOnuAc.metricsEnabled,
+ GroupFreq: EthernetBridgeHistoryFrequency,
+ Metrics: ethBridgeHistoryConfigSlice,
+ }
+ dh.pmConfigs.Groups = append(dh.pmConfigs.Groups, ðBridgeHistoryGroupMetric)
+
+ // Populate ethernet uni history group metrics
+ var ethUniHistoryConfigSlice []*voltha.PmConfig
+ for k, v := range EthernetUniHistory {
+ ethUniHistoryConfigSlice = append(ethUniHistoryConfigSlice, &voltha.PmConfig{Name: k, Type: v})
+ }
+ ethUniHistoryGroupMetric := voltha.PmGroupConfig{
+ GroupName: EthernetUniHistoryName,
+ Enabled: EthernetUniHistoryEnabled && dh.pOpenOnuAc.metricsEnabled,
+ GroupFreq: EthernetUniHistoryFrequency,
+ Metrics: ethUniHistoryConfigSlice,
+ }
+ dh.pmConfigs.Groups = append(dh.pmConfigs.Groups, ðUniHistoryGroupMetric)
+
+ // classical l2 pm counter end
+
// Add standalone metric (if present) after this (will be added to dh.pmConfigs.Metrics)
}
@@ -169,7 +316,12 @@
metricsManager.groupMetricMap[g.GroupName].metricMap = OpticalPowerGroupMetrics
case UniStatusGroupMetricName:
metricsManager.groupMetricMap[g.GroupName].metricMap = UniStatusGroupMetrics
-
+ case EthernetBridgeHistoryName:
+ metricsManager.groupMetricMap[g.GroupName].metricMap = EthernetBridgeHistory
+ metricsManager.groupMetricMap[g.GroupName].isL2PMCounter = true
+ case EthernetUniHistoryName:
+ metricsManager.groupMetricMap[g.GroupName].metricMap = EthernetUniHistory
+ metricsManager.groupMetricMap[g.GroupName].isL2PMCounter = true
default:
logger.Errorw(ctx, "unhandled-group-name", log.Fields{"groupName": g.GroupName})
}
@@ -189,6 +341,38 @@
}
}
+ metricsManager.pAdaptFsm = NewAdapterFsm("L2PmFSM", dh.deviceID, commMetricsChan)
+ if metricsManager.pAdaptFsm == nil {
+ logger.Errorw(ctx, "L2PMFsm AdapterFsm could not be instantiated!!", log.Fields{
+ "device-id": dh.deviceID})
+ return nil
+ }
+ // L2 PM FSM related state machine
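+ // State flow (as defined by the events below): Null --init--> Starting --tick--> SyncTime --success--> Idle;
+ // Idle --tick--> CollectData, Idle --addMe--> CreatePmMe, Idle --deleteMe--> DeletePmMe, each returning to Idle on success/failure;
+ // a stop event from any state returns the FSM to Null.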
+ metricsManager.pAdaptFsm.pFsm = fsm.NewFSM(
+ l2PmStNull,
+ fsm.Events{
+ {Name: l2PmEventInit, Src: []string{l2PmStNull}, Dst: l2PmStStarting},
+ {Name: l2PmEventTick, Src: []string{l2PmStStarting}, Dst: l2PmStSyncTime},
+ {Name: l2PmEventTick, Src: []string{l2PmStIdle, l2PmStCreatePmMe, l2PmStDeletePmMe}, Dst: l2PmStCollectData},
+ {Name: l2PmEventSuccess, Src: []string{l2PmStSyncTime, l2PmStCreatePmMe, l2PmStDeletePmMe, l2PmStCollectData}, Dst: l2PmStIdle},
+ {Name: l2PmEventFailure, Src: []string{l2PmStCreatePmMe, l2PmStDeletePmMe, l2PmStCollectData}, Dst: l2PmStIdle},
+ {Name: l2PmEventFailure, Src: []string{l2PmStSyncTime}, Dst: l2PmStSyncTime},
+ {Name: l2PmEventAddMe, Src: []string{l2PmStIdle}, Dst: l2PmStCreatePmMe},
+ {Name: l2PmEventDeleteMe, Src: []string{l2PmStIdle}, Dst: l2PmStDeletePmMe},
+ {Name: l2PmEventStop, Src: []string{l2PmStNull, l2PmStStarting, l2PmStSyncTime, l2PmStIdle, l2PmStCreatePmMe, l2PmStDeletePmMe, l2PmStCollectData}, Dst: l2PmStNull},
+ },
+ fsm.Callbacks{
+ "enter_state": func(e *fsm.Event) { metricsManager.pAdaptFsm.logFsmStateChange(ctx, e) },
+ "enter_" + l2PmStNull: func(e *fsm.Event) { metricsManager.l2PMFsmNull(ctx, e) },
+ "enter_" + l2PmStIdle: func(e *fsm.Event) { metricsManager.l2PMFsmIdle(ctx, e) },
+ "enter_" + l2PmStStarting: func(e *fsm.Event) { metricsManager.l2PMFsmStarting(ctx, e) },
+ "enter_" + l2PmStSyncTime: func(e *fsm.Event) { metricsManager.l2PMFsmSyncTime(ctx, e) },
+ "enter_" + l2PmStCollectData: func(e *fsm.Event) { metricsManager.l2PmFsmCollectData(ctx, e) },
+ "enter_" + l2PmStCreatePmMe: func(e *fsm.Event) { metricsManager.l2PmFsmCreatePM(ctx, e) },
+ "enter_" + l2PmStDeletePmMe: func(e *fsm.Event) { metricsManager.l2PmFsmDeletePM(ctx, e) },
+ },
+ )
+
// initialize the next metric collection intervals.
metricsManager.initializeMetricCollectionTime(ctx)
logger.Info(ctx, "init-onuMetricsManager completed", log.Fields{"device-id": dh.deviceID})
@@ -201,7 +385,7 @@
mm.onuMetricsManagerLock.Lock()
defer mm.onuMetricsManagerLock.Unlock()
for _, v := range mm.groupMetricMap {
- if v.enabled {
+ if v.enabled && !v.isL2PMCounter { // L2 PM counter collection is managed in a L2PmFsm
v.nextCollectionInterval = time.Now().Add(time.Duration(v.frequency) * time.Second)
}
}
@@ -258,7 +442,7 @@
mm.onuMetricsManagerLock.Lock()
defer mm.onuMetricsManagerLock.Unlock()
for k, v := range mm.groupMetricMap {
- if k == aGroupName {
+ if k == aGroupName && !v.isL2PMCounter { // We cannot allow the L2 PM counter frequency to be updated. It is fixed at 15 minutes by the OMCI spec
v.frequency = newGroupFreq
// update internal pm config
mm.pDeviceHandler.pmConfigs.Groups[groupSliceIdx].GroupFreq = newGroupFreq
@@ -339,12 +523,31 @@
if k == aGroupName && v.enabled != group.Enabled {
mm.pDeviceHandler.pmConfigs.Groups[groupSliceIdx].Enabled = group.Enabled
v.enabled = group.Enabled
- // If the group is now enabled and frequency override is enabled, set the next group metric collection time
- if group.Enabled && mm.pDeviceHandler.pmConfigs.FreqOverride {
- v.nextCollectionInterval = time.Now().Add(time.Duration(v.frequency) * time.Second)
+ if group.Enabled {
+ if v.isL2PMCounter {
+ // If it is a L2 PM counter we need to mark the PM to be added
+ mm.l2PmToAdd = mm.appendIfMissing(mm.l2PmToAdd, v.groupName)
+ // If the group support flag toggles too soon, we need to delete the group name from l2PmToDelete slice
+ mm.l2PmToDelete = mm.removeIfFound(mm.l2PmToDelete, v.groupName)
+ } else if mm.pDeviceHandler.pmConfigs.FreqOverride { // otherwise just update the next collection interval
+ v.nextCollectionInterval = time.Now().Add(time.Duration(v.frequency) * time.Second)
+ }
+ } else { // group counter is disabled
+ if v.isL2PMCounter {
+ // If it is a L2 PM counter we need to mark the PM to be deleted
+ mm.l2PmToDelete = mm.appendIfMissing(mm.l2PmToDelete, v.groupName)
+ // If the group support flag toggles too soon, we need to delete the group name from l2PmToAdd slice
+ mm.l2PmToAdd = mm.removeIfFound(mm.l2PmToAdd, v.groupName)
+ }
}
updated = true
- logger.Infow(ctx, "group metric support updated", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "groupName": aGroupName, "enabled": group.Enabled})
+ if v.isL2PMCounter {
+ logger.Infow(ctx, "l2 pm group metric support updated",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "groupName": aGroupName, "enabled": group.Enabled, "l2PmToAdd": mm.l2PmToAdd, "l2PmToDelete": mm.l2PmToDelete})
+ } else {
+ logger.Infow(ctx, "group metric support updated", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "groupName": aGroupName, "enabled": group.Enabled})
+ }
+ break
}
}
@@ -487,7 +690,7 @@
opticalMetrics := make(map[string]float32)
// Get the ANI-G instance optical power attributes
requestedAttributes := me.AttributeValueMap{"OpticalSignalLevel": 0, "TransmitOpticalLevel": 0}
- if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.AniGClassID, anigInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.commMetricsChan); meInstance != nil {
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.AniGClassID, anigInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
select {
case meAttributes = <-mm.opticalMetricsChan:
logger.Debugw(ctx, "received optical metrics", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
@@ -563,7 +766,7 @@
var meAttributes me.AttributeValueMap
// Get the UNI-G instance optical power attributes
requestedAttributes := me.AttributeValueMap{"AdministrativeState": 0}
- if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.UniGClassID, unigInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.commMetricsChan); meInstance != nil {
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.UniGClassID, unigInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
// Wait for metrics or timeout
select {
case meAttributes = <-mm.uniStatusMetricsChan:
@@ -608,7 +811,7 @@
pptpMetrics := make(map[string]float32)
requestedAttributes := me.AttributeValueMap{"SensedType": 0, "OperationalState": 0, "AdministrativeState": 0}
- if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.PhysicalPathTerminationPointEthernetUniClassID, pptpInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.commMetricsChan); meInstance != nil {
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.PhysicalPathTerminationPointEthernetUniClassID, pptpInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
// Wait for metrics or timeout
select {
case meAttributes = <-mm.uniStatusMetricsChan:
@@ -663,7 +866,7 @@
veipMetrics := make(map[string]float32)
requestedAttributes := me.AttributeValueMap{"OperationalState": 0, "AdministrativeState": 0}
- if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.VirtualEthernetInterfacePointClassID, veipInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.commMetricsChan); meInstance != nil {
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, me.VirtualEthernetInterfacePointClassID, veipInstID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
// Wait for metrics or timeout
select {
case meAttributes = <-mm.uniStatusMetricsChan:
@@ -735,7 +938,7 @@
case <-mm.stopProcessingOmciResponses: // stop this routine
logger.Infow(ctx, "Stop routine to process OMCI-GET messages for device-id", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
return
- case message, ok := <-mm.commMetricsChan:
+ case message, ok := <-mm.pAdaptFsm.commChan:
if !ok {
logger.Errorw(ctx, "Message couldn't be read from channel", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
continue
@@ -760,7 +963,12 @@
case omci.GetResponseType:
//TODO: error handling
_ = mm.handleOmciGetResponseMessage(ctx, msg)
-
+ case omci.SynchronizeTimeResponseType:
+ _ = mm.handleOmciSynchronizeTimeResponseMessage(ctx, msg)
+ case omci.CreateResponseType:
+ _ = mm.handleOmciCreateResponseMessage(ctx, msg)
+ case omci.DeleteResponseType:
+ _ = mm.handleOmciDeleteResponseMessage(ctx, msg)
default:
logger.Warnw(ctx, "Unknown Message Type", log.Fields{"msgType": msg.OmciMsg.MessageType})
@@ -794,20 +1002,52 @@
case me.VirtualEthernetInterfacePointClassID:
mm.uniStatusMetricsChan <- meAttributes
return nil
+ case me.EthernetFramePerformanceMonitoringHistoryDataUpstreamClassID,
+ me.EthernetFramePerformanceMonitoringHistoryDataDownstreamClassID,
+ me.EthernetPerformanceMonitoringHistoryDataClassID:
+ mm.l2PmChan <- meAttributes
default:
logger.Errorw(ctx, "unhandled omci get response message",
log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
}
}
- return errors.New("unhandled-omci-get-response-message")
+ return fmt.Errorf("unhandled-omci-get-response-message")
+}
+
+func (mm *onuMetricsManager) handleOmciSynchronizeTimeResponseMessage(ctx context.Context, msg OmciMessage) error {
+ msgLayer := (*msg.OmciPacket).Layer(omci.LayerTypeSynchronizeTimeResponse)
+ if msgLayer == nil {
+ logger.Errorw(ctx, "omci Msg layer could not be detected for synchronize time response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be detected for synchronize time response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ msgObj, msgOk := msgLayer.(*omci.SynchronizeTimeResponse)
+ if !msgOk {
+ logger.Errorw(ctx, "omci Msg layer could not be assigned for synchronize time response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be assigned for synchronize time response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ logger.Debugw(ctx, "OMCI synchronize time response Data", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "data-fields": msgObj})
+ if msgObj.Result == me.Success {
+ switch msgObj.EntityClass {
+ case me.OnuGClassID:
+ logger.Infow(ctx, "omci synchronize time success", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ mm.syncTimeResponseChan <- true
+ return nil
+ default:
+ logger.Errorw(ctx, "unhandled omci message",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
+ }
+ }
+ mm.syncTimeResponseChan <- false
+ logger.Errorf(ctx, "unhandled-omci-synchronize-time-response-message--error-code-%v", msgObj.Result)
+ return fmt.Errorf("unhandled-omci-synchronize-time-response-message--error-code-%v", msgObj.Result)
}
// flushMetricCollectionChannels flushes all metric collection channels for any stale OMCI responses
func (mm *onuMetricsManager) flushMetricCollectionChannels(ctx context.Context) {
// flush commMetricsChan
select {
- case <-mm.commMetricsChan:
+ case <-mm.pAdaptFsm.commChan:
logger.Debug(ctx, "flushed common metrics channel")
default:
}
@@ -825,4 +1065,860 @@
logger.Debug(ctx, "flushed uni status metrics channel")
default:
}
+
+ // flush syncTimeResponseChan
+ select {
+ case <-mm.syncTimeResponseChan:
+ logger.Debug(ctx, "flushed sync time response channel")
+ default:
+ }
+
+ // flush l2PmChan
+ select {
+ case <-mm.l2PmChan:
+ logger.Debug(ctx, "flushed L2 PM collection channel")
+ default:
+ }
+
+ // flush stopTicks
+ select {
+ case <-mm.stopTicks:
+ logger.Debug(ctx, "flushed stopTicks channel")
+ default:
+ }
+
+}
+
+// ** L2 PM FSM Handlers start **
+
+func (mm *onuMetricsManager) l2PMFsmStarting(ctx context.Context, e *fsm.Event) {
+ // Loop through all the group metrics.
+ // If a group is an L2 PM interval metric and it is enabled but not yet in the
+ // active L2 PM list, mark it for creation.
+ // If a group is an L2 PM interval metric and it is disabled but still in the
+ // active L2 PM list, mark it for deletion.
+ mm.onuMetricsManagerLock.Lock()
+ for n, g := range mm.groupMetricMap {
+ if g.isL2PMCounter { // it is a l2 pm counter
+ if g.enabled { // metric enabled.
+ found := false
+ inner1:
+ for _, v := range mm.activeL2Pms {
+ if v == n {
+ found = true // metric already present in active l2 pm list
+ break inner1
+ }
+ }
+ if !found { // metric not in active l2 pm list. Mark this to be added later
+ mm.l2PmToAdd = mm.appendIfMissing(mm.l2PmToAdd, n)
+ }
+ } else { // metric not enabled.
+ found := false
+ inner2:
+ for _, v := range mm.activeL2Pms {
+ if v == n {
+ found = true // metric is found in active l2 pm list
+ break inner2
+ }
+ }
+ if found { // metric is found in active l2 pm list. Mark this to be deleted later
+ mm.l2PmToDelete = mm.appendIfMissing(mm.l2PmToDelete, n)
+ }
+ }
+ }
+ }
+ mm.onuMetricsManagerLock.Unlock()
+ logger.Debugw(ctx, "pms to add and delete",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "pms-to-add": mm.l2PmToAdd, "pms-to-delete": mm.l2PmToDelete})
+ go func() {
+ // push a tick event to move to next state
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventTick); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+}
+
+func (mm *onuMetricsManager) l2PMFsmSyncTime(ctx context.Context, e *fsm.Event) {
+ // Sync time with the ONU to establish 15min boundary for PM collection.
+ if err := mm.syncTime(ctx); err != nil {
+ go func() {
+ time.Sleep(SyncTimeRetryInterval * time.Second) // retry to sync time after this timeout
+ // This will result in FSM attempting to sync time again
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventFailure); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+ return
+ }
+ // Initiate a tick generation routine every L2PmCollectionInterval
+ go mm.generateTicks(ctx)
+
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventSuccess); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+}
+
+func (mm *onuMetricsManager) l2PMFsmNull(ctx context.Context, e *fsm.Event) {
+ // We need to reset the local data so that the L2 PM MEs are re-provisioned once the ONU is back up based on the latest PM CONFIG
+ mm.onuMetricsManagerLock.Lock()
+ mm.activeL2Pms = nil
+ mm.l2PmToAdd = nil
+ mm.l2PmToDelete = nil
+ mm.onuMetricsManagerLock.Unlock()
+}
+func (mm *onuMetricsManager) l2PMFsmIdle(ctx context.Context, e *fsm.Event) {
+ logger.Debugw(ctx, "Enter state idle", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+
+ mm.onuMetricsManagerLock.RLock()
+ numOfPmToDelete := len(mm.l2PmToDelete)
+ numOfPmToAdd := len(mm.l2PmToAdd)
+ mm.onuMetricsManagerLock.RUnlock()
+
+ if numOfPmToDelete > 0 {
+ logger.Debugw(ctx, "state idle - pms to delete", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "pms-to-delete": numOfPmToDelete})
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventDeleteMe); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+ } else if numOfPmToAdd > 0 {
+ logger.Debugw(ctx, "state idle - pms to add", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "pms-to-add": numOfPmToAdd})
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventAddMe); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+ }
+}
+
+func (mm *onuMetricsManager) l2PmFsmCollectData(ctx context.Context, e *fsm.Event) {
+ logger.Debugw(ctx, "state collect data", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ // Copy the activeL2Pms for which we want to collect the metrics since activeL2Pms can change dynamically
+ mm.onuMetricsManagerLock.RLock()
+ copyOfActiveL2Pms := make([]string, len(mm.activeL2Pms))
+ _ = copy(copyOfActiveL2Pms, mm.activeL2Pms)
+ mm.onuMetricsManagerLock.RUnlock()
+
+ for _, n := range copyOfActiveL2Pms {
+ switch n {
+ case EthernetBridgeHistoryName:
+ logger.Debugw(ctx, "state collect data - collecting data for EthernetFramePerformanceMonitoringHistoryData ME", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ var metricInfoSlice []*voltha.MetricInformation
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ // Attach the EthernetFramePerformanceMonitoringHistoryData ME to MacBridgePortConfigData on the UNI port
+ entityID := macBridgePortAniEID + uniPort.entityID
+ if metricInfo := mm.collectEthernetFramePerformanceMonitoringHistoryData(ctx, true, entityID); metricInfo != nil { // upstream
+ metricInfoSlice = append(metricInfoSlice, metricInfo)
+ }
+ if metricInfo := mm.collectEthernetFramePerformanceMonitoringHistoryData(ctx, false, entityID); metricInfo != nil { // downstream
+ metricInfoSlice = append(metricInfoSlice, metricInfo)
+ }
+ }
+ // Publish metrics if it is valid
+ if metricInfoSlice != nil {
+ mm.publishMetrics(ctx, metricInfoSlice)
+ } else {
+ // If collectAttempts exceeds L2PmCollectAttempts then remove it from activeL2Pms
+ // slice so that we do not collect data from that PM ME anymore
+ mm.onuMetricsManagerLock.Lock()
+ mm.groupMetricMap[n].collectAttempts++
+ if mm.groupMetricMap[n].collectAttempts > L2PmCollectAttempts {
+ mm.activeL2Pms = mm.removeIfFound(mm.activeL2Pms, n)
+ }
+ logger.Warnw(ctx, "state collect data - no metrics collected",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "metricName": n, "collectAttempts": mm.groupMetricMap[n].collectAttempts})
+ mm.onuMetricsManagerLock.Unlock()
+ }
+ case EthernetUniHistoryName:
+ logger.Debugw(ctx, "state collect data - collecting data for EthernetPerformanceMonitoringHistoryData ME", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ var metricInfoSlice []*voltha.MetricInformation
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ if uniPort.portType == uniPPTP { // This metric is only applicable for PPTP Uni Type
+ // The EthernetPerformanceMonitoringHistoryData ME is attached to the PPTP Ethernet UNI ME instance
+ entityID := uniPort.entityID
+ if metricInfo := mm.collectEthernetUniHistoryData(ctx, entityID); metricInfo != nil {
+ metricInfoSlice = append(metricInfoSlice, metricInfo)
+ }
+ }
+ }
+ // Publish metrics if it is valid
+ if metricInfoSlice != nil {
+ mm.publishMetrics(ctx, metricInfoSlice)
+ } else {
+ // If collectAttempts exceeds L2PmCollectAttempts then remove it from activeL2Pms
+ // slice so that we do not collect data from that PM ME anymore
+ mm.onuMetricsManagerLock.Lock()
+ mm.groupMetricMap[n].collectAttempts++
+ if mm.groupMetricMap[n].collectAttempts > L2PmCollectAttempts {
+ mm.activeL2Pms = mm.removeIfFound(mm.activeL2Pms, n)
+ }
+ logger.Warnw(ctx, "state collect data - no metrics collected",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "metricName": n, "collectAttempts": mm.groupMetricMap[n].collectAttempts})
+ mm.onuMetricsManagerLock.Unlock()
+ }
+ default:
+ logger.Errorw(ctx, "unsupported l2 pm", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "name": n})
+ }
+ }
+ // It does not matter whether we send success or failure here.
+ // PMs for which data collection failed will be retried in the next PM collection cycle (assuming
+ // we have not exceeded the maximum number of attempts to collect the PM data)
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventSuccess); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+}
+
+func (mm *onuMetricsManager) l2PmFsmCreatePM(ctx context.Context, e *fsm.Event) {
+ // Copy l2PmToAdd (the list of PM MEs to be created) since it can change dynamically
+ mm.onuMetricsManagerLock.RLock()
+ copyOfL2PmToAdd := make([]string, len(mm.l2PmToAdd))
+ _ = copy(copyOfL2PmToAdd, mm.l2PmToAdd)
+ mm.onuMetricsManagerLock.RUnlock()
+
+ logger.Debugw(ctx, "state create pm", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "pms-to-add": copyOfL2PmToAdd})
+ for _, n := range copyOfL2PmToAdd {
+ resp := false
+ switch n {
+ case EthernetBridgeHistoryName:
+ boolForDirection := []bool{true, false} // true indicates upstream, false indicates downstream
+ // Create ME twice, one for each direction. Boolean true is used to indicate upstream and false for downstream.
+ for _, direction := range boolForDirection {
+ inner1:
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ // Attach the EthernetFramePerformanceMonitoringHistoryData ME to MacBridgePortConfigData on the UNI port
+ entityID := macBridgePortAniEID + uniPort.entityID
+ mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendCreateOrDeleteEthernetPerformanceMonitoringHistoryME(
+ ctx, ConstDefaultOmciTimeout, true, direction, true, mm.pAdaptFsm.commChan, entityID)
+ select {
+ case resp = <-mm.l2PmCreateOrDeleteResponseChan:
+ logger.Debugw(ctx, "received create EthernetFramePerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "resp": resp, "uni": uniPort.uniID})
+ if !resp {
+ // We will attempt to create the MEs again in the next L2 PM Collection cycle
+ break inner1
+ }
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for create EthernetFramePerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "uni": uniPort.uniID})
+ }
+ }
+ }
+ case EthernetUniHistoryName:
+
+ inner2:
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ if uniPort.portType == uniPPTP { // This metric is only applicable for PPTP Uni Type
+ // The EthernetPerformanceMonitoringHistoryData ME is attached to the PPTP Ethernet UNI ME instance
+ entityID := uniPort.entityID
+ mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendCreateOrDeleteEthernetUniHistoryME(
+ ctx, ConstDefaultOmciTimeout, true, true, mm.pAdaptFsm.commChan, entityID)
+ select {
+ case resp = <-mm.l2PmCreateOrDeleteResponseChan:
+ logger.Debugw(ctx, "received create EthernetPerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "resp": resp, "uni": uniPort.uniID})
+ if !resp {
+ // We will attempt to create the MEs again in the next L2 PM Collection cycle
+ break inner2
+ }
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for create EthernetPerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "uni": uniPort.uniID})
+ }
+ }
+ }
+ default:
+ logger.Errorw(ctx, "unsupported l2 pm", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "name": n})
+ }
+ // On success Update the local list maintained for active PMs and PMs to add
+ if resp {
+ mm.onuMetricsManagerLock.Lock()
+ mm.activeL2Pms = mm.appendIfMissing(mm.activeL2Pms, n)
+ mm.l2PmToAdd = mm.removeIfFound(mm.l2PmToAdd, n)
+ mm.onuMetricsManagerLock.Unlock()
+ } else {
+ // If createRetryAttempts exceeds L2PmCreateAttempts then locally disable the PM
+ // and also remove it from l2PmToAdd slice so that we do not try to create the PM ME anymore
+ mm.onuMetricsManagerLock.Lock()
+ mm.groupMetricMap[n].createRetryAttempts++
+ if mm.groupMetricMap[n].createRetryAttempts > L2PmCreateAttempts {
+ mm.groupMetricMap[n].enabled = false
+ mm.l2PmToAdd = mm.removeIfFound(mm.l2PmToAdd, n)
+ }
+ logger.Warnw(ctx, "state create pm - failed to create pm",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "metricName": n, "createRetryAttempts": mm.groupMetricMap[n].createRetryAttempts})
+ mm.onuMetricsManagerLock.Unlock()
+ }
+ }
+ // It does not matter whether we send success or failure here.
+ // PMs that we failed to create will be attempted again in the next PM creation cycle (assuming
+ // we have not exceeded the maximum number of attempts to create the PM ME)
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventSuccess); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+}
+
+func (mm *onuMetricsManager) l2PmFsmDeletePM(ctx context.Context, e *fsm.Event) {
+ // Copy l2PmToDelete (the list of PM MEs to be deleted) since it can change dynamically
+ mm.onuMetricsManagerLock.RLock()
+ copyOfL2PmToDelete := make([]string, len(mm.l2PmToDelete))
+ _ = copy(copyOfL2PmToDelete, mm.l2PmToDelete)
+ mm.onuMetricsManagerLock.RUnlock()
+
+ logger.Debugw(ctx, "state delete pm", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "pms-to-delete": mm.l2PmToDelete})
+ for _, n := range copyOfL2PmToDelete {
+ resp := false
+ switch n {
+ case EthernetBridgeHistoryName:
+ boolForDirection := []bool{true, false} // true indicates upstream, false indicates downstream
+ // Delete the ME twice, once for each direction. Boolean true is used to indicate upstream and false for downstream.
+ for _, direction := range boolForDirection {
+ inner1:
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ // Attach the EthernetFramePerformanceMonitoringHistoryData ME to MacBridgePortConfigData on the UNI port
+ entityID := macBridgePortAniEID + uniPort.entityID
+ mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendCreateOrDeleteEthernetPerformanceMonitoringHistoryME(
+ ctx, ConstDefaultOmciTimeout, true, direction, false, mm.pAdaptFsm.commChan, entityID)
+ select {
+ case resp = <-mm.l2PmCreateOrDeleteResponseChan:
+ logger.Debugw(ctx, "received delete EthernetFramePerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "resp": resp, "uni": uniPort.uniID})
+ if !resp {
+ // We will attempt to delete the MEs again in the next L2 PM Collection cycle
+ break inner1
+ }
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for delete EthernetFramePerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "uni": uniPort.uniID})
+ }
+ }
+ }
+ case EthernetUniHistoryName:
+
+ inner2:
+ for _, uniPort := range mm.pDeviceHandler.uniEntityMap {
+ if uniPort.portType == uniPPTP { // This metric is only applicable for PPTP Uni Type
+ // The EthernetPerformanceMonitoringHistoryData ME is attached to the PPTP Ethernet UNI ME instance
+ entityID := uniPort.entityID
+ mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendCreateOrDeleteEthernetUniHistoryME(
+ ctx, ConstDefaultOmciTimeout, true, false, mm.pAdaptFsm.commChan, entityID)
+ select {
+ case resp = <-mm.l2PmCreateOrDeleteResponseChan:
+ logger.Debugw(ctx, "received delete EthernetPerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "resp": resp, "uni": uniPort.uniID})
+ if !resp {
+ // We will attempt to delete the MEs again in the next L2 PM Collection cycle
+ break inner2
+ }
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for delete EthernetPerformanceMonitoringHistoryData l2 pm me response",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "uni": uniPort.uniID})
+ }
+ }
+ }
+ default:
+ logger.Errorw(ctx, "unsupported l2 pm", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "name": n})
+ }
+ // On success Update the local list maintained for active PMs and PMs to delete
+ if resp {
+ mm.onuMetricsManagerLock.Lock()
+ mm.activeL2Pms = mm.removeIfFound(mm.activeL2Pms, n)
+ mm.l2PmToDelete = mm.removeIfFound(mm.l2PmToDelete, n)
+ mm.onuMetricsManagerLock.Unlock()
+ }
+ }
+ // It does not matter whether we send success or failure here.
+ // PMs that we failed to delete will be attempted again in the next PM collection cycle
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventSuccess); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+}
+
+// ** L2 PM FSM Handlers end **
+
+// syncTime synchronizes time with the ONU to establish a 15 min boundary for PM collection and reporting.
+func (mm *onuMetricsManager) syncTime(ctx context.Context) error {
+ if err := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendSyncTime(ctx, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); err != nil {
+ logger.Errorw(ctx, "cannot send sync time request", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return err
+ }
+
+ select {
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorf(ctx, "timed out waiting for sync time response from onu", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("timed-out-waiting-for-sync-time-response-%v", mm.pDeviceHandler.deviceID)
+ case syncTimeRes := <-mm.syncTimeResponseChan:
+ if !syncTimeRes {
+ return fmt.Errorf("failed-to-sync-time-%v", mm.pDeviceHandler.deviceID)
+ }
+ logger.Infow(ctx, "sync time success", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return nil
+ }
+}
+
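+// collectEthernetFramePerformanceMonitoringHistoryData collects the upstream or downstream Ethernet Frame PM History Data
+// counters for the given entityID and returns them as a MetricInformation (nil on failure).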
+func (mm *onuMetricsManager) collectEthernetFramePerformanceMonitoringHistoryData(ctx context.Context, upstream bool, entityID uint16) *voltha.MetricInformation {
+ var mEnt *me.ManagedEntity
+ var omciErr me.OmciErrors
+ var classID me.ClassID
+ var meAttributes me.AttributeValueMap
+ logger.Debugw(ctx, "collecting data for EthernetFramePerformanceMonitoringHistoryData", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream})
+ meParam := me.ParamData{EntityID: entityID}
+ if upstream {
+ if mEnt, omciErr = me.NewEthernetFramePerformanceMonitoringHistoryDataUpstream(meParam); omciErr == nil || mEnt == nil || omciErr.GetError() != nil {
+ logger.Errorw(ctx, "error creating me", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream})
+ return nil
+ }
+ classID = me.EthernetFramePerformanceMonitoringHistoryDataUpstreamClassID
+ } else {
+ if mEnt, omciErr = me.NewEthernetFramePerformanceMonitoringHistoryDataDownstream(meParam); omciErr == nil || mEnt == nil || omciErr.GetError() != nil {
+ logger.Errorw(ctx, "error creating me", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream})
+ return nil
+ }
+ classID = me.EthernetFramePerformanceMonitoringHistoryDataDownstreamClassID
+ }
+
+ requestedAttributes := make(me.AttributeValueMap)
+ size := 0
+ intervalEndTime := -1
+ ethPMHistData := make(map[string]float32)
+
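+ // Request attributes in chunks so that each OMCI GET request payload stays within MaxL2PMGetPayLoadSize.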
+ for _, v := range mEnt.GetAttributeDefinitions() {
+ if (v.Size + size) <= MaxL2PMGetPayLoadSize {
+ requestedAttributes[v.Name] = v.DefValue
+ size = v.Size + size
+ } else { // We exceeded the allowed OMCI GET payload size
+ // Issue a GET for the attributes gathered so far and collect the remaining ones in the next iteration
+ if err := mm.populateEthernetBridgeHistoryMetrics(ctx, upstream, classID, entityID, meAttributes, requestedAttributes, ethPMHistData, &intervalEndTime); err != nil {
+ logger.Errorw(ctx, "error during metric collection",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream, "err": err})
+ return nil
+ }
+ size = 0 // reset size
+ requestedAttributes = make(me.AttributeValueMap) // reset map
+ }
+ }
+ // Collect the omci get attributes for the last bunch of attributes.
+ if len(requestedAttributes) > 0 {
+ if err := mm.populateEthernetBridgeHistoryMetrics(ctx, upstream, classID, entityID, meAttributes, requestedAttributes, ethPMHistData, &intervalEndTime); err != nil {
+ logger.Errorw(ctx, "error during metric collection",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream, "err": err})
+ return nil
+ }
+ }
+
+ // Populate some relevant context for the EthernetFramePerformanceMonitoringHistoryData PM
+ ethPMHistData["class_id"] = float32(classID)
+ ethPMHistData["interval_end_time"] = float32(intervalEndTime)
+ ethPMHistData["parent_class_id"] = float32(me.MacBridgeConfigurationDataClassID) // EthernetFramePerformanceMonitoringHistoryData is attached to MBPCD ME
+ ethPMHistData["parent_entity_id"] = float32(entityID)
+ if upstream {
+ ethPMHistData["upstream"] = float32(1)
+ } else {
+ ethPMHistData["upstream"] = float32(0)
+ }
+
+ metricsContext := make(map[string]string)
+ metricsContext["onuID"] = fmt.Sprintf("%d", mm.pDeviceHandler.device.ProxyAddress.OnuId)
+ metricsContext["intfID"] = fmt.Sprintf("%d", mm.pDeviceHandler.device.ProxyAddress.ChannelId)
+ metricsContext["devicetype"] = mm.pDeviceHandler.DeviceType
+
+ raisedTs := time.Now().Unix()
+ mmd := voltha.MetricMetaData{
+ Title: EthernetBridgeHistoryName,
+ Ts: float64(raisedTs),
+ Context: metricsContext,
+ DeviceId: mm.pDeviceHandler.deviceID,
+ LogicalDeviceId: mm.pDeviceHandler.logicalDeviceID,
+ SerialNo: mm.pDeviceHandler.device.SerialNumber,
+ }
+
+ // create the metric information; there can be more than one ME instance (one per UNI port and direction)
+ metricInfo := voltha.MetricInformation{Metadata: &mmd, Metrics: ethPMHistData}
+ logger.Debugw(ctx, "collecting data for EthernetFramePerformanceMonitoringHistoryData successful",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "upstream": upstream, "metricInfo": metricInfo})
+ return &metricInfo
+}
+
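+// collectEthernetUniHistoryData collects the Ethernet PM History Data counters for the given PPTP UNI entityID
+// and returns them as a MetricInformation (nil on failure).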
+func (mm *onuMetricsManager) collectEthernetUniHistoryData(ctx context.Context, entityID uint16) *voltha.MetricInformation {
+ var mEnt *me.ManagedEntity
+ var omciErr me.OmciErrors
+ var classID me.ClassID
+ var meAttributes me.AttributeValueMap
+ logger.Debugw(ctx, "collecting data for EthernetFramePerformanceMonitoringHistoryData", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID})
+ meParam := me.ParamData{EntityID: entityID}
+ if mEnt, omciErr = me.NewEthernetPerformanceMonitoringHistoryData(meParam); omciErr == nil || mEnt == nil || omciErr.GetError() != nil {
+ logger.Errorw(ctx, "error creating me", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID})
+ return nil
+ }
+ classID = me.EthernetPerformanceMonitoringHistoryDataClassID
+
+ requestedAttributes := make(me.AttributeValueMap)
+ size := 0
+ intervalEndTime := -1
+ ethUniHistData := make(map[string]float32)
+
+ for _, v := range mEnt.GetAttributeDefinitions() {
+ if (v.Size + size) <= MaxL2PMGetPayLoadSize {
+ requestedAttributes[v.Name] = v.DefValue
+ size = v.Size + size
+ } else { // We exceeded the allowed OMCI GET payload size
+ // Issue a GET for the attributes gathered so far and collect the remaining ones in the next iteration
+ if err := mm.populateEthernetUniHistoryMetrics(ctx, classID, entityID, meAttributes, requestedAttributes, ethUniHistData, &intervalEndTime); err != nil {
+ logger.Errorw(ctx, "error during metric collection",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "err": err})
+ return nil
+ }
+ size = 0 // reset size
+ requestedAttributes = make(me.AttributeValueMap) // reset map
+ }
+ }
+ // Collect the remaining batch of attributes with a final omci get.
+ if len(requestedAttributes) > 0 {
+ if err := mm.populateEthernetUniHistoryMetrics(ctx, classID, entityID, meAttributes, requestedAttributes, ethUniHistData, &intervalEndTime); err != nil {
+ logger.Errorw(ctx, "error during metric collection",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "err": err})
+ return nil
+ }
+ }
+
+ // Populate some relevant context for the EthernetPerformanceMonitoringHistoryData PM
+ ethUniHistData["class_id"] = float32(classID)
+ ethUniHistData["interval_end_time"] = float32(intervalEndTime)
+
+ metricsContext := make(map[string]string)
+ metricsContext["onuID"] = fmt.Sprintf("%d", mm.pDeviceHandler.device.ProxyAddress.OnuId)
+ metricsContext["intfID"] = fmt.Sprintf("%d", mm.pDeviceHandler.device.ProxyAddress.ChannelId)
+ metricsContext["devicetype"] = mm.pDeviceHandler.DeviceType
+
+ raisedTs := time.Now().Unix()
+ mmd := voltha.MetricMetaData{
+ Title: EthernetUniHistoryName,
+ Ts: float64(raisedTs),
+ Context: metricsContext,
+ DeviceId: mm.pDeviceHandler.deviceID,
+ LogicalDeviceId: mm.pDeviceHandler.logicalDeviceID,
+ SerialNo: mm.pDeviceHandler.device.SerialNumber,
+ }
+
+ // build the metric entry for this instance; there could be more than one PPTP UNI instance
+ metricInfo := voltha.MetricInformation{Metadata: &mmd, Metrics: ethUniHistData}
+ logger.Debugw(ctx, "collecting data for EthernetPerformanceMonitoringHistoryData successful",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID, "metricInfo": metricInfo})
+ return &metricInfo
+}
+
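+// populateEthernetBridgeHistoryMetrics fetches the requested EthernetFramePerformanceMonitoringHistoryData
+// attributes (upstream or downstream) via OMCI GET and fills ethPMHistData. It returns an error if the GET
+// times out or if IntervalEndTime changes between partial GET responses of the same collection cycle.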
+// nolint: gocyclo
+func (mm *onuMetricsManager) populateEthernetBridgeHistoryMetrics(ctx context.Context, upstream bool, classID me.ClassID, entityID uint16,
+ meAttributes me.AttributeValueMap, requestedAttributes me.AttributeValueMap, ethPMHistData map[string]float32, intervalEndTime *int) error {
+ // Make sure "IntervalEndTime" is part of the requested attributes, as we need it to correlate the get responses when the get request is split into multiple parts
+ if _, ok := requestedAttributes["IntervalEndTime"]; !ok {
+ requestedAttributes["IntervalEndTime"] = 0
+ }
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, classID, entityID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
+ select {
+ case meAttributes = <-mm.l2PmChan:
+ logger.Debugw(ctx, "received ethernet pm history data metrics",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "upstream": upstream, "entityID": entityID})
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for omci-get response for ethernet pm history data",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "upstream": upstream, "entityID": entityID})
+ // The metrics will be empty in this case
+ return fmt.Errorf("timeout-during-l2-pm-collection-for-ethernet-bridge-history-%v", mm.pDeviceHandler.deviceID)
+ }
+ // verify that interval end time has not changed during metric collection. If it changed, we abort the procedure
+ if *intervalEndTime == -1 { // first time
+ // Update the interval end time
+ if val, ok := meAttributes["IntervalEndTime"]; ok && val != nil {
+ *intervalEndTime = int(meAttributes["IntervalEndTime"].(uint8))
+ }
+ } else {
+ var currIntervalEndTime int
+ if val, ok := meAttributes["IntervalEndTime"]; ok && val != nil {
+ currIntervalEndTime = int(meAttributes["IntervalEndTime"].(uint8))
+ }
+ if currIntervalEndTime != *intervalEndTime { // interval end time changed during metric collection
+ logger.Errorw(ctx, "interval end time changed during metrics collection for ethernet pm history data",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID,
+ "currIntervalEndTime": *intervalEndTime, "newIntervalEndTime": currIntervalEndTime})
+ return fmt.Errorf("interval-end-time-changed-during-metric-collection-for-ethernet-bridge-history-%v", mm.pDeviceHandler.deviceID)
+ }
+ }
+ }
+ for k := range EthernetBridgeHistory {
+ // populate ethPMHistData only if the metric key is not already present, since the attributes of a given
+ // L2 PM ME may be fetched over multiple iterations because of the limit on the max OMCI GET payload size.
+ if _, ok := ethPMHistData[k]; !ok {
+ switch k {
+ case "drop_events":
+ if val, ok := meAttributes["DropEvents"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "octets":
+ if val, ok := meAttributes["Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "packets":
+ if val, ok := meAttributes["Packets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "broadcast_packets":
+ if val, ok := meAttributes["BroadcastPackets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "multicast_packets":
+ if val, ok := meAttributes["MulticastPackets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "crc_errored_packets":
+ if val, ok := meAttributes["CrcErroredPackets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "undersize_packets":
+ if val, ok := meAttributes["UndersizePackets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "oversize_packets":
+ if val, ok := meAttributes["OversizePackets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "64_octets":
+ if val, ok := meAttributes["Packets64Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "65_to_127_octets":
+ if val, ok := meAttributes["Packets65To127Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "128_to_255_octets":
+ if val, ok := meAttributes["Packets128To255Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "256_to_511_octets":
+ if val, ok := meAttributes["Packets256To511Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "512_to_1023_octets":
+ if val, ok := meAttributes["Packets512To1023Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ case "1024_to_1518_octets":
+ if val, ok := meAttributes["Packets1024To1518Octets"]; ok && val != nil {
+ ethPMHistData[k] = float32(val.(uint32))
+ }
+ default:
+ // do nothing
+ }
+ }
+ }
+ return nil
+}
+
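+// populateEthernetUniHistoryMetrics fetches the requested EthernetPerformanceMonitoringHistoryData attributes
+// via OMCI GET and fills ethPMUniHistData. It returns an error if the GET times out or if IntervalEndTime
+// changes between partial GET responses of the same collection cycle.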
+// nolint: gocyclo
+func (mm *onuMetricsManager) populateEthernetUniHistoryMetrics(ctx context.Context, classID me.ClassID, entityID uint16,
+ meAttributes me.AttributeValueMap, requestedAttributes me.AttributeValueMap, ethPMUniHistData map[string]float32, intervalEndTime *int) error {
+ // Make sure "IntervalEndTime" is part of the requested attributes, as we need it to correlate the get responses when the get request is split into multiple parts
+ if _, ok := requestedAttributes["IntervalEndTime"]; !ok {
+ requestedAttributes["IntervalEndTime"] = 0
+ }
+ if meInstance := mm.pDeviceHandler.pOnuOmciDevice.PDevOmciCC.sendGetMe(ctx, classID, entityID, requestedAttributes, ConstDefaultOmciTimeout, true, mm.pAdaptFsm.commChan); meInstance != nil {
+ select {
+ case meAttributes = <-mm.l2PmChan:
+ logger.Debugw(ctx, "received ethernet uni history data metrics",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID})
+ case <-time.After(time.Duration(ConstDefaultOmciTimeout) * time.Second):
+ logger.Errorw(ctx, "timeout waiting for omci-get response for ethernet uni history data",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID})
+ // The metrics will be empty in this case
+ return fmt.Errorf("timeout-during-l2-pm-collection-for-ethernet-uni-history-%v", mm.pDeviceHandler.deviceID)
+ }
+ // verify that interval end time has not changed during metric collection. If it changed, we abort the procedure
+ if *intervalEndTime == -1 { // first time
+ // Update the interval end time
+ if val, ok := meAttributes["IntervalEndTime"]; ok && val != nil {
+ *intervalEndTime = int(meAttributes["IntervalEndTime"].(uint8))
+ }
+ } else {
+ var currIntervalEndTime int
+ if val, ok := meAttributes["IntervalEndTime"]; ok && val != nil {
+ currIntervalEndTime = int(meAttributes["IntervalEndTime"].(uint8))
+ }
+ if currIntervalEndTime != *intervalEndTime { // interval end time changed during metric collection
+ logger.Errorw(ctx, "interval end time changed during metrics collection for ethernet uni history data",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "entityID": entityID,
+ "currIntervalEndTime": *intervalEndTime, "newIntervalEndTime": currIntervalEndTime})
+ return fmt.Errorf("interval-end-time-changed-during-metric-collection-for-ethernet-uni-history-%v", mm.pDeviceHandler.deviceID)
+ }
+ }
+ }
+ for k := range EthernetUniHistory {
+ // populate ethPMUniHistData only if the metric key is not already present, since the attributes of a given
+ // L2 PM ME may be fetched over multiple iterations because of the limit on the max OMCI GET payload size.
+ if _, ok := ethPMUniHistData[k]; !ok {
+ switch k {
+ case "fcs_errors":
+ if val, ok := meAttributes["FcsErrors"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "excessive_collision_counter":
+ if val, ok := meAttributes["ExcessiveCollisionCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "late_collision_counter":
+ if val, ok := meAttributes["LateCollisionCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "frames_too_long":
+ if val, ok := meAttributes["FramesTooLong"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "buffer_overflows_on_rx":
+ if val, ok := meAttributes["BufferOverflowsOnReceive"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "buffer_overflows_on_tx":
+ if val, ok := meAttributes["BufferOverflowsOnTransmit"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "single_collision_frame_counter":
+ if val, ok := meAttributes["SingleCollisionFrameCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "multiple_collisions_frame_counter":
+ if val, ok := meAttributes["MultipleCollisionsFrameCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "sqe_counter":
+ if val, ok := meAttributes["SqeCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "deferred_tx_counter":
+ if val, ok := meAttributes["DeferredTransmissionCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "internal_mac_tx_error_counter":
+ if val, ok := meAttributes["InternalMacTransmitErrorCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "carrier_sense_error_counter":
+ if val, ok := meAttributes["CarrierSenseErrorCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "alignment_error_counter":
+ if val, ok := meAttributes["AlignmentErrorCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ case "internal_mac_rx_error_counter":
+ if val, ok := meAttributes["InternalMacReceiveErrorCounter"]; ok && val != nil {
+ ethPMUniHistData[k] = float32(val.(uint32))
+ }
+ default:
+ // do nothing
+ }
+ }
+ }
+ return nil
+}
+
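+// handleOmciCreateResponseMessage decodes an OMCI CreateResponse for the L2 PM related MEs and reports the
+// outcome (success or failure) on the l2PmCreateOrDeleteResponseChan channel.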
+func (mm *onuMetricsManager) handleOmciCreateResponseMessage(ctx context.Context, msg OmciMessage) error {
+ msgLayer := (*msg.OmciPacket).Layer(omci.LayerTypeCreateResponse)
+ if msgLayer == nil {
+ logger.Errorw(ctx, "omci Msg layer could not be detected for create response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be detected for create response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ msgObj, msgOk := msgLayer.(*omci.CreateResponse)
+ if !msgOk {
+ logger.Errorw(ctx, "omci Msg layer could not be assigned for create response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be assigned for delete response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ logger.Debugw(ctx, "OMCI create response Data", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "data-fields": msgObj})
+ switch msgObj.EntityClass {
+ case me.EthernetFramePerformanceMonitoringHistoryDataUpstreamClassID,
+ me.EthernetFramePerformanceMonitoringHistoryDataDownstreamClassID,
+ me.EthernetPerformanceMonitoringHistoryDataClassID:
+ // If the result is me.InstanceExists, it means the entity already exists. It is ok to treat that as success
+ if msgObj.Result == me.Success || msgObj.Result == me.InstanceExists {
+ mm.l2PmCreateOrDeleteResponseChan <- true
+ } else {
+ logger.Warnw(ctx, "failed to create me", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
+ mm.l2PmCreateOrDeleteResponseChan <- false
+ }
+ return nil
+ default:
+ logger.Errorw(ctx, "unhandled omci create response message",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
+ }
+ return fmt.Errorf("unhandled-omci-create-response-message-%v", mm.pDeviceHandler.deviceID)
+}
+
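+// handleOmciDeleteResponseMessage decodes an OMCI DeleteResponse for the L2 PM related MEs and reports the
+// outcome (success or failure) on the l2PmCreateOrDeleteResponseChan channel.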
+func (mm *onuMetricsManager) handleOmciDeleteResponseMessage(ctx context.Context, msg OmciMessage) error {
+ msgLayer := (*msg.OmciPacket).Layer(omci.LayerTypeDeleteResponse)
+ if msgLayer == nil {
+ logger.Errorw(ctx, "omci Msg layer could not be detected for delete response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be detected for create response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ msgObj, msgOk := msgLayer.(*omci.DeleteResponse)
+ if !msgOk {
+ logger.Errorw(ctx, "omci Msg layer could not be assigned for delete response - handling stopped", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return fmt.Errorf("omci Msg layer could not be assigned for delete response - handling stopped: %s", mm.pDeviceHandler.deviceID)
+ }
+ logger.Debugw(ctx, "OMCI delete response Data", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "data-fields": msgObj})
+ switch msgObj.EntityClass {
+ case me.EthernetFramePerformanceMonitoringHistoryDataUpstreamClassID,
+ me.EthernetFramePerformanceMonitoringHistoryDataDownstreamClassID,
+ me.EthernetPerformanceMonitoringHistoryDataClassID:
+ // If the result is me.UnknownInstance, it means the entity was already deleted. It is ok to treat that as success
+ if msgObj.Result == me.Success || msgObj.Result == me.UnknownInstance {
+ mm.l2PmCreateOrDeleteResponseChan <- true
+ } else {
+ logger.Warnw(ctx, "failed to delete me", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
+ mm.l2PmCreateOrDeleteResponseChan <- false
+ }
+ return nil
+ default:
+ logger.Errorw(ctx, "unhandled omci delete response message",
+ log.Fields{"device-id": mm.pDeviceHandler.deviceID, "class-id": msgObj.EntityClass})
+ }
+ return fmt.Errorf("unhandled-omci-delete-response-message-%v", mm.pDeviceHandler.deviceID)
+}
+
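+// generateTicks fires the l2PmEventTick event on the metrics FSM every L2PmCollectionInterval seconds
+// until a signal is received on the stopTicks channel.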
+func (mm *onuMetricsManager) generateTicks(ctx context.Context) {
+ for {
+ select {
+ case <-time.After(L2PmCollectionInterval * time.Second):
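+ // fire the tick event asynchronously so that a long-running FSM transition does not block this ticker loop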
+ go func() {
+ if err := mm.pAdaptFsm.pFsm.Event(l2PmEventTick); err != nil {
+ logger.Errorw(ctx, "error calling event", log.Fields{"device-id": mm.pDeviceHandler.deviceID, "err": err})
+ }
+ }()
+ case <-mm.stopTicks:
+ logger.Infow(ctx, "stopping ticks", log.Fields{"device-id": mm.pDeviceHandler.deviceID})
+ return
+ }
+ }
+}
+
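+// appendIfMissing returns slice with n appended, unless n is already present in slice.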
+func (mm *onuMetricsManager) appendIfMissing(slice []string, n string) []string {
+ for _, ele := range slice {
+ if ele == n {
+ return slice
+ }
+ }
+ return append(slice, n)
+}
+
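+// removeIfFound returns slice with the first occurrence of n removed, if present.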
+func (mm *onuMetricsManager) removeIfFound(slice []string, n string) []string {
+ for i, ele := range slice {
+ if ele == n {
+ return append(slice[:i], slice[i+1:]...)
+ }
+ }
+ return slice
}
diff --git a/internal/pkg/onuadaptercore/openonu.go b/internal/pkg/onuadaptercore/openonu.go
index 7bc9936..3a2780a 100644
--- a/internal/pkg/onuadaptercore/openonu.go
+++ b/internal/pkg/onuadaptercore/openonu.go
@@ -377,6 +377,7 @@
logger.Infow(ctx, "delete-device", log.Fields{"device-id": device.Id, "SerialNumber": device.SerialNumber})
if handler := oo.getDeviceHandler(ctx, device.Id, false); handler != nil {
err := handler.deleteDevicePersistencyData(ctx)
+ handler.stopCollector <- true // stop the metric collector routine
//don't leave any garbage - even in error case
oo.deleteDeviceHandlerToMap(handler)
return err