Fixes for stricter SCA rules
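
These changes address findings from stricter static code analysis
(SCA) rules: guarding nil pointers before they are dereferenced,
handling errors returned by deferred Close() calls, binding the
asserted value in type switches instead of re-asserting it, cleaning
file paths before os.Open, collapsing else { if } chains into else if,
and taking element addresses by index instead of pointing at a
range-loop variable.
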
Change-Id: I027796c040009ec21d9864b1868757993d47cb35
diff --git a/rw_core/core/api/grpc_nbi_handler_test.go b/rw_core/core/api/grpc_nbi_handler_test.go
index 8d9cb16..9ad3926 100755
--- a/rw_core/core/api/grpc_nbi_handler_test.go
+++ b/rw_core/core/api/grpc_nbi_handler_test.go
@@ -1140,8 +1140,12 @@
}
// Wait for logical device to have the flows (or none)
var vlFunction isLogicalDevicesConditionSatisfied = func(lds *voltha.LogicalDevices) bool {
- flows, _ := nbi.ListLogicalDeviceFlows(getContext(), &voltha.ID{Id: lds.Items[0].Id})
- return lds != nil && len(lds.Items) == 1 && len(flows.Items) == expectedNumFlows
+ id := ""
+ if lds != nil && len(lds.Items) == 1 {
+ id = lds.Items[0].Id
+ }
+ flws, _ := nbi.ListLogicalDeviceFlows(getContext(), &voltha.ID{Id: id})
+ return lds != nil && len(lds.Items) == 1 && flws != nil && len(flws.Items) == expectedNumFlows
}
// No timeout implies a success
err := waitUntilConditionForLogicalDevices(nb.maxTimeout, nbi, vlFunction)
@@ -1430,7 +1434,12 @@
if err != nil {
logger.Fatalf(ctx, "could not create CPU profile: %v\n ", err)
}
- defer f.Close()
+ defer func() {
+ err := f.Close()
+ if err != nil {
+ logger.Errorf(ctx, "failed to close file: %v\n", err)
+ }
+ }()
runtime.SetBlockProfileRate(1)
runtime.SetMutexProfileFraction(-1)
if err := pprof.StartCPUProfile(f); err != nil {
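
Note: the pattern above, a deferred closure that checks the error from
Close instead of a bare "defer f.Close()", is what errcheck-style SCA
rules expect. A minimal sketch of the same idiom as a reusable helper
(the helper name is hypothetical, not part of this change):

	// closeLogged closes c and logs the error instead of silently dropping it.
	func closeLogged(ctx context.Context, c io.Closer) {
		if err := c.Close(); err != nil {
			logger.Errorf(ctx, "failed to close: %v", err)
		}
	}

	// usage: defer closeLogged(ctx, f)
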
diff --git a/rw_core/core/device/agent_flow.go b/rw_core/core/device/agent_flow.go
index 4589b91..3b04c02 100644
--- a/rw_core/core/device/agent_flow.go
+++ b/rw_core/core/device/agent_flow.go
@@ -231,7 +231,7 @@
return coreutils.DoneResponse(), status.Errorf(codes.Aborted, "%s", err)
}
if device.OperStatus != voltha.OperStatus_ACTIVE || device.ConnectStatus != voltha.ConnectStatus_REACHABLE || device.AdminState != voltha.AdminState_ENABLED {
- desc = fmt.Sprint("invalid device states")
+ desc = "invalid device states"
agent.logDeviceUpdate(ctx, "updateFlowsToAdapter", nil, nil, operStatus, &desc)
return coreutils.DoneResponse(), status.Errorf(codes.FailedPrecondition, "invalid device states")
}
diff --git a/rw_core/core/device/agent_image.go b/rw_core/core/device/agent_image.go
index 74a60c7..075e2bf 100644
--- a/rw_core/core/device/agent_image.go
+++ b/rw_core/core/device/agent_image.go
@@ -64,7 +64,7 @@
cloned := agent.cloneDeviceWithoutLock()
_, index, err := getImage(img, device)
if err != nil {
- cloned.ImageDownloads = append(device.ImageDownloads, clonedImg)
+ cloned.ImageDownloads = append(cloned.ImageDownloads, clonedImg)
} else {
cloned.ImageDownloads[index] = clonedImg
}
diff --git a/rw_core/core/device/event/event.go b/rw_core/core/device/event/event.go
index 00e08d2..2eeb70a 100644
--- a/rw_core/core/device/event/event.go
+++ b/rw_core/core/device/event/event.go
@@ -100,13 +100,13 @@
func (q *Manager) flushFailedPackets(ctx context.Context, tracker *callTracker) error {
if tracker.failedPacket != nil {
- switch tracker.failedPacket.(type) {
+ switch failedPacket := tracker.failedPacket.(type) {
case openflow_13.PacketIn:
logger.Debug(ctx, "enqueueing-last-failed-packet-in")
- q.packetInQueue <- tracker.failedPacket.(openflow_13.PacketIn)
+ q.packetInQueue <- failedPacket
case openflow_13.ChangeEvent:
logger.Debug(ctx, "enqueueing-last-failed-change-event")
- q.changeEventQueue <- tracker.failedPacket.(openflow_13.ChangeEvent)
+ q.changeEventQueue <- failedPacket
}
}
return nil
@@ -138,11 +138,9 @@
nil, time.Now().Unix())
// save the last failed packet in
streamingTracker.failedPacket = packet
- } else {
- if streamingTracker.failedPacket != nil {
- // reset last failed packet saved to avoid flush
- streamingTracker.failedPacket = nil
- }
+ } else if streamingTracker.failedPacket != nil {
+ // reset last failed packet saved to avoid flush
+ streamingTracker.failedPacket = nil
}
case <-q.packetInQueueDone:
logger.Debug(ctx, "another-receive-packets-in-running-bailing-out")
@@ -228,11 +226,9 @@
time.Now().Unix())
// save last failed change event
streamingTracker.failedPacket = event
- } else {
- if streamingTracker.failedPacket != nil {
- // reset last failed event saved on success to avoid flushing
- streamingTracker.failedPacket = nil
- }
+ } else if streamingTracker.failedPacket != nil {
+ // reset last failed event saved on success to avoid flushing
+ streamingTracker.failedPacket = nil
}
case <-q.changeEventQueueDone:
logger.Debug(ctx, "another-receive-change-events-already-running-bailing-out")
diff --git a/rw_core/core/device/flow/loader_test.go b/rw_core/core/device/flow/loader_test.go
index 958124d..536a447 100644
--- a/rw_core/core/device/flow/loader_test.go
+++ b/rw_core/core/device/flow/loader_test.go
@@ -18,8 +18,10 @@
import (
"bufio"
+ "context"
"fmt"
"os"
+ "path/filepath"
"regexp"
"strconv"
"testing"
@@ -59,17 +61,27 @@
}
func compare(regexesA, regexesB []*regexp.Regexp, fileNameA, fileNameB string) error {
- fileA, err := os.Open(fileNameA)
+ fileA, err := os.Open(filepath.Clean(fileNameA))
if err != nil {
return err
}
- defer fileA.Close()
+ defer func() {
+ err := fileA.Close()
+ if err != nil {
+ logger.Errorf(context.Background(), "failed to close file: %v", err)
+ }
+ }()
- fileB, err := os.Open(fileNameB)
+ fileB, err := os.Open(filepath.Clean(fileNameB))
if err != nil {
return err
}
- defer fileB.Close()
+ defer func() {
+ err := fileB.Close()
+ if err != nil {
+ logger.Errorf(context.Background(), "failed to close file: %v", err)
+ }
+ }()
scannerA, scannerB := bufio.NewScanner(fileA), bufio.NewScanner(fileB)
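
Note: wrapping caller-supplied names in filepath.Clean before os.Open
is the usual way to address a gosec-style G304 ("potential file
inclusion via variable") finding. Clean normalizes the path, e.g.
collapsing "." and ".." elements, but is not a sandbox on its own:

	p := filepath.Clean(name) // normalize before opening
	f, err := os.Open(p)
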
diff --git a/rw_core/core/device/logical_agent_meter.go b/rw_core/core/device/logical_agent_meter.go
index aad005f..f3a1c89 100644
--- a/rw_core/core/device/logical_agent_meter.go
+++ b/rw_core/core/device/logical_agent_meter.go
@@ -59,10 +59,11 @@
}
func (agent *LogicalAgent) meterAdd(ctx context.Context, meterMod *ofp.OfpMeterMod) error {
- logger.Debugw(ctx, "meterAdd", log.Fields{"metermod": *meterMod})
if meterMod == nil {
+ logger.Errorw(ctx, "failed-meterAdd-meterMod-is-nil", log.Fields{"logical-device-id": agent.logicalDeviceID})
return nil
}
+ logger.Debugw(ctx, "meterAdd", log.Fields{"metermod": *meterMod, "logical-device-id": agent.logicalDeviceID})
meterEntry := fu.MeterEntryFromMeterMod(ctx, meterMod)
@@ -73,22 +74,23 @@
defer meterHandle.Unlock()
if created {
- logger.Debugw(ctx, "Meter-added-successfully", log.Fields{"Added-meter": meterEntry})
+ logger.Debugw(ctx, "Meter-added-successfully", log.Fields{"Added-meter": meterEntry, "logical-device-id": agent.logicalDeviceID})
} else {
- logger.Infow(ctx, "Meter-already-exists", log.Fields{"meter": *meterMod})
+ logger.Infow(ctx, "Meter-already-exists", log.Fields{"meter": *meterMod, "logical-device-id": agent.logicalDeviceID})
}
return nil
}
func (agent *LogicalAgent) meterDelete(ctx context.Context, meterMod *ofp.OfpMeterMod) error {
- logger.Debug(ctx, "meterDelete", log.Fields{"meterMod": *meterMod})
if meterMod == nil {
+ logger.Errorw(ctx, "failed-meterDelete-meterMod-is-nil", log.Fields{"logical-device-id": agent.logicalDeviceID})
return nil
}
+ logger.Debugw(ctx, "meterDelete", log.Fields{"meterMod": *meterMod, "logical-device-id": agent.logicalDeviceID})
meterHandle, have := agent.meterLoader.Lock(meterMod.MeterId)
if !have {
- logger.Warnw(ctx, "meter-not-found", log.Fields{"meterID": meterMod.MeterId})
+ logger.Warnw(ctx, "meter-not-found", log.Fields{"meterID": meterMod.MeterId, "logical-device-id": agent.logicalDeviceID})
return nil
}
defer meterHandle.Unlock()
@@ -103,19 +105,20 @@
return err
}
- logger.Debugw(ctx, "meterDelete-success", log.Fields{"meterID": meterMod.MeterId})
+ logger.Debugw(ctx, "meterDelete-success", log.Fields{"meterID": meterMod.MeterId, "logical-device-id": agent.logicalDeviceID})
return nil
}
func (agent *LogicalAgent) meterModify(ctx context.Context, meterMod *ofp.OfpMeterMod) error {
logger.Debug(ctx, "meterModify")
if meterMod == nil {
+ logger.Errorw(ctx, "failed-meterModify-meterMod-is-nil", log.Fields{"logical-device-id": agent.logicalDeviceID})
return nil
}
meterHandle, have := agent.meterLoader.Lock(meterMod.MeterId)
if !have {
- return fmt.Errorf("no-meter-to-modify: %d", meterMod.MeterId)
+ return fmt.Errorf("no-meter-to-modify: %d, logical-device-id: %s", meterMod.MeterId, agent.logicalDeviceID)
}
defer meterHandle.Unlock()
@@ -126,6 +129,6 @@
if err := meterHandle.Update(ctx, newMeter); err != nil {
return err
}
- logger.Debugw(ctx, "replaced-with-new-meter", log.Fields{"oldMeter": oldMeter, "newMeter": newMeter})
+ logger.Debugw(ctx, "replaced-with-new-meter", log.Fields{"oldMeter": oldMeter, "newMeter": newMeter, "logical-device-id": agent.logicalDeviceID})
return nil
}
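
Note: the Debug calls in this file moved below the nil checks because
building log.Fields{"metermod": *meterMod} dereferences the pointer
while constructing the fields map; with a nil meterMod the old
ordering panics before the guard ever runs. A minimal illustration of
the failure mode:

	var mod *ofp.OfpMeterMod         // nil
	_ = log.Fields{"metermod": *mod} // panics: nil pointer dereference
	if mod == nil {                  // too late; the guard must come first
		return nil
	}
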
diff --git a/rw_core/core/device/logical_agent_port.go b/rw_core/core/device/logical_agent_port.go
index 05a5108..c82ada0 100644
--- a/rw_core/core/device/logical_agent_port.go
+++ b/rw_core/core/device/logical_agent_port.go
@@ -172,10 +172,10 @@
newPort.OfpPort = &newOfpPort
if state == voltha.OperStatus_ACTIVE {
- newOfpPort.Config = newOfpPort.Config & ^uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
+ newOfpPort.Config &= ^uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
newOfpPort.State = uint32(ofp.OfpPortState_OFPPS_LIVE)
} else {
- newOfpPort.Config = newOfpPort.Config | uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
+ newOfpPort.Config |= uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
newOfpPort.State = uint32(ofp.OfpPortState_OFPPS_LINK_DOWN)
}
return &newPort
@@ -272,7 +272,7 @@
newOfpPort := *oldPort.OfpPort
newPort.OfpPort = &newOfpPort
- newOfpPort.Config = newOfpPort.Config & ^uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
+ newOfpPort.Config &= ^uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)
if err := portHandle.Update(ctx, &newPort); err != nil {
return err
}
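
Note: Go also has a compound AND-NOT operator, so the bit-clear case
above could be written even more directly; both spellings compile to
the same mask operation:

	newOfpPort.Config &^= uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN) // clear PORT_DOWN
	newOfpPort.Config |= uint32(ofp.OfpPortConfig_OFPPC_PORT_DOWN)  // set PORT_DOWN
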
diff --git a/rw_core/core/device/manager.go b/rw_core/core/device/manager.go
index 9982611..b3fa7b7 100755
--- a/rw_core/core/device/manager.go
+++ b/rw_core/core/device/manager.go
@@ -612,12 +612,11 @@
return err
}
logger.Debugw(ctx, "successfully-loaded-parent-and-children", log.Fields{"device-id": deviceID})
- } else {
+ } else if device.ParentId != "" {
// Scenario B - use the parentId of that device (root device) to trigger the loading
- if device.ParentId != "" {
- return dMgr.load(ctx, device.ParentId)
- }
+ return dMgr.load(ctx, device.ParentId)
}
+
return nil
}
@@ -633,7 +632,13 @@
// trigger loading the devices along with their children and parent in memory
func (dMgr *Manager) ReconcileDevices(ctx context.Context, ids *voltha.IDs) (*empty.Empty, error) {
ctx = utils.WithRPCMetadataContext(ctx, "ReconcileDevices")
- logger.Debugw(ctx, "reconcile-devices", log.Fields{"num-devices": len(ids.Items)})
+
+ numDevices := 0
+ if ids != nil {
+ numDevices = len(ids.Items)
+ }
+
+ logger.Debugw(ctx, "reconcile-devices", log.Fields{"num-devices": numDevices})
if ids != nil && len(ids.Items) != 0 {
toReconcile := len(ids.Items)
reconciled := 0
diff --git a/rw_core/core/device/state/transitions.go b/rw_core/core/device/state/transitions.go
index c3bd372..103697f 100644
--- a/rw_core/core/device/state/transitions.go
+++ b/rw_core/core/device/state/transitions.go
@@ -431,14 +431,14 @@
var tempHandler []transitionHandler
var m *match
bestMatch := &match{}
- for _, aTransition := range tMap.transitions {
+ for i := range tMap.transitions {
// consider transition only if it matches deviceType or is a wild card - any
- if aTransition.deviceType != deviceType && aTransition.deviceType != any {
+ if tMap.transitions[i].deviceType != deviceType && tMap.transitions[i].deviceType != any {
continue
}
- tempHandler, m = getHandler(pState, cState, &aTransition)
+ tempHandler, m = getHandler(pState, cState, &tMap.transitions[i])
if tempHandler != nil {
- if m.isExactMatch() && aTransition.deviceType == deviceType {
+ if m.isExactMatch() && tMap.transitions[i].deviceType == deviceType {
return tempHandler
} else if m.isExactMatch() || m.isBetterMatch(bestMatch) {
currentMatch = tempHandler
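
Note: iterating by index is the standard fix for gosec's G601
(implicit memory aliasing in range loops): before Go 1.22 the range
variable is a single variable reused on every iteration, so
&aTransition always points at the same storage, while
&tMap.transitions[i] is the address of the element itself. It also
avoids copying the transition struct each pass. The classic shape of
the bug:

	xs := []int{1, 2, 3}
	var ptrs []*int
	for _, v := range xs {
		ptrs = append(ptrs, &v) // pre-Go-1.22: all three entries alias one v
	}
	for i := range xs {
		ptrs[i] = &xs[i] // correct: each entry points at its own element
	}
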
diff --git a/rw_core/core/kv.go b/rw_core/core/kv.go
index 84f707b..631e82a 100644
--- a/rw_core/core/kv.go
+++ b/rw_core/core/kv.go
@@ -31,8 +31,7 @@
func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
- switch storeType {
- case "etcd":
+ if storeType == "etcd" {
return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
}
return nil, errors.New("unsupported-kv-store")