[VOL-2471] Update library to use package logger
This commit consists of the following:
1) Add a GetLogLevel() API to make it easier to query the level of
a specific logger (see the sketch below). The existing V() API
provides somewhat similar, but more limited, functionality.
2) Add a common.go file to some heavily used packages so that their
log level can be changed dynamically at run time and a specific
logger can be set per package.
3) Use a per-package logger in some of the heavily used packages
for improved performance.
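
For illustration, a minimal sketch of how a consumer could combine
these pieces. The GetLogLevel() signature and the package-path
argument to SetPackageLogLevel() are assumptions here, since the log
package changes themselves are not shown in this diff:

    package main

    import (
        "github.com/opencord/voltha-lib-go/v2/pkg/log"
    )

    func main() {
        // Register this package with its own logger, mirroring the
        // common.go pattern introduced by this commit.
        logger, err := log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "main"})
        if err != nil {
            panic(err)
        }

        // Assumed call: raise a registered package's log level at
        // run time, e.g. to debug the db package on a live system.
        log.SetPackageLogLevel("github.com/opencord/voltha-lib-go/v2/pkg/db", log.DebugLevel)

        // Assumed signature: GetLogLevel() returns the logger's
        // current level, making the check explicit where V() only
        // reports whether a given level is enabled.
        if logger.GetLogLevel() <= log.DebugLevel {
            logger.Debug("debug-logging-enabled")
        }
    }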
Change-Id: If22a2c82d87d808f305677a2e793f8064f33291e
diff --git a/VERSION b/VERSION
index ef0bec0..9945222 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.27
+2.2.28
diff --git a/pkg/adapters/common/adapter_proxy.go b/pkg/adapters/common/adapter_proxy.go
index 7b09a1f..37015ad 100644
--- a/pkg/adapters/common/adapter_proxy.go
+++ b/pkg/adapters/common/adapter_proxy.go
@@ -38,7 +38,7 @@
proxy.kafkaICProxy = kafkaProxy
proxy.adapterTopic = adapterTopic
proxy.coreTopic = coreTopic
- log.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+ logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
return &proxy
}
@@ -50,14 +50,14 @@
toDeviceId string,
proxyDeviceId string,
messageId string) error {
- log.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+ logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
//Marshal the message
var marshalledMsg *any.Any
var err error
if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
- log.Warnw("cannot-marshal-msg", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
return err
}
@@ -91,6 +91,6 @@
rpc := "process_inter_adapter_message"
success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
- log.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+ logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
return unPackResponse(rpc, "", success, result)
}
diff --git a/pkg/adapters/common/common.go b/pkg/adapters/common/common.go
new file mode 100644
index 0000000..c4b07c2
--- /dev/null
+++ b/pkg/adapters/common/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+)
+
+const (
+ logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "common"})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/adapters/common/core_proxy.go b/pkg/adapters/common/core_proxy.go
index a69d9b4..0b8a023 100644
--- a/pkg/adapters/common/core_proxy.go
+++ b/pkg/adapters/common/core_proxy.go
@@ -44,7 +44,7 @@
proxy.coreTopic = coreTopic
proxy.deviceIdCoreMap = make(map[string]string)
proxy.lockDeviceIdCoreMap = sync.RWMutex{}
- log.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+ logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
return &proxy
}
@@ -56,9 +56,9 @@
unpackResult := &ic.Error{}
var err error
if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
}
- log.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
+ logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
// TODO: Need to get the real error code
return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
}
@@ -94,7 +94,7 @@
}
func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
- log.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+ logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
rpc := "Register"
topic := kafka.Topic{Name: ap.coreTopic}
replyToTopic := ap.getAdapterTopic()
@@ -109,12 +109,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
- log.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+ logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
return unPackResponse(rpc, "", success, result)
}
func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
- log.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
+ logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
rpc := "DeviceUpdate"
toTopic := ap.getCoreTopic(device.Id)
args := make([]*kafka.KVArg, 1)
@@ -125,12 +125,12 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, device.Id, args...)
- log.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
+ logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
return unPackResponse(rpc, device.Id, success, result)
}
func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
- log.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
+ logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
rpc := "PortCreated"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -149,12 +149,12 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
+ logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_OperStatus) error {
- log.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
+ logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
rpc := "PortsStateUpdate"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -175,12 +175,12 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+ logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
- log.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+ logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
rpc := "DeleteAllPorts"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -196,13 +196,13 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+ logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
connStatus voltha.ConnectStatus_ConnectStatus, operStatus voltha.OperStatus_OperStatus) error {
- log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+ logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
rpc := "DeviceStateUpdate"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -227,13 +227,13 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+ logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
- log.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
+ logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
rpc := "ChildDeviceDetected"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -278,12 +278,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
volthaDevice := &voltha.Device{}
if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
return volthaDevice, nil
@@ -291,9 +291,9 @@
unpackResult := &ic.Error{}
var err error
if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
}
- log.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+ logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
// TODO: Need to get the real error code
return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
}
@@ -301,7 +301,7 @@
}
func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
- log.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+ logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
rpc := "ChildDevicesLost"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -316,12 +316,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(rpc, parentDeviceId, success, result)
}
func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
- log.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+ logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
rpc := "ChildDevicesDetected"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -336,12 +336,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(rpc, parentDeviceId, success, result)
}
func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
- log.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
+ logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
rpc := "GetDevice"
toTopic := ap.getCoreTopic(parentDeviceId)
@@ -355,12 +355,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
volthaDevice := &voltha.Device{}
if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
return volthaDevice, nil
@@ -368,16 +368,16 @@
unpackResult := &ic.Error{}
var err error
if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
}
- log.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+ logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
// TODO: Need to get the real error code
return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
}
}
func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
- log.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+ logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
rpc := "GetChildDevice"
toTopic := ap.getCoreTopic(parentDeviceId)
@@ -415,12 +415,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
volthaDevice := &voltha.Device{}
if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
return volthaDevice, nil
@@ -428,16 +428,16 @@
unpackResult := &ic.Error{}
var err error
if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
}
- log.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+ logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
// TODO: Need to get the real error code
return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
}
}
func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
- log.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+ logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
rpc := "GetChildDevices"
toTopic := ap.getCoreTopic(parentDeviceId)
@@ -451,12 +451,12 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
volthaDevices := &voltha.Devices{}
if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
return volthaDevices, nil
@@ -464,16 +464,16 @@
unpackResult := &ic.Error{}
var err error
if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
}
- log.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+ logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
// TODO: Need to get the real error code
return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
}
}
func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
- log.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+ logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
rpc := "PacketIn"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -497,12 +497,12 @@
Value: pkt,
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
+ logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
- log.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
+ logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
rpc := "DeviceReasonUpdate"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -521,12 +521,12 @@
Value: reason,
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
+ logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
- log.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+ logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
rpc := "DevicePMConfigUpdate"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -539,12 +539,12 @@
Value: pmConfigs,
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
- log.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
+ logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
return unPackResponse(rpc, pmConfigs.Id, success, result)
}
func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
- log.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+ logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
rpc := "ReconcileChildDevices"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -556,13 +556,13 @@
}
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
- log.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+ logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(rpc, parentDeviceId, success, result)
}
func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
operStatus voltha.OperStatus_OperStatus) error {
- log.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+ logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
rpc := "PortStateUpdate"
// Use a device specific topic to send the request. The adapter handling the device creates a device
// specific topic
@@ -593,6 +593,6 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
- log.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+ logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(rpc, deviceId, success, result)
}
diff --git a/pkg/adapters/common/events_proxy.go b/pkg/adapters/common/events_proxy.go
index ab6b0d0..ebc9423 100644
--- a/pkg/adapters/common/events_proxy.go
+++ b/pkg/adapters/common/events_proxy.go
@@ -81,7 +81,7 @@
/* Send out device events*/
func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
if deviceEvent == nil {
- log.Error("Recieved empty device event")
+ logger.Error("Recieved empty device event")
return errors.New("Device event nil")
}
var event voltha.Event
@@ -90,10 +90,10 @@
event.Header = ep.getEventHeader(deviceEvent.DeviceEventName, category, subCategory, voltha.EventType_DEVICE_EVENT, raisedTs)
event.EventType = &de
if err := ep.sendEvent(&event); err != nil {
- log.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+ logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
return err
}
- log.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+ logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
"DeviceEventName": deviceEvent.DeviceEventName})
@@ -105,7 +105,7 @@
// SendKpiEvent is to send kpi events to voltha.event topic
func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
if kpiEvent == nil {
- log.Error("Recieved empty kpi event")
+ logger.Error("Recieved empty kpi event")
return errors.New("KPI event nil")
}
var event voltha.Event
@@ -114,10 +114,10 @@
event.Header = ep.getEventHeader(id, category, subCategory, voltha.EventType_KPI_EVENT2, raisedTs)
event.EventType = &de
if err := ep.sendEvent(&event); err != nil {
- log.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+ logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
return err
}
- log.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+ logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
@@ -131,7 +131,7 @@
if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
return err
}
- log.Debugw("Sent event to kafka", log.Fields{"event": event})
+ logger.Debugw("Sent event to kafka", log.Fields{"event": event})
return nil
}
diff --git a/pkg/adapters/common/request_handler.go b/pkg/adapters/common/request_handler.go
index dfcaf1e..cc84ef7 100644
--- a/pkg/adapters/common/request_handler.go
+++ b/pkg/adapters/common/request_handler.go
@@ -59,7 +59,7 @@
func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -70,23 +70,23 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
+ logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
//Update the core reference for that device
rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
@@ -101,7 +101,7 @@
func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -113,17 +113,17 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
@@ -144,7 +144,7 @@
func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -156,17 +156,17 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
@@ -182,7 +182,7 @@
func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -194,17 +194,17 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
@@ -220,7 +220,7 @@
func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -232,17 +232,17 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
@@ -263,7 +263,7 @@
func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -275,17 +275,17 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
case kafka.FromTopic:
if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
- log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
return nil, err
}
}
@@ -304,9 +304,9 @@
}
func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
- log.Debug("Update_flows_bulk")
+ logger.Debug("Update_flows_bulk")
if len(args) < 5 {
- log.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -319,32 +319,32 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case "flows":
if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
- log.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
return nil, err
}
case "groups":
if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
- log.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
return nil, err
}
case "flow_metadata":
if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
- log.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+ logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
//Invoke the bulk flow update API of the adapter
if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -353,9 +353,9 @@
}
func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
- log.Debug("Update_flows_incrementally")
+ logger.Debug("Update_flows_incrementally")
if len(args) < 5 {
- log.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -368,32 +368,32 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case "flow_changes":
if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
- log.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
return nil, err
}
case "group_changes":
if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
- log.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
return nil, err
}
case "flow_metadata":
if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
- log.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+ logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
//Invoke the incremental flow update API of the adapter
if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -402,9 +402,9 @@
}
func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
- log.Debug("Update_pm_config")
+ logger.Debug("Update_pm_config")
if len(args) < 2 {
- log.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -415,22 +415,22 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case "pm_configs":
if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
- log.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+ logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
//Invoke the pm config update API of the adapter
if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -439,9 +439,9 @@
}
func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
- log.Debugw("Receive_packet_out", log.Fields{"args": args})
+ logger.Debugw("Receive_packet_out", log.Fields{"args": args})
if len(args) < 3 {
- log.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -453,27 +453,27 @@
switch arg.Key {
case "deviceId":
if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
- log.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
return nil, err
}
case "outPort":
if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
- log.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
return nil, err
}
case "packet":
if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
- log.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+ logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
//Invoke the adopt device on the adapter
if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -491,7 +491,7 @@
func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
if len(args) < 2 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -501,31 +501,31 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+ logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
var cap *ic.SwitchCapability
var err error
if cap, err = rhp.adapter.Get_ofp_device_info(device); err != nil {
return nil, status.Errorf(codes.NotFound, "%s", err.Error())
}
- log.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
+ logger.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
return cap, nil
}
func (rhp *RequestHandlerProxy) Get_ofp_port_info(args []*ic.Argument) (*ic.PortCapability, error) {
if len(args) < 3 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -536,22 +536,22 @@
switch arg.Key {
case "device":
if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case "port_no":
if err := ptypes.UnmarshalAny(arg.Value, pNo); err != nil {
- log.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Get_ofp_port_info", log.Fields{"deviceId": device.Id, "portNo": pNo.Val})
+ logger.Debugw("Get_ofp_port_info", log.Fields{"deviceId": device.Id, "portNo": pNo.Val})
var cap *ic.PortCapability
var err error
if cap, err = rhp.adapter.Get_ofp_port_info(device, pNo.Val); err != nil {
@@ -562,7 +562,7 @@
func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
if len(args) < 2 {
- log.Warn("invalid-number-of-args", log.Fields{"args": args})
+ logger.Warn("invalid-number-of-args", log.Fields{"args": args})
err := errors.New("invalid-number-of-args")
return nil, err
}
@@ -572,18 +572,18 @@
switch arg.Key {
case "msg":
if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
- log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
return nil, err
}
case kafka.TransactionKey:
if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
- log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
return nil, err
}
}
}
- log.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+ logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
//Invoke the inter adapter API on the handler
if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
diff --git a/pkg/db/backend.go b/pkg/db/backend.go
index b2547c2..b807253 100644
--- a/pkg/db/backend.go
+++ b/pkg/db/backend.go
@@ -65,7 +65,7 @@
address := host + ":" + strconv.Itoa(port)
if b.Client, err = b.newClient(address, timeout); err != nil {
- log.Errorw("failed-to-create-kv-client",
+ logger.Errorw("failed-to-create-kv-client",
log.Fields{
"type": storeType, "host": host, "port": port,
"timeout": timeout, "prefix": pathPrefix,
@@ -99,11 +99,11 @@
if b.liveness != nil {
if b.alive != alive {
- log.Debug("update-liveness-channel-reason-change")
+ logger.Debug("update-liveness-channel-reason-change")
b.liveness <- alive
b.lastLivenessTime = time.Now()
} else if time.Now().Sub(b.lastLivenessTime) > b.LivenessChannelInterval {
- log.Debug("update-liveness-channel-reason-interval")
+ logger.Debug("update-liveness-channel-reason-interval")
b.liveness <- alive
b.lastLivenessTime = time.Now()
}
@@ -111,7 +111,7 @@
// Emit log message only for alive state change
if b.alive != alive {
- log.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+ logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
b.alive = alive
}
}
@@ -120,7 +120,7 @@
// post on Liveness channel
func (b *Backend) PerformLivenessCheck(timeout int) bool {
alive := b.Client.IsConnectionUp(timeout)
- log.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+ logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
b.updateLiveness(alive)
return alive
@@ -132,10 +132,10 @@
// by the service (i.e. rw_core / ro_core) to update readiness status
// and/or take other actions.
func (b *Backend) EnableLivenessChannel() chan bool {
- log.Debug("enable-kvstore-liveness-channel")
+ logger.Debug("enable-kvstore-liveness-channel")
if b.liveness == nil {
- log.Debug("create-kvstore-liveness-channel")
+ logger.Debug("create-kvstore-liveness-channel")
// Channel size of 10 to avoid any possibility of blocking in Load conditions
b.liveness = make(chan bool, 10)
@@ -191,7 +191,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+ logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
pair, err := b.Client.List(formattedPath, b.Timeout)
@@ -206,7 +206,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+ logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
pair, err := b.Client.Get(formattedPath, b.Timeout)
@@ -221,7 +221,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
+ logger.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
err := b.Client.Put(formattedPath, value, b.Timeout)
@@ -236,7 +236,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+ logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
err := b.Client.Delete(formattedPath, b.Timeout)
@@ -251,7 +251,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+ logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
return b.Client.Watch(formattedPath)
}
@@ -262,7 +262,7 @@
defer b.Unlock()
formattedPath := b.makePath(key)
- log.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+ logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
b.Client.CloseWatch(formattedPath, ch)
}
diff --git a/pkg/db/backend_test.go b/pkg/db/backend_test.go
index 6d2fb8a..fdd9071 100644
--- a/pkg/db/backend_test.go
+++ b/pkg/db/backend_test.go
@@ -18,7 +18,6 @@
import (
"context"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
"github.com/opencord/voltha-lib-go/v2/pkg/mocks"
"github.com/phayes/freeport"
"github.com/stretchr/testify/assert"
@@ -29,10 +28,6 @@
"time"
)
-func init() {
- log.AddPackage(log.JSON, log.FatalLevel, nil)
-}
-
const (
embedEtcdServerHost = "localhost"
defaultTimeout = 1
@@ -48,15 +43,15 @@
var err error
embedEtcdServerPort, err = freeport.GetFreePort()
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
dummyEtcdServerPort, err = freeport.GetFreePort()
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
peerPort, err := freeport.GetFreePort()
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
etcdServer := mocks.StartEtcdServer(mocks.MKConfig("voltha.db.test", embedEtcdServerPort, peerPort, "voltha.lib.db", "error"))
res := m.Run()
diff --git a/pkg/db/common.go b/pkg/db/common.go
index 0851ede..d6cea42 100644
--- a/pkg/db/common.go
+++ b/pkg/db/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,14 +20,15 @@
)
const (
- logLevel = log.FatalLevel
+ logLevel = log.ErrorLevel
)
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
func init() {
// Setup this package so that it's log level can be modified at run time
- _, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
if err != nil {
panic(err)
}
diff --git a/pkg/db/kvstore/client.go b/pkg/db/kvstore/client.go
index 97fbec9..088593a 100644
--- a/pkg/db/kvstore/client.go
+++ b/pkg/db/kvstore/client.go
@@ -15,10 +15,6 @@
*/
package kvstore
-import (
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
-)
-
const (
// Default timeout in seconds when making a kvstore request
defaultKVGetTimeout = 5
@@ -43,10 +39,6 @@
Lease int64
}
-func init() {
- log.AddPackage(log.JSON, log.WarnLevel, nil)
-}
-
// NewKVPair creates a new KVPair object
func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
kv := new(KVPair)
diff --git a/pkg/db/kvstore/common.go b/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..212b1d1
--- /dev/null
+++ b/pkg/db/kvstore/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+)
+
+const (
+ logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/db/kvstore/consulclient.go b/pkg/db/kvstore/consulclient.go
index a94de4d..fd74d43 100644
--- a/pkg/db/kvstore/consulclient.go
+++ b/pkg/db/kvstore/consulclient.go
@@ -19,7 +19,7 @@
"bytes"
"context"
"errors"
- log "github.com/opencord/voltha-lib-go/v2/pkg/log"
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
"sync"
"time"
//log "ciena.com/coordinator/common"
@@ -53,7 +53,7 @@
config.WaitTime = duration
consul, err := consulapi.NewClient(config)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
@@ -65,7 +65,7 @@
// IsConnectionUp returns whether the connection to the Consul KV store is up
func (c *ConsulClient) IsConnectionUp(timeout int) bool {
- log.Error("Unimplemented function")
+ logger.Error("Unimplemented function")
return false
}
@@ -80,7 +80,7 @@
// For now we ignore meta data
kvps, _, err := kv.List(key, &queryOptions)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
m := make(map[string]*KVPair)
@@ -102,7 +102,7 @@
// For now we ignore meta data
kvp, _, err := kv.Get(key, &queryOptions)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
if kvp != nil {
@@ -121,7 +121,7 @@
var val []byte
var er error
if val, er = ToByte(value); er != nil {
- log.Error(er)
+ logger.Error(er)
return er
}
@@ -133,7 +133,7 @@
defer c.writeLock.Unlock()
_, err := kv.Put(&kvp, &writeOptions)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return err
}
return nil
@@ -148,7 +148,7 @@
defer c.writeLock.Unlock()
_, err := kv.Delete(key, &writeOptions)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return err
}
return nil
@@ -156,11 +156,11 @@
func (c *ConsulClient) deleteSession() {
if c.sessionID != "" {
- log.Debug("cleaning-up-session")
+ logger.Debug("cleaning-up-session")
session := c.consul.Session()
_, err := session.Destroy(c.sessionID, nil)
if err != nil {
- log.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+ logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
}
}
c.sessionID = ""
@@ -177,17 +177,17 @@
for {
id, meta, err := session.Create(entry, nil)
if err != nil {
- log.Errorw("create-session-error", log.Fields{"error": err})
+ logger.Errorw("create-session-error", log.Fields{"error": err})
if retries == 0 {
return nil, "", err
}
} else if meta.RequestTime == 0 {
- log.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+ logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
if retries == 0 {
return nil, "", errors.New("bad-meta-data")
}
} else if id == "" {
- log.Error("create-session-nil-id")
+ logger.Error("create-session-nil-id")
if retries == 0 {
return nil, "", errors.New("ID-nil")
}
@@ -198,7 +198,7 @@
if retries > 0 {
retries--
}
- log.Debug("retrying-session-create-after-a-second-delay")
+ logger.Debug("retrying-session-create-after-a-second-delay")
time.Sleep(time.Duration(1) * time.Second)
}
}
@@ -225,7 +225,7 @@
var val []byte
var er error
if val, er = ToByte(value); er != nil {
- log.Error(er)
+ logger.Error(er)
return nil, er
}
@@ -238,17 +238,17 @@
reservationSuccessful := false
defer func() {
if !reservationSuccessful {
- log.Debug("deleting-session")
+ logger.Debug("deleting-session")
c.deleteSession()
}
}()
session, sessionID, err := c.createSession(ttl, -1)
if err != nil {
- log.Errorw("no-session-created", log.Fields{"error": err})
+ logger.Errorw("no-session-created", log.Fields{"error": err})
return "", errors.New("no-session-created")
}
- log.Debugw("session-created", log.Fields{"session-id": sessionID})
+ logger.Debugw("session-created", log.Fields{"session-id": sessionID})
c.sessionID = sessionID
c.session = session
@@ -257,11 +257,11 @@
kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
result, _, err := kv.Acquire(&kvp, nil)
if err != nil {
- log.Errorw("error-acquiring-keys", log.Fields{"error": err})
+ logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
return nil, err
}
- log.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+ logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
// Irrespective whether we were successful in acquiring the key, let's read it back and see if it's us.
m, err := c.Get(key, defaultKVGetTimeout)
@@ -269,7 +269,7 @@
return nil, err
}
if m != nil {
- log.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+ logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
if m.Key == key && isEqual(m.Value, value) {
// My reservation is successful - register it. For now, support is only for 1 reservation per key
// per session.
@@ -299,11 +299,11 @@
kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
result, _, err = kv.Release(&kvp, nil)
if err != nil {
- log.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+ logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
return err
}
if !result {
- log.Errorw("cannot-release-reservation", log.Fields{"key": key})
+ logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
}
delete(c.keyReservations, key)
}
@@ -390,14 +390,14 @@
c.writeLock.Lock()
defer c.writeLock.Unlock()
if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
- log.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+ logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
return
}
// Look for the channels
var pos = -1
for i, chCtxMap := range watchedChannelsContexts {
if chCtxMap.channel == ch {
- log.Debug("channel-found")
+ logger.Debug("channel-found")
chCtxMap.cancel()
//close the channel
close(ch)
@@ -409,7 +409,7 @@
if pos >= 0 {
c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
}
- log.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+ logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
}
func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
@@ -430,7 +430,7 @@
}
func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
- log.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+ logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
defer c.CloseWatch(key, ch)
duration := GetDuration(defaultKVGetTimeout)
@@ -441,7 +441,7 @@
// Get the existing value, if any
previousKVPair, meta, err := kv.Get(key, &queryOptions)
if err != nil {
- log.Debug(err)
+ logger.Debug(err)
}
lastIndex := meta.LastIndex
@@ -456,30 +456,30 @@
pair, meta, err = kv.Get(key, waitOptions)
select {
case <-watchContext.Done():
- log.Debug("done-event-received-exiting")
+ logger.Debug("done-event-received-exiting")
return
default:
if err != nil {
- log.Warnw("error-from-watch", log.Fields{"error": err})
+ logger.Warnw("error-from-watch", log.Fields{"error": err})
ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
} else {
- log.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+ logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
}
}
if err != nil {
- log.Debug(err)
+ logger.Debug(err)
// On error, block for 10 milliseconds to prevent an endless loop
time.Sleep(10 * time.Millisecond)
} else if meta.LastIndex <= lastIndex {
- log.Info("no-index-change-or-negative")
+ logger.Info("no-index-change-or-negative")
} else {
- log.Debugw("update-received", log.Fields{"pair": pair})
+ logger.Debugw("update-received", log.Fields{"pair": pair})
if pair == nil {
ch <- NewEvent(DELETE, key, []byte(""), -1)
} else if !c.isKVEqual(pair, previousKVPair) {
// Push the change onto the channel if the data has changed
// For now just assume it's a PUT change
- log.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+ logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
}
previousKVPair = pair
@@ -500,7 +500,7 @@
// Clear the sessionID
if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
- log.Errorw("error-closing-client", log.Fields{"error": err})
+ logger.Errorw("error-closing-client", log.Fields{"error": err})
}
}
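
For orientation, a minimal sketch of how a caller might drive the reservation flow above. The kvstore.Client interface and the Reserve/ReleaseReservation signatures are inferred from the method bodies in this diff, so treat the exact types as assumptions:

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
)

// tryReserve attempts to reserve key for ttl seconds. Reserve reads the key
// back after acquiring it, so the returned value shows whether this
// instance's reservation won or another holder's value is in place.
func tryReserve(client kvstore.Client, key string, ttl int64) error {
	val, err := client.Reserve(key, []byte("instance-1"), ttl)
	if err != nil {
		return err
	}
	fmt.Printf("reservation on %s returned holder value: %v\n", key, val)
	// Release when done; on failure the client cleans up its own session,
	// as the deferred deleteSession() above shows.
	return client.ReleaseReservation(key)
}
```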
diff --git a/pkg/db/kvstore/etcdclient.go b/pkg/db/kvstore/etcdclient.go
index 3ae767c..234bf05 100644
--- a/pkg/db/kvstore/etcdclient.go
+++ b/pkg/db/kvstore/etcdclient.go
@@ -50,7 +50,7 @@
DialTimeout: duration,
})
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
@@ -82,7 +82,7 @@
resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
cancel()
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
m := make(map[string]*KVPair)
@@ -102,7 +102,7 @@
resp, err := c.ectdAPI.Get(ctx, key)
cancel()
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
for _, ev := range resp.Kvs {
@@ -143,13 +143,13 @@
if err != nil {
switch err {
case context.Canceled:
- log.Warnw("context-cancelled", log.Fields{"error": err})
+ logger.Warnw("context-cancelled", log.Fields{"error": err})
case context.DeadlineExceeded:
- log.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+ logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
case v3rpcTypes.ErrEmptyKey:
- log.Warnw("etcd-client-error", log.Fields{"error": err})
+ logger.Warnw("etcd-client-error", log.Fields{"error": err})
default:
- log.Warnw("bad-endpoints", log.Fields{"error": err})
+ logger.Warnw("bad-endpoints", log.Fields{"error": err})
}
return err
}
@@ -171,10 +171,10 @@
// delete the key
if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
- log.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+ logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
return err
}
- log.Debugw("key(s)-deleted", log.Fields{"key": key})
+ logger.Debugw("key(s)-deleted", log.Fields{"key": key})
return nil
}
@@ -199,7 +199,7 @@
resp, err := c.ectdAPI.Grant(ctx, ttl)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return nil, err
}
// Register the lease id
@@ -212,7 +212,7 @@
defer func() {
if !reservationSuccessful {
if err = c.ReleaseReservation(key); err != nil {
- log.Error("cannot-release-lease")
+ logger.Error("cannot-release-lease")
}
}
}()
@@ -270,7 +270,7 @@
for key, leaseID := range c.keyReservations {
_, err := c.ectdAPI.Revoke(ctx, *leaseID)
if err != nil {
- log.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+ logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
return err
}
delete(c.keyReservations, key)
@@ -281,7 +281,7 @@
// ReleaseReservation releases reservation for a specific key.
func (c *EtcdClient) ReleaseReservation(key string) error {
// Get the leaseid using the key
- log.Debugw("Release-reservation", log.Fields{"key": key})
+ logger.Debugw("Release-reservation", log.Fields{"key": key})
var ok bool
var leaseID *v3Client.LeaseID
c.writeLock.Lock()
@@ -296,7 +296,7 @@
if leaseID != nil {
_, err := c.ectdAPI.Revoke(ctx, *leaseID)
if err != nil {
- log.Error(err)
+ logger.Error(err)
return err
}
delete(c.keyReservations, key)
@@ -322,7 +322,7 @@
if leaseID != nil {
_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
if err != nil {
- log.Errorw("lease-may-have-expired", log.Fields{"error": err})
+ logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
return err
}
} else {
@@ -349,7 +349,7 @@
// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into
// JSON format.
- log.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+ logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
// Launch a go routine to listen for updates
go c.listenForKeyChange(channel, ch, cancel)
@@ -406,17 +406,17 @@
defer c.writeLock.Unlock()
if watchedChannels, ok = c.getChannelMaps(key); !ok {
- log.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+ logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
return
}
// Look for the channels
var pos = -1
for i, chMap := range watchedChannels {
if t, ok := chMap[ch]; ok {
- log.Debug("channel-found")
+ logger.Debug("channel-found")
// Close the etcd watcher before the client channel. This should close the etcd channel as well
if err := t.Close(); err != nil {
- log.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+ logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
}
pos = i
break
@@ -428,11 +428,11 @@
if pos >= 0 {
channelMaps = c.removeChannelMap(key, pos)
}
- log.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+ logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
}
func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
- log.Debug("start-listening-on-channel ...")
+ logger.Debug("start-listening-on-channel ...")
defer cancel()
defer close(ch)
for resp := range channel {
@@ -440,7 +440,7 @@
ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
}
}
- log.Debug("stop-listening-on-channel ...")
+ logger.Debug("stop-listening-on-channel ...")
}
func getEventType(event *v3Client.Event) int {
@@ -458,7 +458,7 @@
c.writeLock.Lock()
defer c.writeLock.Unlock()
if err := c.ectdAPI.Close(); err != nil {
- log.Errorw("error-closing-client", log.Fields{"error": err})
+ logger.Errorw("error-closing-client", log.Fields{"error": err})
}
}
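
Both KV clients deliver changes to watchers as *Event values pushed onto a channel, as listenForKeyChange above illustrates. A minimal consumer sketch follows; the Watch(key) signature is not shown in this diff, so it is an assumption inferred from CloseWatch and NewEvent:

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
)

// watchKey drains events for a key until the watch channel is closed.
func watchKey(client kvstore.Client, key string) {
	events := client.Watch(key) // assumed: Watch(key string) chan *Event
	for ev := range events {
		switch ev.EventType {
		case kvstore.PUT:
			fmt.Printf("key %s updated to %v\n", key, ev.Value)
		case kvstore.DELETE:
			fmt.Printf("key %s deleted\n", key)
		case kvstore.CONNECTIONDOWN:
			fmt.Printf("connection lost while watching %s\n", key)
		}
	}
}
```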
diff --git a/pkg/flows/common.go b/pkg/flows/common.go
new file mode 100644
index 0000000..de85acf
--- /dev/null
+++ b/pkg/flows/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+)
+
+const (
+ logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "flowsUtils"})
+ if err != nil {
+ panic(err)
+ }
+}
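
Each new common.go registers its package with the log service so its level can be changed without a restart. Below is a minimal sketch of how an operator-facing component might do that at run time; SetAllLogLevel and SetPackageLogLevel are not part of this diff, and the package-name literal is an assumption for illustration:

```go
package main

import (
	"github.com/opencord/voltha-lib-go/v2/pkg/log"
)

func enableDebugLogging() {
	// Raise every registered package, including flows, to Debug at once.
	log.SetAllLogLevel(log.DebugLevel)

	// Or target one package. The key is the name under which the package
	// registered itself via AddPackage; the literal here is illustrative.
	log.SetPackageLogLevel("github.com/opencord/voltha-lib-go/v2/pkg/flows", log.DebugLevel)
}
```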
diff --git a/pkg/flows/flow_utils.go b/pkg/flows/flow_utils.go
index 02a4b0b..d3cf74d 100644
--- a/pkg/flows/flow_utils.go
+++ b/pkg/flows/flow_utils.go
@@ -510,7 +510,7 @@
return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
}
}
- log.Debug("No-metadata-present")
+ logger.Debug("No-metadata-present")
return 0
}
@@ -523,7 +523,7 @@
return field.GetTableMetadata()
}
}
- log.Debug("No-metadata-present")
+ logger.Debug("No-metadata-present")
return 0
}
@@ -538,7 +538,7 @@
}
}
}
- log.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
+ logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
return 0
}
@@ -552,10 +552,10 @@
This is set in the ONOS OltPipeline as a write metadata instruction
*/
var tpId uint16 = 0
- log.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+ logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
if metadata != 0 {
tpId = uint16((metadata >> 32) & 0xFFFF)
- log.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+ logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
}
return tpId
}
@@ -570,10 +570,10 @@
*/
var uniPort uint32 = 0
md := GetMetadataFromWriteMetadataAction(flow)
- log.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
+ logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
if md != 0 {
uniPort = uint32(md & 0xFFFFFFFF)
- log.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+ logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
}
return uniPort
@@ -591,7 +591,7 @@
md := GetMetadataFromWriteMetadataAction(flow)
if md != 0 {
innerTag = uint16((md >> 48) & 0xFFFF)
- log.Debugw("Found CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+ logger.Debugw("Found CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
}
return innerTag
}
@@ -605,7 +605,7 @@
return 0
}
if md <= 0xffffffff {
- log.Debugw("onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+ logger.Debugw("onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
return md
}
return (md >> 32) & 0xffffffff
@@ -730,7 +730,7 @@
meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
if meterMod == nil {
- log.Error("Invalid meter mod command")
+ logger.Error("Invalid meter mod command")
return meter
}
// config init
@@ -752,7 +752,7 @@
bandStats = append(bandStats, band)
}
meter.Stats.BandStats = bandStats
- log.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
+ logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
return meter
}
diff --git a/pkg/flows/flow_utils_test.go b/pkg/flows/flow_utils_test.go
index 8922a9c..a9dc9cc 100644
--- a/pkg/flows/flow_utils_test.go
+++ b/pkg/flows/flow_utils_test.go
@@ -17,7 +17,6 @@
import (
"bytes"
- "github.com/opencord/voltha-lib-go/v2/pkg/log"
ofp "github.com/opencord/voltha-protos/v2/go/openflow_13"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
@@ -32,7 +31,6 @@
)
func init() {
- log.AddPackage(log.JSON, log.WarnLevel, nil)
timeoutError = status.Errorf(codes.Aborted, "timeout")
taskFailureError = status.Error(codes.Internal, "test failure task")
timeoutError = status.Errorf(codes.Aborted, "timeout")
diff --git a/pkg/grpc/common_test.go b/pkg/grpc/common_test.go
index 6ed7b26..ff4980a 100644
--- a/pkg/grpc/common_test.go
+++ b/pkg/grpc/common_test.go
@@ -21,7 +21,7 @@
const (
/*
- * This sets the LogLevel of the Voltha logger. It's pinned to FatalLevel here, as we
+ * This sets the log level of the Voltha logger. It's pinned to FatalLevel here, as we
* generally don't want to see logger output, even when running go test in verbose
* mode. Even "Error" level messages are expected to be output by some unit tests.
*
diff --git a/pkg/kafka/common.go b/pkg/kafka/common.go
new file mode 100644
index 0000000..84a4e07
--- /dev/null
+++ b/pkg/kafka/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+ "github.com/opencord/voltha-lib-go/v2/pkg/log"
+)
+
+const (
+ logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kafka"})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/pkg/kafka/kafka_inter_container_library.go b/pkg/kafka/kafka_inter_container_library.go
index 4e04b30..652bdfa 100644
--- a/pkg/kafka/kafka_inter_container_library.go
+++ b/pkg/kafka/kafka_inter_container_library.go
@@ -31,11 +31,6 @@
"time"
)
-// Initialize the logger - gets the default until the main function setup the logger
-func init() {
- log.AddPackage(log.JSON, log.DebugLevel, nil)
-}
-
const (
DefaultMaxRetries = 3
DefaultRequestTimeout = 10000 // 10000 milliseconds - to handle a wider latency range
@@ -148,11 +143,11 @@
}
func (kp *InterContainerProxy) Start() error {
- log.Info("Starting-Proxy")
+ logger.Info("Starting-Proxy")
// Kafka MsgClient should already have been created. If not, output fatal error
if kp.kafkaClient == nil {
- log.Fatal("kafka-client-not-set")
+ logger.Fatal("kafka-client-not-set")
}
// Create the Done channel
@@ -160,7 +155,7 @@
// Start the kafka client
if err := kp.kafkaClient.Start(); err != nil {
- log.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
+ logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
return err
}
@@ -177,7 +172,7 @@
}
func (kp *InterContainerProxy) Stop() {
- log.Info("stopping-intercontainer-proxy")
+ logger.Info("stopping-intercontainer-proxy")
kp.doneCh <- 1
// TODO : Perform cleanup
kp.kafkaClient.Stop()
@@ -188,10 +183,10 @@
// DeviceDiscovered publish the discovered device onto the kafka messaging bus
func (kp *InterContainerProxy) DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error {
- log.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
+ logger.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
// Simple validation
if deviceId == "" || deviceType == "" {
- log.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
+ logger.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
return errors.New("invalid-parameters")
}
// Create the device discovery message
@@ -212,7 +207,7 @@
var marshalledData *any.Any
var err error
if marshalledData, err = ptypes.MarshalAny(body); err != nil {
- log.Errorw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
return err
}
msg := &ic.InterContainerMessage{
@@ -222,7 +217,7 @@
// Send the message
if err := kp.kafkaClient.Send(msg, kp.deviceDiscoveryTopic); err != nil {
- log.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
+ logger.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
return err
}
return nil
@@ -242,7 +237,7 @@
// Encode the request
protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
if err != nil {
- log.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+ logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
return false, nil
}
@@ -251,7 +246,7 @@
if waitForResponse {
var err error
if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
- log.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+ logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
}
}
@@ -259,7 +254,7 @@
// specific key, hence ensuring a single partition is used to publish the request. This ensures that the
// subscriber on that topic will receive the request in the order it was sent. The key used is the deviceId.
//key := GetDeviceIdFromTopic(*toTopic)
- log.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+ logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
go kp.kafkaClient.Send(protoRequest, toTopic, key)
if waitForResponse {
@@ -279,7 +274,7 @@
select {
case msg, ok := <-ch:
if !ok {
- log.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+ logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
protoError := &ic.Error{Reason: "channel-closed"}
var marshalledArg *any.Any
if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
@@ -287,15 +282,15 @@
}
return false, marshalledArg
}
- log.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+ logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
var responseBody *ic.InterContainerResponseBody
var err error
if responseBody, err = decodeResponse(msg); err != nil {
- log.Errorw("decode-response-error", log.Fields{"error": err})
+ logger.Errorw("decode-response-error", log.Fields{"error": err})
}
return responseBody.Success, responseBody.Result
case <-ctx.Done():
- log.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+ logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
// pack the error as proto any type
protoError := &ic.Error{Reason: ctx.Err().Error()}
var marshalledArg *any.Any
@@ -304,7 +299,7 @@
}
return false, marshalledArg
case <-childCtx.Done():
- log.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+ logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
// pack the error as proto any type
protoError := &ic.Error{Reason: childCtx.Err().Error()}
var marshalledArg *any.Any
@@ -313,7 +308,7 @@
}
return false, marshalledArg
case <-kp.doneCh:
- log.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+ logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
return true, nil
}
}
@@ -329,7 +324,7 @@
var err error
if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
//if ch, err = kp.Subscribe(topic); err != nil {
- log.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+ logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
return err
}
@@ -348,7 +343,7 @@
var ch <-chan *ic.InterContainerMessage
var err error
if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
- log.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+ logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
return err
}
kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
@@ -387,7 +382,7 @@
// Unsubscribe to this topic first - this will close the subscribed channel
var err error
if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
- log.Errorw("unsubscribing-error", log.Fields{"topic": topic})
+ logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
}
delete(kp.topicToResponseChannelMap, topic)
return err
@@ -403,7 +398,7 @@
for topic, _ := range kp.topicToResponseChannelMap {
// Unsubscribe to this topic first - this will close the subscribed channel
if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
- log.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+ logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
}
delete(kp.topicToResponseChannelMap, topic)
}
@@ -438,7 +433,7 @@
for topic, _ := range kp.topicToRequestHandlerChannelMap {
// Close the kafka client client first by unsubscribing to this topic
if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
- log.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+ logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
}
delete(kp.topicToRequestHandlerChannelMap, topic)
}
@@ -486,10 +481,10 @@
func (kp *InterContainerProxy) DeleteTopic(topic Topic) error {
// If we have any consumers on that topic we need to close them
if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
- log.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+ logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
}
if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
- log.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+ logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
}
kp.deleteTopicTransactionIdToChannelMap(topic.Name)
@@ -503,7 +498,7 @@
}
protoValue, ok := returnedVal.(proto.Message)
if !ok {
- log.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+ logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
err := errors.New("response-value-not-proto-message")
return nil, err
}
@@ -512,7 +507,7 @@
var marshalledReturnedVal *any.Any
var err error
if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
- log.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
return nil, err
}
return marshalledReturnedVal, nil
@@ -534,7 +529,7 @@
var err error
// Error should never happen here
if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
- log.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
}
return &ic.InterContainerMessage{
@@ -547,7 +542,7 @@
//formatRequest formats a request to send over kafka and returns an InterContainerMessage message on success
//or an error on failure
func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
- //log.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+ //logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
responseHeader := &ic.Header{
Id: request.Header.Id,
Type: ic.MessageType_RESPONSE,
@@ -562,7 +557,7 @@
var err error
for _, returnVal := range returnedValues {
if marshalledReturnedVal, err = encodeReturnedValue(returnVal); err != nil {
- log.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
}
break // for now we support only 1 returned value - (excluding the error)
}
@@ -575,7 +570,7 @@
// Marshal the response body
var marshalledResponseBody *any.Any
if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
- log.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
return nil, err
}
@@ -611,7 +606,7 @@
var marshalledArg *any.Any
var err error
if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
- log.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+ logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
return currentArgs
}
protoArg := &ic.Argument{
@@ -625,7 +620,7 @@
var marshalledArg *any.Any
var err error
if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
- log.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+ logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
return currentArgs
}
protoArg := &ic.Argument{
@@ -645,9 +640,9 @@
// Get the request body
requestBody := &ic.InterContainerRequestBody{}
if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
- log.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
} else {
- log.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+ logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
// let the callee unpack the arguments as it's the only one that knows the real proto type
// Augment the requestBody with the message Id as it will be used in scenarios where cores
// are set in pairs and competing
@@ -659,7 +654,7 @@
out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
if err != nil {
- log.Warn(err)
+ logger.Warn(err)
}
}
// Response required?
@@ -679,7 +674,7 @@
if out[lastIndex].Interface() != nil { // Error
if retError, ok := out[lastIndex].Interface().(error); ok {
if retError.Error() == ErrorTransactionNotAcquired.Error() {
- log.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+ logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
return // Ignore - process is in competing mode and ignored transaction
}
returnError = &ic.Error{Reason: retError.Error()}
@@ -689,12 +684,12 @@
returnedValues = append(returnedValues, returnError)
}
} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
- log.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+ logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
return // Ignore - should not happen
} else { // Non-error case
success = true
for idx, val := range out {
- //log.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+ //logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
if idx != lastIndex {
returnedValues = append(returnedValues, val.Interface())
}
@@ -704,7 +699,7 @@
var icm *ic.InterContainerMessage
if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
- log.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
+ logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
icm = encodeDefaultFailedResponse(msg)
}
// To preserve ordering of messages, all messages to a given topic are sent to the same partition
@@ -713,22 +708,22 @@
// partitions.
replyTopic := &Topic{Name: msg.Header.FromTopic}
key := msg.Header.KeyTopic
- log.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+ logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
// TODO: handle error response.
go kp.kafkaClient.Send(icm, replyTopic, key)
}
} else if msg.Header.Type == ic.MessageType_RESPONSE {
- log.Debugw("response-received", log.Fields{"msg-header": msg.Header})
+ logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
go kp.dispatchResponse(msg)
} else {
- log.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
+ logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
}
}
func (kp *InterContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
// Wait for messages
for msg := range ch {
- //log.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+ //logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
go kp.handleMessage(msg, targetInterface)
}
}
@@ -737,7 +732,7 @@
kp.lockTransactionIdToChannelMap.RLock()
defer kp.lockTransactionIdToChannelMap.RUnlock()
if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
- log.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+ logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
return
}
kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
@@ -748,7 +743,7 @@
// API. There is one response channel waiting for kafka messages before dispatching the message to the
// corresponding waiting channel
func (kp *InterContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
- log.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+ logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
// Create a specific channel for this consumer. We cannot use the channel from the kafkaclient as it will
// broadcast any message for this topic to all channels waiting on it.
@@ -759,7 +754,7 @@
}
func (kp *InterContainerProxy) unSubscribeForResponse(trnsId string) error {
- log.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+ logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
kp.deleteFromTransactionIdToChannelMap(trnsId)
return nil
}
@@ -803,12 +798,12 @@
// ascertain the value interface type is a proto.Message
protoValue, ok := arg.Value.(proto.Message)
if !ok {
- log.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+ logger.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
err := errors.New("argument-value-not-proto-message")
return nil, err
}
if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
- log.Warnw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
return nil, err
}
protoArg := &ic.Argument{
@@ -821,7 +816,7 @@
var marshalledData *any.Any
var err error
if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
- log.Warnw("cannot-marshal-request", log.Fields{"error": err})
+ logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
return nil, err
}
request := &ic.InterContainerMessage{
@@ -835,10 +830,10 @@
// Extract the message body
responseBody := ic.InterContainerResponseBody{}
if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
- log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+ logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
return nil, err
}
- //log.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+ //logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
return &responseBody, nil
diff --git a/pkg/kafka/sarama_client.go b/pkg/kafka/sarama_client.go
index c05df69..6bc2a49 100755
--- a/pkg/kafka/sarama_client.go
+++ b/pkg/kafka/sarama_client.go
@@ -31,10 +31,6 @@
"time"
)
-func init() {
- log.AddPackage(log.JSON, log.DebugLevel, nil)
-}
-
type returnErrorFunction func() error
// consumerChannels represents one or more consumers listening on a kafka topic. Once a message is received on that
@@ -241,7 +237,7 @@
}
func (sc *SaramaClient) Start() error {
- log.Info("Starting-kafka-sarama-client")
+ logger.Info("Starting-kafka-sarama-client")
// Create the Done channel
sc.doneCh = make(chan int, 1)
@@ -257,20 +253,20 @@
// Create the Cluster Admin
if err = sc.createClusterAdmin(); err != nil {
- log.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
+ logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
return err
}
// Create the Publisher
if err := sc.createPublisher(); err != nil {
- log.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
+ logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
return err
}
if sc.consumerType == DefaultConsumerType {
// Create the master consumers
if err := sc.createConsumer(); err != nil {
- log.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
+ logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
return err
}
}
@@ -278,7 +274,7 @@
// Create the topic to consumers/channel map
sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
- log.Info("kafka-sarama-client-started")
+ logger.Info("kafka-sarama-client-started")
sc.started = true
@@ -286,7 +282,7 @@
}
func (sc *SaramaClient) Stop() {
- log.Info("stopping-sarama-client")
+ logger.Info("stopping-sarama-client")
sc.started = false
@@ -295,33 +291,33 @@
if sc.producer != nil {
if err := sc.producer.Close(); err != nil {
- log.Errorw("closing-producer-failed", log.Fields{"error": err})
+ logger.Errorw("closing-producer-failed", log.Fields{"error": err})
}
}
if sc.consumer != nil {
if err := sc.consumer.Close(); err != nil {
- log.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
+ logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
}
}
for key, val := range sc.groupConsumers {
- log.Debugw("closing-group-consumer", log.Fields{"topic": key})
+ logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
if err := val.Close(); err != nil {
- log.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+ logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
}
}
if sc.cAdmin != nil {
if err := sc.cAdmin.Close(); err != nil {
- log.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
+ logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
}
}
//TODO: Clear the consumers map
//sc.clearConsumerChannelMap()
- log.Info("sarama-client-stopped")
+ logger.Info("sarama-client-stopped")
}
//createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
@@ -338,15 +334,15 @@
if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
if err == sarama.ErrTopicAlreadyExists {
// Not an error
- log.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
+ logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
return nil
}
- log.Errorw("create-topic-failure", log.Fields{"error": err})
+ logger.Errorw("create-topic-failure", log.Fields{"error": err})
return err
}
// TODO: Wait until the topic has been created. No API is available in the Sarama clusterAdmin to
// do so.
- log.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+ logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
return nil
}
@@ -368,16 +364,16 @@
if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
if err == sarama.ErrUnknownTopicOrPartition {
// Not an error as does not exist
- log.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
+ logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
return nil
}
- log.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
+ logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
return err
}
// Clear the topic from the consumer channel. This will also close any consumers listening on that topic.
if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
- log.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+ logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
return err
}
return nil
@@ -389,11 +385,11 @@
sc.lockTopic(topic)
defer sc.unLockTopic(topic)
- log.Debugw("subscribe", log.Fields{"topic": topic.Name})
+ logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
// If a consumer already exists for that topic then reuse it
if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
- log.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
+ logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
// Create a channel specific to that consumer and add it to the consumers channel map
ch := make(chan *ic.InterContainerMessage)
sc.addChannelToConsumerChannelMap(topic, ch)
@@ -408,12 +404,12 @@
if sc.consumerType == PartitionConsumer {
if sc.autoCreateTopic {
if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
- log.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
}
if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
- log.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
} else if sc.consumerType == GroupCustomer {
@@ -421,7 +417,7 @@
// does not consume from a precreated topic in some scenarios
//if sc.autoCreateTopic {
// if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
- // log.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+ // logger.Errorw("create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
// return nil, err
// }
//}
@@ -435,12 +431,12 @@
groupId = sc.consumerGroupPrefix + topic.Name
}
if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
- log.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+ logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
return nil, err
}
} else {
- log.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+ logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
return nil, errors.New("unknown-consumer-type")
}
@@ -452,13 +448,13 @@
sc.lockTopic(topic)
defer sc.unLockTopic(topic)
- log.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+ logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
var err error
if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
- log.Errorw("failed-removing-channel", log.Fields{"error": err})
+ logger.Errorw("failed-removing-channel", log.Fields{"error": err})
}
if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
- log.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
+ logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
}
return err
}
@@ -470,11 +466,11 @@
// events to the channel is rate-limited by livenessChannelInterval.
if sc.liveness != nil {
if sc.alive != alive {
- log.Info("update-liveness-channel-because-change")
+ logger.Info("update-liveness-channel-because-change")
sc.liveness <- alive
sc.lastLivenessTime = time.Now()
} else if time.Now().Sub(sc.lastLivenessTime) > sc.livenessChannelInterval {
- log.Info("update-liveness-channel-because-interval")
+ logger.Info("update-liveness-channel-because-interval")
sc.liveness <- alive
sc.lastLivenessTime = time.Now()
}
@@ -482,7 +478,7 @@
// Only emit a log message when the state changes
if sc.alive != alive {
- log.Info("set-client-alive", log.Fields{"alive": alive})
+ logger.Info("set-client-alive", log.Fields{"alive": alive})
sc.alive = alive
}
}
@@ -491,7 +487,7 @@
func (sc *SaramaClient) setUnhealthy() {
sc.healthy = false
if sc.healthiness != nil {
- log.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+ logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
sc.healthiness <- sc.healthy
}
}
@@ -511,35 +507,35 @@
switch err.Error() {
case context.DeadlineExceeded.Error():
- log.Info("is-liveness-error-timeout")
+ logger.Info("is-liveness-error-timeout")
return true
case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
- log.Info("is-liveness-error-no-brokers")
+ logger.Info("is-liveness-error-no-brokers")
return true
case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
- log.Info("is-liveness-error-shutting-down")
+ logger.Info("is-liveness-error-shutting-down")
return true
case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
- log.Info("is-liveness-error-not-available")
+ logger.Info("is-liveness-error-not-available")
return true
case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
- log.Info("is-liveness-error-circuit-breaker-open")
+ logger.Info("is-liveness-error-circuit-breaker-open")
return true
}
if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
- log.Info("is-liveness-error-connection-refused")
+ logger.Info("is-liveness-error-connection-refused")
return true
}
if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
- log.Info("is-liveness-error-io-timeout")
+ logger.Info("is-liveness-error-io-timeout")
return true
}
// Other errors shouldn't trigger a loss of liveness
- log.Infow("is-liveness-error-ignored", log.Fields{"err": err})
+ logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
return false
}
@@ -552,7 +548,7 @@
var ok bool
// ascertain the value interface type is a proto.Message
if protoMsg, ok = msg.(proto.Message); !ok {
- log.Warnw("message-not-proto-message", log.Fields{"msg": msg})
+ logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
return errors.New(fmt.Sprintf("not-a-proto-msg-%s", msg))
}
@@ -560,7 +556,7 @@
var err error
// Create the Sarama producer message
if marshalled, err = proto.Marshal(protoMsg); err != nil {
- log.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+ logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
return err
}
key := ""
@@ -579,10 +575,10 @@
// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
select {
case ok := <-sc.producer.Successes():
- log.Debugw("message-sent", log.Fields{"status": ok.Topic})
+ logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
sc.updateLiveness(true)
case notOk := <-sc.producer.Errors():
- log.Debugw("error-sending", log.Fields{"status": notOk})
+ logger.Debugw("error-sending", log.Fields{"status": notOk})
if sc.isLivenessError(notOk) {
sc.updateLiveness(false)
}
@@ -597,10 +593,10 @@
// by the service (i.e. rw_core / ro_core) to update readiness status
// and/or take other actions.
func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
- log.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
+ logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
if enable {
if sc.liveness == nil {
- log.Info("kafka-create-liveness-channel")
+ logger.Info("kafka-create-liveness-channel")
// At least 1, so we can immediately post to it without blocking
// Setting a bigger number (10) allows the monitor to fall behind
// without blocking others. The monitor shouldn't really fall
@@ -621,10 +617,10 @@
// if the kafka consumers die, or some other problem occurs which is
// catastrophic that would require re-creating the client.
func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
- log.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+ logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
if enable {
if sc.healthiness == nil {
- log.Info("kafka-create-healthiness-channel")
+ logger.Info("kafka-create-healthiness-channel")
// At least 1, so we can immediately post to it without blocking
// Setting a bigger number (10) allows the monitor to fall behind
// without blocking others. The monitor shouldn't really fall
@@ -659,10 +655,10 @@
// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
select {
case ok := <-sc.producer.Successes():
- log.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
+ logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
sc.updateLiveness(true)
case notOk := <-sc.producer.Errors():
- log.Debugw("liveness-error-sending", log.Fields{"status": notOk})
+ logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
if sc.isLivenessError(notOk) {
sc.updateLiveness(false)
}
@@ -700,7 +696,7 @@
var cAdmin sarama.ClusterAdmin
var err error
if cAdmin, err = sarama.NewClusterAdmin([]string{kafkaFullAddr}, config); err != nil {
- log.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": kafkaFullAddr})
+ logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": kafkaFullAddr})
return err
}
sc.cAdmin = cAdmin
@@ -760,7 +756,7 @@
consumerCh.channels = append(consumerCh.channels, ch)
return
}
- log.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+ logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
}
//closeConsumers closes a list of sarama consumers. Each consumer can be either a partition consumer or a group consumer
@@ -770,7 +766,7 @@
// Is it a partition consumer?
if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
if errTemp := partionConsumer.Close(); errTemp != nil {
- log.Debugw("partition!!!", log.Fields{"err": errTemp})
+ logger.Debugw("partition!!!", log.Fields{"err": errTemp})
if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
// This can occur on a race condition
err = nil
@@ -800,7 +796,7 @@
consumerCh.channels = removeChannel(consumerCh.channels, ch)
// If there are no more channels then we can close the consumer itself
if len(consumerCh.channels) == 0 {
- log.Debugw("closing-consumers", log.Fields{"topic": topic})
+ logger.Debugw("closing-consumers", log.Fields{"topic": topic})
err := closeConsumers(consumerCh.consumers)
//err := consumerCh.consumers.Close()
delete(sc.topicToConsumerChannelMap, topic.Name)
@@ -808,7 +804,7 @@
}
return nil
}
- log.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+ logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
return errors.New("topic-does-not-exist")
}
@@ -829,7 +825,7 @@
delete(sc.topicToConsumerChannelMap, topic.Name)
return err
}
- log.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+ logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
return nil
}
@@ -868,12 +864,12 @@
brokers := []string{kafkaFullAddr}
if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
- log.Errorw("error-starting-publisher", log.Fields{"error": err})
+ logger.Errorw("error-starting-publisher", log.Fields{"error": err})
return err
} else {
sc.producer = producer
}
- log.Info("Kafka-publisher-created")
+ logger.Info("Kafka-publisher-created")
return nil
}
@@ -889,12 +885,12 @@
brokers := []string{kafkaFullAddr}
if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
- log.Errorw("error-starting-consumers", log.Fields{"error": err})
+ logger.Errorw("error-starting-consumers", log.Fields{"error": err})
return err
} else {
sc.consumer = consumer
}
- log.Info("Kafka-consumers-created")
+ logger.Info("Kafka-consumers-created")
return nil
}
@@ -918,10 +914,10 @@
var err error
if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
- log.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+ logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
return nil, err
}
- log.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+ logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
//sc.groupConsumers[topic.Name] = consumer
sc.addToGroupConsumers(topic.Name, consumer)
@@ -942,7 +938,7 @@
}
func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
- log.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+ logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
startloop:
for {
select {
@@ -950,38 +946,38 @@
if ok {
if sc.isLivenessError(err) {
sc.updateLiveness(false)
- log.Warnw("partition-consumers-error", log.Fields{"error": err})
+ logger.Warnw("partition-consumers-error", log.Fields{"error": err})
}
} else {
// Channel is closed
break startloop
}
case msg, ok := <-consumer.Messages():
- //log.Debugw("message-received", log.Fields{"msg": msg, "receivedTopic": msg.Topic})
+ //logger.Debugw("message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
if !ok {
// channel is closed
break startloop
}
msgBody := msg.Value
sc.updateLiveness(true)
- log.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+ logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
icm := &ic.InterContainerMessage{}
if err := proto.Unmarshal(msgBody, icm); err != nil {
- log.Warnw("partition-invalid-message", log.Fields{"error": err})
+ logger.Warnw("partition-invalid-message", log.Fields{"error": err})
continue
}
go sc.dispatchToConsumers(consumerChnls, icm)
case <-sc.doneCh:
- log.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
+ logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
break startloop
}
}
- log.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
+ logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
sc.setUnhealthy()
}
func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
- log.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+ logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
startloop:
for {
@@ -991,44 +987,44 @@
if sc.isLivenessError(err) {
sc.updateLiveness(false)
}
- log.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+ logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
} else {
- log.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
+ logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
// channel is closed
break startloop
}
case msg, ok := <-consumer.Messages():
if !ok {
- log.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+ logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
// Channel closed
break startloop
}
sc.updateLiveness(true)
- log.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+ logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
msgBody := msg.Value
icm := &ic.InterContainerMessage{}
if err := proto.Unmarshal(msgBody, icm); err != nil {
- log.Warnw("invalid-message", log.Fields{"error": err})
+ logger.Warnw("invalid-message", log.Fields{"error": err})
continue
}
go sc.dispatchToConsumers(consumerChnls, icm)
consumer.MarkOffset(msg, "")
case ntf := <-consumer.Notifications():
- log.Debugw("group-received-notification", log.Fields{"notification": ntf})
+ logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
case <-sc.doneCh:
- log.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
+ logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
break startloop
}
}
- log.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
+ logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
sc.setUnhealthy()
}
func (sc *SaramaClient) startConsumers(topic *Topic) error {
- log.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
+ logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
var consumerCh *consumerChannels
if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
- log.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
+ logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
return errors.New("consumers-not-exist")
}
// For each consumer listening for that topic, start a consumption loop
@@ -1038,7 +1034,7 @@
} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
} else {
- log.Errorw("invalid-consumer", log.Fields{"topic": topic})
+ logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
return errors.New("invalid-consumer")
}
}
@@ -1052,7 +1048,7 @@
var err error
if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
- log.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
@@ -1085,7 +1081,7 @@
var pConsumer *scc.Consumer
var err error
if pConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
- log.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
@@ -1106,10 +1102,10 @@
}
func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
- log.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
+ logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
partitionList, err := sc.consumer.Partitions(topic.Name)
if err != nil {
- log.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
@@ -1117,7 +1113,7 @@
for _, partition := range partitionList {
var pConsumer sarama.PartitionConsumer
if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
- log.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+ logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
pConsumers = append(pConsumers, pConsumer)
@@ -1132,7 +1128,7 @@
if channel == ch {
channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
close(channel)
- log.Debug("channel-closed")
+ logger.Debug("channel-closed")
return channels[:len(channels)-1]
}
}
@@ -1154,7 +1150,7 @@
consumer := sc.groupConsumers[topic]
delete(sc.groupConsumers, topic)
if err := consumer.Close(); err != nil {
- log.Errorw("failure-closing-consumer", log.Fields{"error": err})
+ logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
return err
}
}
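Note: the per-package registration that the hunks above rely on is what makes run-time tuning of a single package's verbosity practical. As a minimal sketch (not part of this change): the package-path key below is an assumption and must match whatever name the package registered under via log.AddPackage, and SetPackageLogLevel is the pre-existing API this change builds on.

package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v2/pkg/log"
)

func main() {
	// Assumed registration key for the kafka package; adjust to the
	// actual name used by log.AddPackage.
	pkg := "github.com/opencord/voltha-lib-go/v2/pkg/kafka"

	// Raise only the kafka package's verbosity; all other packages keep
	// their configured levels.
	log.SetPackageLogLevel(pkg, log.DebugLevel)

	if level, err := log.GetPackageLogLevel(pkg); err == nil {
		fmt.Println("kafka package log level is now", level)
	}
}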
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 026c6c0..43567e3 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -107,6 +107,9 @@
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
+
+	// GetLogLevel returns the log level of this specific logger
+ GetLogLevel() int
}
// Fields is used as key-value pairs for structured logging
@@ -119,8 +122,9 @@
var cfgs map[string]zp.Config
type logger struct {
- log *zp.SugaredLogger
- parent *zp.Logger
+ log *zp.SugaredLogger
+ parent *zp.Logger
+ packageName string
}
func intToAtomicLevel(l int) zp.AtomicLevel {
@@ -266,8 +270,9 @@
}
loggers[pkgName] = &logger{
- log: l.Sugar(),
- parent: l,
+ log: l.Sugar(),
+ parent: l,
+ packageName: pkgName,
}
return loggers[pkgName], nil
}
@@ -287,8 +292,9 @@
}
loggers[pkgName] = &logger{
- log: l.Sugar(),
- parent: l,
+ log: l.Sugar(),
+ parent: l,
+ packageName: pkgName,
}
}
return nil
@@ -334,8 +340,9 @@
// Set the logger
loggers[pkgName] = &logger{
- log: l.Sugar(),
- parent: l,
+ log: l.Sugar(),
+ parent: l,
+ packageName: pkgName,
}
return loggers[pkgName], nil
}
@@ -638,6 +645,11 @@
return l.parent.Core().Enabled(intToLevel(level))
}
+// GetLogLevel returns the current level of the logger
+func (l logger) GetLogLevel() int {
+ return levelToInt(cfgs[l.packageName].Level.Level())
+}
+
// With returns a logger initialized with the key-value pairs
func With(keysAndValues Fields) Logger {
return logger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
@@ -767,3 +779,8 @@
func V(level int) bool {
return getPackageLevelLogger().V(level)
}
+
+// GetLogLevel returns the log level of the invoking package
+func GetLogLevel() int {
+ return getPackageLevelLogger().GetLogLevel()
+}
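Note: the interplay between the new GetLogLevel() and the existing V() can be seen in a short sketch. The registration below is purely illustrative, following the same AddPackage pattern used throughout this change; the package name and level are placeholders.

package main

import (
	"github.com/opencord/voltha-lib-go/v2/pkg/log"
)

var logger log.Logger

func init() {
	var err error
	// Illustrative registration; the level would normally come from config.
	logger, err = log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"pkg": "sketch"})
	if err != nil {
		panic(err)
	}
}

func main() {
	// GetLogLevel() reads the cached per-package level straight off the
	// logger, avoiding the by-name lookup done by GetPackageLogLevel().
	if logger.GetLogLevel() <= log.DebugLevel {
		logger.Debug("debug-enabled")
	}

	// V() reports the same information as a boolean: true when messages
	// at the given level would be emitted.
	if logger.V(log.DebugLevel) {
		logger.Debug("debug-enabled-via-V")
	}
}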
diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go
index 5ca0955..f32c55e 100644
--- a/pkg/log/log_test.go
+++ b/pkg/log/log_test.go
@@ -91,6 +91,19 @@
l, err := GetPackageLogLevel(name)
assert.Nil(t, err)
assert.Equal(t, l, expectedLevel)
+ // Get the package log level by invoking the specific logger created for this package
+ // This is a less expensive operation than calling GetPackageLogLevel()
+ level := myLoggers[name].GetLogLevel()
+ assert.Equal(t, level, expectedLevel)
+ // Check the verbosity level
+ for _, level := range levels {
+ toDisplay := myLoggers[name].V(level)
+ if level < expectedLevel {
+ assert.False(t, toDisplay)
+ } else {
+ assert.True(t, toDisplay)
+ }
+ }
}
}
// Test setting the log level for all packages
@@ -100,6 +113,17 @@
l, err := GetPackageLogLevel(name)
assert.Nil(t, err)
assert.Equal(t, l, expectedLevel)
+ level := myLoggers[name].GetLogLevel()
+ assert.Equal(t, level, expectedLevel)
+ // Check the verbosity level
+ for _, level := range levels {
+ toDisplay := myLoggers[name].V(level)
+ if level < expectedLevel {
+ assert.False(t, toDisplay)
+ } else {
+ assert.True(t, toDisplay)
+ }
+ }
}
}
}
diff --git a/pkg/mocks/common.go b/pkg/mocks/common.go
index c20dab1..d6d4062 100644
--- a/pkg/mocks/common.go
+++ b/pkg/mocks/common.go
@@ -20,14 +20,15 @@
)
const (
- logLevel = log.FatalLevel
+ logLevel = log.ErrorLevel
)
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
func init() {
 // Set up this package so that its log level can be modified at run time
- _, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "mocks"})
+ var err error
+ logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "mocks"})
if err != nil {
panic(err)
}
diff --git a/pkg/mocks/etcd_server.go b/pkg/mocks/etcd_server.go
index 3246ca0..487b991 100644
--- a/pkg/mocks/etcd_server.go
+++ b/pkg/mocks/etcd_server.go
@@ -18,7 +18,6 @@
import (
"fmt"
"go.etcd.io/etcd/embed"
- "log"
"net/url"
"os"
"time"
@@ -58,19 +57,19 @@
cfg.Dir = localPersistentStorageDir
cfg.Logger = "zap"
if !islogLevelValid(logLevel) {
- log.Fatalf("Invalid log level -%s", logLevel)
+ logger.Fatalf("Invalid log level -%s", logLevel)
}
cfg.LogLevel = logLevel
acurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", clientPort))
if err != nil {
- log.Fatalf("Invalid client port -%d", clientPort)
+ logger.Fatalf("Invalid client port -%d", clientPort)
}
cfg.ACUrls = []url.URL{*acurl}
cfg.LCUrls = []url.URL{*acurl}
apurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", peerPort))
if err != nil {
- log.Fatalf("Invalid peer port -%d", peerPort)
+ logger.Fatalf("Invalid peer port -%d", peerPort)
}
cfg.LPUrls = []url.URL{*apurl}
cfg.APUrls = []url.URL{*apurl}
@@ -100,23 +99,23 @@
// Remove the local directory as
// a safeguard for the case where a prior test failed
if err := os.RemoveAll(cfg.Dir); err != nil {
- log.Fatalf("Failure removing local directory %s", cfg.Dir)
+ logger.Fatalf("Failure removing local directory %s", cfg.Dir)
}
e, err := embed.StartEtcd(cfg)
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
select {
case <-e.Server.ReadyNotify():
- log.Printf("Embedded Etcd server is ready!")
+ logger.Debug("Embedded Etcd server is ready!")
case <-time.After(serverStartUpTimeout):
e.Server.HardStop() // trigger a shutdown
e.Close()
- log.Fatal("Embedded Etcd server took too long to start!")
+ logger.Fatal("Embedded Etcd server took too long to start!")
case err := <-e.Err():
e.Server.HardStop() // trigger a shutdown
e.Close()
- log.Fatalf("Embedded Etcd server errored out - %s", err)
+ logger.Fatalf("Embedded Etcd server errored out - %s", err)
}
return &EtcdServer{server: e}
}
@@ -128,7 +127,7 @@
es.server.Server.HardStop()
es.server.Close()
if err := os.RemoveAll(storage); err != nil {
- log.Fatalf("Failure removing local directory %s", es.server.Config().Dir)
+ logger.Fatalf("Failure removing local directory %s", es.server.Config().Dir)
}
}
}
diff --git a/pkg/mocks/etcd_server_test.go b/pkg/mocks/etcd_server_test.go
index 0463daa..90a3654 100644
--- a/pkg/mocks/etcd_server_test.go
+++ b/pkg/mocks/etcd_server_test.go
@@ -21,7 +21,6 @@
"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
"github.com/phayes/freeport"
"github.com/stretchr/testify/assert"
- "log"
"os"
"testing"
)
@@ -32,21 +31,21 @@
func setup() {
clientPort, err := freeport.GetFreePort()
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
peerPort, err := freeport.GetFreePort()
if err != nil {
- log.Fatal(err)
+ logger.Fatal(err)
}
etcdServer = StartEtcdServer(MKConfig("voltha.mock.test", clientPort, peerPort, "voltha.lib.mocks.etcd", "error"))
if etcdServer == nil {
- log.Fatal("Embedded server failed to start")
+ logger.Fatal("Embedded server failed to start")
}
clientAddr := fmt.Sprintf("localhost:%d", clientPort)
client, err = kvstore.NewEtcdClient(clientAddr, 10)
if err != nil || client == nil {
etcdServer.Stop()
- log.Fatal("Failed to create an Etcd client")
+ logger.Fatal("Failed to create an Etcd client")
}
}
diff --git a/pkg/mocks/kafka_client.go b/pkg/mocks/kafka_client.go
index 87dd9e8..381c093 100644
--- a/pkg/mocks/kafka_client.go
+++ b/pkg/mocks/kafka_client.go
@@ -38,7 +38,7 @@
}
func (kc *KafkaClient) Start() error {
- log.Debug("kafka-client-started")
+ logger.Debug("kafka-client-started")
return nil
}
@@ -51,11 +51,11 @@
}
delete(kc.topicsChannelMap, topic)
}
- log.Debug("kafka-client-stopped")
+ logger.Debug("kafka-client-stopped")
}
func (kc *KafkaClient) CreateTopic(topic *kafka.Topic, numPartition int, repFactor int) error {
- log.Debugw("CreatingTopic", log.Fields{"topic": topic.Name, "numPartition": numPartition, "replicationFactor": repFactor})
+ logger.Debugw("CreatingTopic", log.Fields{"topic": topic.Name, "numPartition": numPartition, "replicationFactor": repFactor})
kc.lock.Lock()
defer kc.lock.Unlock()
if _, ok := kc.topicsChannelMap[topic.Name]; ok {
@@ -67,7 +67,7 @@
}
func (kc *KafkaClient) DeleteTopic(topic *kafka.Topic) error {
- log.Debugw("DeleteTopic", log.Fields{"topic": topic.Name})
+ logger.Debugw("DeleteTopic", log.Fields{"topic": topic.Name})
kc.lock.Lock()
defer kc.lock.Unlock()
delete(kc.topicsChannelMap, topic.Name)
@@ -75,7 +75,7 @@
}
func (kc *KafkaClient) Subscribe(topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ic.InterContainerMessage, error) {
- log.Debugw("Subscribe", log.Fields{"topic": topic.Name, "args": kvArgs})
+ logger.Debugw("Subscribe", log.Fields{"topic": topic.Name, "args": kvArgs})
kc.lock.Lock()
defer kc.lock.Unlock()
ch := make(chan *ic.InterContainerMessage)
@@ -89,7 +89,7 @@
}
func (kc *KafkaClient) UnSubscribe(topic *kafka.Topic, ch <-chan *ic.InterContainerMessage) error {
- log.Debugw("UnSubscribe", log.Fields{"topic": topic.Name})
+ logger.Debugw("UnSubscribe", log.Fields{"topic": topic.Name})
kc.lock.Lock()
defer kc.lock.Unlock()
if chnls, ok := kc.topicsChannelMap[topic.Name]; ok {
@@ -118,7 +118,7 @@
kc.lock.RLock()
defer kc.lock.RUnlock()
for _, ch := range kc.topicsChannelMap[topic.Name] {
- log.Debugw("Publishing", log.Fields{"fromTopic": req.Header.FromTopic, "toTopic": topic.Name, "id": req.Header.Id})
+ logger.Debugw("Publishing", log.Fields{"fromTopic": req.Header.FromTopic, "toTopic": topic.Name, "id": req.Header.Id})
ch <- req
}
return nil
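Note: for completeness, a hypothetical test-side use of the mock client above (the NewKafkaClient constructor name is an assumption, as it is not shown in this diff; the Subscribe/UnSubscribe signatures are taken from the hunks above).

package main

import (
	"github.com/opencord/voltha-lib-go/v2/pkg/kafka"
	"github.com/opencord/voltha-lib-go/v2/pkg/mocks"
)

func main() {
	kc := mocks.NewKafkaClient() // assumed constructor
	if err := kc.Start(); err != nil {
		panic(err)
	}
	defer kc.Stop()

	topic := &kafka.Topic{Name: "test-topic"}
	ch, err := kc.Subscribe(topic)
	if err != nil {
		panic(err)
	}

	// Drive the code under test here; anything it publishes to
	// "test-topic" arrives on ch without a real broker.

	_ = kc.UnSubscribe(topic, ch)
}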