[VOL-3199] Added support for dynamic enable/disable of Trace Publishing
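
The core now syncs its tracing configuration with the KV store.
voltha-lib-go is bumped to v3.2.8, which adds a per-component
ComponentLogFeaturesController: on startup rw-core launches
conf.StartLogFeaturesConfigProcessing(), which persists the initial
tracing status (as set from the command line) under the component's
"trace_publish" key if that key is not yet present, and then watches
the key for changes. A value of ENABLED or DISABLED is applied at run
time through log.GetGlobalLFM().SetTracePublishingStatus(); any other
value is ignored with a warning.

Related changes pulled in with the library update:
- spans are created for etcd backend operations and propagated into
  kafka InvokeRPC calls
- the gRPC interceptors use log.ActiveTracerProxy so that a tracer
  change takes effect without a restart
- the kafka/etcd liveness and healthiness channels are now protected
  by mutexes
- the deprecated context-less Logger interface (log_classic.go) is
  removed; test code is migrated to CLogger accordingly

As an illustration only (the exact key prefix and component name
depend on the deployment's KV store configuration), tracing for the
core could then be toggled with something like:

  etcdctl put service/voltha/config/rw-core/logfeatures/trace_publish DISABLED
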
Change-Id: I920b2964c89ad823985da29d7f8279689a62d3b6
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 498eb45..550260f 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -34,7 +34,7 @@
)
var (
- BenchmarkProxyLogger log.Logger
+ BenchmarkProxyLogger log.CLogger
TestProxyRootLogicalDevice *Proxy
TestProxyRootDevice *Proxy
TestProxyRootAdapter *Proxy
@@ -50,16 +50,17 @@
)
func init() {
- BenchmarkProxyLogger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
+ BenchmarkProxyLogger, _ = log.RegisterPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
+ ctx := context.Background()
//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
//Setup default logger - applies for packages that do not have specific logger set
if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"}); err != nil {
- log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ BenchmarkProxyLogger.With(log.Fields{"error": err}).Fatal(ctx, "Cannot setup logging")
}
// Update all loggers (provisioned via init) with a common field
if err := log.UpdateAllLoggers(log.Fields{"instanceId": "PLT"}); err != nil {
- log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ BenchmarkProxyLogger.With(log.Fields{"error": err}).Fatal(ctx, "Cannot setup logging")
}
log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
@@ -136,12 +137,13 @@
var mockBackend = &db.Backend{Client: &mockKV{data: make(map[string]interface{})}}
func TestProxy_1_1_1_Add_NewDevice(t *testing.T) {
+ ctx := context.Background()
devIDBin, _ := uuid.New().MarshalBinary()
TestProxyDeviceID = "0001" + hex.EncodeToString(devIDBin)[:12]
TestProxyDevice.Id = TestProxyDeviceID
if err := TestProxyRootDevice.Set(context.Background(), TestProxyDeviceID, TestProxyDevice); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to add test proxy device due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to add test proxy device due to error: %v", err)
t.Errorf("failed to add device: %s", err)
}
t.Logf("Added device : %+v", TestProxyDevice.Id)
@@ -149,7 +151,7 @@
// Verify that the added device can now be retrieved
d := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find added device")
@@ -160,16 +162,17 @@
}
func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
+ ctx := context.Background()
TestProxyDevice.Id = TestProxyDeviceID
if err := TestProxyRootDevice.Set(context.Background(), "devices", TestProxyDevice); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to add device to test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to add device to test proxy due to error: %v", err)
assert.NotNil(t, err)
}
d := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find added device")
@@ -184,12 +187,13 @@
}
func TestProxy_1_1_3_Add_NewAdapter(t *testing.T) {
+ ctx := context.Background()
TestProxyAdapterID = "test-adapter"
TestProxyAdapter.Id = TestProxyAdapterID
// Add the adapter
if err := TestProxyRootAdapter.Set(context.Background(), TestProxyAdapterID, TestProxyAdapter); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to add adapter to test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to add adapter to test proxy due to error: %v", err)
assert.NotNil(t, err)
} else {
t.Logf("Added adapter : %+v", TestProxyAdapter.Id)
@@ -198,7 +202,7 @@
// Verify that the added device can now be retrieved
d := &voltha.Device{}
if have, err := TestProxyRootAdapter.Get(context.Background(), TestProxyAdapterID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to retrieve device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to retrieve device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find added adapter")
@@ -210,9 +214,10 @@
}
func TestProxy_1_2_1_Get_AllDevices(t *testing.T) {
+ ctx := context.Background()
var devices []*voltha.Device
if err := TestProxyRootDevice.List(context.Background(), &devices); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get all devices info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get all devices info from test proxy due to error: %v", err)
assert.NotNil(t, err)
}
if len(devices) == 0 {
@@ -225,9 +230,10 @@
}
func TestProxy_1_2_2_Get_SingleDevice(t *testing.T) {
+ ctx := context.Background()
d := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyTargetDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get single device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get single device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Errorf("Failed to find device : %s", TestProxyTargetDeviceID)
@@ -238,11 +244,12 @@
}
func TestProxy_1_3_1_Update_Device(t *testing.T) {
+ ctx := context.Background()
var fwVersion int
retrieved := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyTargetDeviceID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to get device")
@@ -259,12 +266,12 @@
retrieved.FirmwareVersion = strconv.Itoa(fwVersion)
if err := TestProxyRootDevice.Set(context.Background(), TestProxyTargetDeviceID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to update device info test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to update device info test proxy due to error: %v", err)
assert.NotNil(t, err)
}
afterUpdate := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyTargetDeviceID, afterUpdate); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get device info from test proxy due to error: %v", err)
} else if !have {
t.Error("Failed to update device")
} else {
@@ -273,7 +280,7 @@
d := &voltha.Device{}
if have, err := TestProxyRootDevice.Get(context.Background(), TestProxyTargetDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get device info from test proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get device info from test proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find updated device (root proxy)")
@@ -285,12 +292,13 @@
}
func TestProxy_1_3_3_Update_Adapter(t *testing.T) {
+ ctx := context.Background()
adaptersProxy := NewDBPath(mockBackend).Proxy("adapters")
retrieved := &voltha.Adapter{}
if have, err := TestProxyRootAdapter.Get(context.Background(), TestProxyAdapterID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to retrieve adapter info from adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to retrieve adapter info from adapters proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to get adapter")
@@ -300,7 +308,7 @@
retrieved.Version = "test-adapter-version-2"
if err := adaptersProxy.Set(context.Background(), TestProxyAdapterID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to update adapter info in adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to update adapter info in adapters proxy due to error: %v", err)
assert.NotNil(t, err)
} else {
t.Logf("Updated adapter : %s", retrieved.Id)
@@ -308,7 +316,7 @@
d := &voltha.Adapter{}
if have, err := TestProxyRootAdapter.Get(context.Background(), TestProxyAdapterID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get updated adapter info from adapters proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get updated adapter info from adapters proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find updated adapter (root proxy)")
@@ -320,8 +328,9 @@
}
func TestProxy_1_4_1_Remove_Device(t *testing.T) {
+ ctx := context.Background()
if err := TestProxyRootDevice.Remove(context.Background(), TestProxyDeviceID); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to remove device from devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to remove device from devices proxy due to error: %v", err)
t.Errorf("failed to remove device: %s", err)
} else {
t.Logf("Removed device : %+v", TestProxyDeviceID)
@@ -330,7 +339,7 @@
d := &voltha.Device{}
have, err := TestProxyRootDevice.Get(context.Background(), TestProxyDeviceID, d)
if err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get device info from devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get device info from devices proxy due to error: %v", err)
assert.NotNil(t, err)
} else if have {
djson, _ := json.Marshal(d)
@@ -341,13 +350,14 @@
}
func TestProxy_2_1_1_Add_NewLogicalDevice(t *testing.T) {
+ ctx := context.Background()
ldIDBin, _ := uuid.New().MarshalBinary()
TestProxyLogicalDeviceID = "0001" + hex.EncodeToString(ldIDBin)[:12]
TestProxyLogicalDevice.Id = TestProxyLogicalDeviceID
if err := TestProxyRootLogicalDevice.Set(context.Background(), TestProxyLogicalDeviceID, TestProxyLogicalDevice); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to add new logical device into proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to add new logical device into proxy due to error: %v", err)
assert.NotNil(t, err)
} else {
t.Logf("Added logical device : %s", TestProxyLogicalDevice.Id)
@@ -355,7 +365,7 @@
ld := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), TestProxyLogicalDeviceID, ld); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get logical device info from logical device proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get logical device info from logical device proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find added logical device")
@@ -366,16 +376,17 @@
}
func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
+ ctx := context.Background()
TestProxyLogicalDevice.Id = TestProxyLogicalDeviceID
if err := TestProxyRootLogicalDevice.Set(context.Background(), "logical_devices", TestProxyLogicalDevice); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to add logical device due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to add logical device due to error: %v", err)
assert.NotNil(t, err)
}
device := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), "logical_devices", device); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get logical device info from logical device proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get logical device info from logical device proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find added logical device")
@@ -387,9 +398,10 @@
}
func TestProxy_2_2_1_Get_AllLogicalDevices(t *testing.T) {
+ ctx := context.Background()
var logicalDevices []*voltha.LogicalDevice
if err := TestProxyRootLogicalDevice.List(context.Background(), &logicalDevices); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get all logical devices from proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get all logical devices from proxy due to error: %v", err)
assert.NotNil(t, err)
}
if len(logicalDevices) == 0 {
@@ -402,9 +414,10 @@
}
func TestProxy_2_2_2_Get_SingleLogicalDevice(t *testing.T) {
+ ctx := context.Background()
ld := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), TestProxyTargetLogicalDeviceID, ld); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get single logical device from proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get single logical device from proxy due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Errorf("Failed to find logical device : %s", TestProxyTargetLogicalDeviceID)
@@ -416,11 +429,12 @@
}
func TestProxy_2_3_1_Update_LogicalDevice(t *testing.T) {
+ ctx := context.Background()
var fwVersion int
retrieved := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), TestProxyTargetLogicalDeviceID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get logical devices due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get logical devices due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to get logical device")
@@ -437,7 +451,7 @@
retrieved.RootDeviceId = strconv.Itoa(fwVersion)
if err := TestProxyRootLogicalDevice.Set(context.Background(), TestProxyTargetLogicalDeviceID, retrieved); err != nil {
- BenchmarkProxyLogger.Errorf("Faield to update logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Faield to update logical device info due to error: %v", err)
assert.NotNil(t, err)
} else {
t.Log("Updated logical device")
@@ -445,7 +459,7 @@
d := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), TestProxyTargetLogicalDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get logical device info due to error: %v", err)
assert.NotNil(t, err)
} else if !have {
t.Error("Failed to find updated logical device (root proxy)")
@@ -457,8 +471,9 @@
}
func TestProxy_2_4_1_Remove_Device(t *testing.T) {
+ ctx := context.Background()
if err := TestProxyRootLogicalDevice.Remove(context.Background(), "logical_devices/"+TestProxyLogicalDeviceID); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to remove device from logical devices proxy due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to remove device from logical devices proxy due to error: %v", err)
t.Errorf("Failed to remove logical device: %s", err)
} else {
t.Logf("Removed device : %+v", TestProxyLogicalDeviceID)
@@ -466,7 +481,7 @@
d := &voltha.LogicalDevice{}
if have, err := TestProxyRootLogicalDevice.Get(context.Background(), "logical_devices/"+TestProxyLogicalDeviceID, d); err != nil {
- BenchmarkProxyLogger.Errorf("Failed to get logical device info due to error: %v", err)
+ BenchmarkProxyLogger.Errorf(ctx, "Failed to get logical device info due to error: %v", err)
assert.NotNil(t, err)
} else if have {
djson, _ := json.Marshal(d)
diff --git a/go.mod b/go.mod
index b7aabf1..dbecb5d 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@
github.com/gogo/protobuf v1.3.0
github.com/golang/protobuf v1.3.2
github.com/google/uuid v1.1.1
- github.com/opencord/voltha-lib-go/v3 v3.2.5
+ github.com/opencord/voltha-lib-go/v3 v3.2.8
github.com/opencord/voltha-protos/v3 v3.4.1
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
github.com/stretchr/testify v1.4.0
diff --git a/go.sum b/go.sum
index 07f65f1..cf618f0 100644
--- a/go.sum
+++ b/go.sum
@@ -198,8 +198,8 @@
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v3 v3.2.5 h1:njPoUEcRu6gATMeKJqbQM4lnh+SKC1iRJm++muSBCQ8=
-github.com/opencord/voltha-lib-go/v3 v3.2.5/go.mod h1:MIWsmMGxIRLK8dG+CNVcE/cJCzOondyRbwJ1708BYgU=
+github.com/opencord/voltha-lib-go/v3 v3.2.8 h1:aQGuX6aJmCK3d0JXnYOiMhzDsMwVy+lyw5FOZ/ggakk=
+github.com/opencord/voltha-lib-go/v3 v3.2.8/go.mod h1:MIWsmMGxIRLK8dG+CNVcE/cJCzOondyRbwJ1708BYgU=
github.com/opencord/voltha-protos/v3 v3.4.1 h1:Nrr0jKy7WiMiFlXYiK8z73RvI4K4L5b/top7d3htQQI=
github.com/opencord/voltha-protos/v3 v3.4.1/go.mod h1:nl1ETp5Iw3avxOaKD8BJlYY5wYI4KeV95aT1pL63nto=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index 01543ff..5a18411 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -82,6 +82,7 @@
// sync logging config with kv store
cm := conf.NewConfigManager(ctx, kvClient, cf.KVStoreType, cf.KVStoreAddress, cf.KVStoreTimeout)
go conf.StartLogLevelConfigProcessing(cm, ctx)
+ go conf.StartLogFeaturesConfigProcessing(cm, ctx)
backend := cm.Backend
backend.LivenessChannelInterval = cf.LiveProbeInterval / 2
diff --git a/rw_core/core/device/agent.go b/rw_core/core/device/agent.go
index ebfb641..b6bd0ec 100755
--- a/rw_core/core/device/agent.go
+++ b/rw_core/core/device/agent.go
@@ -671,7 +671,7 @@
agent.requestQueue.RequestComplete()
if err := agent.deviceMgr.processTransition(log.WithSpanFromContext(context.Background(), ctx), device, previousState); err != nil {
- log.Errorw("failed-process-transition", log.Fields{"device-id": device.Id, "previousAdminState": previousState.Admin, "currentAdminState": device.AdminState})
+ logger.Errorw(ctx, "failed-process-transition", log.Fields{"device-id": device.Id, "previousAdminState": previousState.Admin, "currentAdminState": device.AdminState})
}
return nil
}
diff --git a/rw_core/core/device/remote/adapter_proxy_test.go b/rw_core/core/device/remote/adapter_proxy_test.go
index 151483a..fd6b94d 100755
--- a/rw_core/core/device/remote/adapter_proxy_test.go
+++ b/rw_core/core/device/remote/adapter_proxy_test.go
@@ -52,8 +52,9 @@
)
func init() {
+ ctx := context.Background()
if _, err := log.SetDefaultLogger(log.JSON, 0, log.Fields{"instanceId": coreInstanceID}); err != nil {
- log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ logger.With(log.Fields{"error": err}).Fatal(ctx, "Cannot setup logging")
}
// Set the log level to Warning
log.SetAllLogLevel(log.WarnLevel)
@@ -68,8 +69,6 @@
kafka.MsgClient(kc),
kafka.DefaultTopic(&kafka.Topic{Name: coreName}))
- ctx := context.Background()
-
if err = coreKafkaICProxy.Start(ctx); err != nil {
logger.Fatalw(ctx, "Failure-starting-core-kafka-intercontainerProxy", log.Fields{"error": err})
}
diff --git a/rw_core/main.go b/rw_core/main.go
index 98d3985..35472e5 100644
--- a/rw_core/main.go
+++ b/rw_core/main.go
@@ -141,7 +141,7 @@
// Add the probe to the context to pass to all the services started
probeCtx := context.WithValue(ctx, probe.ProbeContextKey, p)
- closer, err := log.InitTracingAndLogCorrelation(cf.TraceEnabled, cf.TraceAgentAddress, cf.LogCorrelationEnabled)
+ closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(cf.TraceEnabled, cf.TraceAgentAddress, cf.LogCorrelationEnabled)
if err != nil {
logger.Warnw(ctx, "unable-to-initialize-tracing-and-log-correlation-module", log.Fields{"error": err})
} else {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
index 505dc79..188bbbd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
@@ -146,7 +146,7 @@
}
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
return unPackResponse(ctx, rpc, device.Id, success, result)
}
@@ -170,7 +170,7 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -194,7 +194,7 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -215,7 +215,7 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -235,7 +235,7 @@
Value: &ic.IntType{Val: int64(portNo)},
}}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
logger.Debugw(ctx, "GetDevicePort-response", log.Fields{"device-id": deviceID, "success": success})
if success {
@@ -269,7 +269,7 @@
Value: &voltha.ID{Id: deviceID},
}}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
logger.Debugw(ctx, "ListDevicePorts-response", log.Fields{"device-id": deviceID, "success": success})
if success {
@@ -317,7 +317,7 @@
}
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -368,7 +368,7 @@
Value: oId,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
@@ -406,7 +406,7 @@
Value: id,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(ctx, rpc, parentDeviceId, success, result)
}
@@ -426,7 +426,7 @@
Value: id,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(ctx, rpc, parentDeviceId, success, result)
}
@@ -445,7 +445,7 @@
Value: id,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
@@ -505,7 +505,7 @@
}
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
@@ -541,7 +541,7 @@
Value: id,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
if success {
@@ -587,7 +587,7 @@
Key: "packet",
Value: pkt,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -611,7 +611,7 @@
Key: "device_reason",
Value: reason,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
@@ -629,7 +629,7 @@
Key: "device_pm_config",
Value: pmConfigs,
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
}
@@ -646,7 +646,7 @@
{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
}
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
return unPackResponse(ctx, rpc, parentDeviceId, success, result)
}
@@ -683,7 +683,7 @@
// Use a device specific topic as we are the only adaptercore handling requests for this device
replyToTopic := ap.getAdapterTopic()
- success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+ success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
return unPackResponse(ctx, rpc, deviceId, success, result)
}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
index 11aa8e6..9ea86f7 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
@@ -41,10 +41,11 @@
ConfigTypeLogLevel ConfigType = iota
ConfigTypeMetadata
ConfigTypeKafka
+ ConfigTypeLogFeatures
)
func (c ConfigType) String() string {
- return [...]string{"loglevel", "metadata", "kafka"}[c]
+ return [...]string{"loglevel", "metadata", "kafka", "logfeatures"}[c]
}
// ChangeEvent represents the event recieved from watch
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logfeaturescontroller.go
new file mode 100644
index 0000000..a0d77b8
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logfeaturescontroller.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+ "context"
+ "errors"
+ "github.com/opencord/voltha-lib-go/v3/pkg/log"
+ "os"
+ "strings"
+)
+
+const (
+ defaultTracingStatusKey = "trace_publish" // kvstore key containing tracing configuration status
+)
+
+// ComponentLogFeaturesController represents the configuration of the logging-related features (tracing and log
+// correlation) of a specific Voltha component.
+type ComponentLogFeaturesController struct {
+ ComponentName string
+ componentNameConfig *ComponentConfig
+ configManager *ConfigManager
+ initialTracingStatus bool // Initial default tracing status set by helm chart
+}
+
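+// NewComponentLogFeaturesController returns a controller for the component named by the COMPONENT_NAME
+// environment variable, capturing the currently configured trace publishing status as the initial default.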
+func NewComponentLogFeaturesController(ctx context.Context, cm *ConfigManager) (*ComponentLogFeaturesController, error) {
+ logger.Debug(ctx, "creating-new-component-log-features-controller")
+ componentName := os.Getenv("COMPONENT_NAME")
+ if componentName == "" {
+ return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+ }
+
+ tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
+
+ return &ComponentLogFeaturesController{
+ ComponentName: componentName,
+ componentNameConfig: nil,
+ configManager: cm,
+ initialTracingStatus: tracingStatus,
+ }, nil
+
+}
+
+// StartLogFeaturesConfigProcessing persists initial config of Log Features into Config Store before
+// starting the loading and processing of Configuration updates
+func StartLogFeaturesConfigProcessing(cm *ConfigManager, ctx context.Context) {
+ cc, err := NewComponentLogFeaturesController(ctx, cm)
+ if err != nil {
+ logger.Errorw(ctx, "unable-to-construct-component-log-features-controller-instance-for-monitoring", log.Fields{"error": err})
+ return
+ }
+
+ cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogFeatures)
+ logger.Debugw(ctx, "component-log-features-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+ cc.persistInitialLogFeaturesConfigs(ctx)
+
+ cc.processLogFeaturesConfig(ctx)
+}
+
+// Method to persist Initial status of Log Correlation and Tracing features (as set from command line)
+// into config store (etcd kvstore), if not set yet
+func (cc *ComponentLogFeaturesController) persistInitialLogFeaturesConfigs(ctx context.Context) {
+
+ _, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+ if err != nil {
+ statusString := "DISABLED"
+ if cc.initialTracingStatus {
+ statusString = "ENABLED"
+ }
+ err = cc.componentNameConfig.Save(ctx, defaultTracingStatusKey, statusString)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-persist-component-initial-tracing-status-at-startup", log.Fields{"error": err, "tracingstatus": statusString})
+ }
+ }
+}
+
+// processLogFeaturesConfig will first load and apply configuration of log features. Then it will start waiting for any changes
+// made to configuration in config store (etcd) and apply the same
+func (cc *ComponentLogFeaturesController) processLogFeaturesConfig(ctx context.Context) {
+
+ // Load and apply Tracing Status for first time
+ cc.loadAndApplyTracingStatusUpdate(ctx)
+
+ componentConfigEventChan := cc.componentNameConfig.MonitorForConfigChange(ctx)
+
+ // process the change events received on the channel
+ var configEvent *ConfigChangeEvent
+ for {
+ select {
+ case <-ctx.Done():
+ return
+
+ case configEvent = <-componentConfigEventChan:
+ logger.Debugw(ctx, "processing-log-features-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+ if strings.HasSuffix(configEvent.ConfigAttribute, defaultTracingStatusKey) {
+ cc.loadAndApplyTracingStatusUpdate(ctx)
+ }
+ }
+ }
+
+}
+
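+// loadAndApplyTracingStatusUpdate reads the trace_publish value from the config store and, when it holds a valid
+// ENABLED/DISABLED value, applies it through the global Log Features Manager.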
+func (cc *ComponentLogFeaturesController) loadAndApplyTracingStatusUpdate(ctx context.Context) {
+
+ desiredTracingStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+ if err != nil || desiredTracingStatus == "" {
+ logger.Warn(ctx, "unable-to-retrieve-tracing-status-from-config-store")
+ return
+ }
+
+ if desiredTracingStatus != "ENABLED" && desiredTracingStatus != "DISABLED" {
+ logger.Warnw(ctx, "unsupported-tracing-status-configured-in-config-store", log.Fields{"tracing-status": desiredTracingStatus})
+ return
+ }
+
+ logger.Debugw(ctx, "retrieved-tracing-status", log.Fields{"tracing-status": desiredTracingStatus})
+
+ log.GetGlobalLFM().SetTracePublishingStatus(desiredTracingStatus == "ENABLED")
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
index f595dc1..efc0953 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -20,6 +20,7 @@
"context"
"errors"
"fmt"
+ "sync"
"time"
"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
@@ -40,7 +41,8 @@
Timeout time.Duration
Address string
PathPrefix string
- alive bool // Is this backend connection alive?
+ alive bool // Is this backend connection alive?
+ livenessMutex sync.Mutex
liveness chan bool // channel to post alive state
LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
lastLivenessTime time.Time // Instant of last alive state push
@@ -91,8 +93,9 @@
// so that in a live state, the core does not timeout and
// send a forced liveness message. Push alive state if the
// last push to channel was beyond livenessChannelInterval
+ b.livenessMutex.Lock()
+ defer b.livenessMutex.Unlock()
if b.liveness != nil {
-
if b.alive != alive {
logger.Debug(ctx, "update-liveness-channel-reason-change")
b.liveness <- alive
@@ -128,14 +131,10 @@
// and/or take other actions.
func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
logger.Debug(ctx, "enable-kvstore-liveness-channel")
-
+ b.livenessMutex.Lock()
+ defer b.livenessMutex.Unlock()
if b.liveness == nil {
- logger.Debug(ctx, "create-kvstore-liveness-channel")
-
- // Channel size of 10 to avoid any possibility of blocking in Load conditions
b.liveness = make(chan bool, 10)
-
- // Post initial alive state
b.liveness <- b.alive
b.lastLivenessTime = time.Now()
}
@@ -182,6 +181,9 @@
// List retrieves one or more items that match the specified key
func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-list")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
@@ -194,6 +196,9 @@
// Get retrieves an item that matches the specified key
func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-get")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
@@ -206,6 +211,9 @@
// Put stores an item value under the specifed key
func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-put")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
@@ -218,6 +226,9 @@
// Delete removes an item under the specified key
func (b *Backend) Delete(ctx context.Context, key string) error {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-delete")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
@@ -230,6 +241,9 @@
// CreateWatch starts watching events for the specified key
func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
@@ -238,6 +252,9 @@
// DeleteWatch stops watching events for the specified key
func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+ span, ctx := log.CreateChildSpan(ctx, "etcd-delete-watch")
+ defer span.Finish()
+
formattedPath := b.makePath(ctx, key)
logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/grpc/server.go
index 9f7c785..f5f9550 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/grpc/server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/grpc/server.go
@@ -19,7 +19,7 @@
"context"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
- "github.com/opentracing/opentracing-go"
+ "github.com/opencord/voltha-lib-go/v3/pkg/log"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -102,10 +102,10 @@
// Use Intercepters to automatically inject and publish Open Tracing Spans by this GRPC server
serverOptions := []grpc.ServerOption{
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
- grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(opentracing.GlobalTracer())),
+ grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
- grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(opentracing.GlobalTracer())),
+ grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
mkServerInterceptor(s),
))}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
index a876c09..266f6c1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
@@ -227,7 +227,7 @@
ep.deviceTypeServiceMap = make(map[string]string)
// Load the adapters
- blobs, err := ep.backend.List(context.Background(), "adapters")
+ blobs, err := ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "adapters")
if err != nil {
return err
}
@@ -257,7 +257,7 @@
}
}
// Load the device types
- blobs, err = ep.backend.List(context.Background(), "device_types")
+ blobs, err = ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "device_types")
if err != nil {
return err
}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
index 87c7ce4..69450fa 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
@@ -75,10 +75,12 @@
lockOfTopicLockMap sync.RWMutex
metadataMaxRetry int
alive bool
+ livenessMutex sync.Mutex
liveness chan bool
livenessChannelInterval time.Duration
lastLivenessTime time.Time
started bool
+ healthinessMutex sync.Mutex
healthy bool
healthiness chan bool
}
@@ -463,6 +465,8 @@
// so that in a live state, the core does not timeout and
// send a forced liveness message. Production of liveness
// events to the channel is rate-limited by livenessChannelInterval.
+ sc.livenessMutex.Lock()
+ defer sc.livenessMutex.Unlock()
if sc.liveness != nil {
if sc.alive != alive {
logger.Info(ctx, "update-liveness-channel-because-change")
@@ -485,6 +489,8 @@
// Once unhealthy, we never go back
func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
sc.healthy = false
+ sc.healthinessMutex.Lock()
+ defer sc.healthinessMutex.Unlock()
if sc.healthiness != nil {
logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
sc.healthiness <- sc.healthy
@@ -594,6 +600,8 @@
func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
if enable {
+ sc.livenessMutex.Lock()
+ defer sc.livenessMutex.Unlock()
if sc.liveness == nil {
logger.Info(ctx, "kafka-create-liveness-channel")
// At least 1, so we can immediately post to it without blocking
@@ -618,6 +626,8 @@
func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
if enable {
+ sc.healthinessMutex.Lock()
+ defer sc.healthinessMutex.Unlock()
if sc.healthiness == nil {
logger.Info(ctx, "kafka-create-healthiness-channel")
// At least 1, so we can immediately post to it without blocking
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/common.go
new file mode 100644
index 0000000..b0ce81b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/common.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+var logger CLogger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
index 1e23da1..b8d498c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
@@ -14,29 +14,39 @@
* limitations under the License.
*/
-//Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
-//1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
-//2. Dynamic log level change - for all registered packages (SetAllLogLevel)
-//3. Dynamic log level change - for a given package (SetPackageLogLevel)
-//4. Provides a default logger for unregistered packages
-//5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
-//6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (though its use should be avoided)
+// 5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+// 6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
//
// Using package-level logging (recommended approach). In the examples below, log refers to this log package.
-// 1. In the appropriate package add the following in the init section of the package. The log level can be changed
-// and any number of default fields can be added as well. The log level specifies the lowest log level that will be
-// in the output while the fields will be automatically added to all log printouts.
//
-// log.AddPackage(mylog.JSON, log.WarnLevel, log.Fields{"anyFieldName": "any value"})
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file).
+// The log level can be changed and any number of default fields can be added as well. The log level specifies
+// the lowest log level that will be in the output while the fields will be automatically added to all log printouts.
+// However, as Voltha components re-initialize the log level of each registered package to the default initial
+// loglevel passed as a CLI argument, the log level passed in the RegisterPackage call effectively has no effect.
//
-//2. In the calling package, just invoke any of the publicly available functions of the logger. Here is an example
-// to write an Info log with additional fields:
+// var logger log.CLogger
+// func init() {
+//         var err error
+//         logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//         if err != nil {
+//                 panic(err)
+//         }
+// }
//
-//log.Infow("An example", mylog.Fields{"myStringOutput": "output", "myIntOutput": 2})
+// 2. In the calling package, use any of the publicly available functions of the local package-level logger instance
+// created in the previous step. Here is an example of writing an Info log with additional fields:
//
-//3. To dynamically change the log level, you can use 1)SetLogLevel from inside your package or 2) SetPackageLogLevel
-// from anywhere or 3) SetAllLogLevel from anywhere.
+// logger.Infow("An example", mylog.Fields{"myStringOutput": "output", "myIntOutput": 2})
//
+// 3. To dynamically change the log level, you can use
+// a) SetLogLevel from inside your package or
+// b) SetPackageLogLevel from anywhere or
+// c) SetAllLogLevel from anywhere.
+//
+// The dynamic loglevel configuration feature also uses the SetPackageLogLevel method, based on triggers received
+// when configured loglevels change.
package log
@@ -510,134 +520,134 @@
// Debug logs a message at level Debug on the standard logger.
func (l clogger) Debug(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Debug(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
}
// Debugln logs a message at level Debug on the standard logger with a line feed. Default in any case.
func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Debug(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
}
// Debugw logs a message at level Debug on the standard logger.
func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Debugf(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
}
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
if l.V(DebugLevel) {
- l.log.With(ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
}
}
// Info logs a message at level Info on the standard logger.
func (l clogger) Info(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Info(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
}
// Infoln logs a message at level Info on the standard logger with a line feed. Default in any case.
func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Info(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
//msg := fmt.Sprintln(args...)
//l.sourced().Info(msg[:len(msg)-1])
}
// Infof logs a message at level Info on the standard logger.
func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Infof(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
}
// Infow logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
if l.V(InfoLevel) {
- l.log.With(ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
}
}
// Warn logs a message at level Warn on the standard logger.
func (l clogger) Warn(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
}
// Warnln logs a message at level Warn on the standard logger with a line feed. Default in any case.
func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warnf(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
}
// Warnw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
if l.V(WarnLevel) {
- l.log.With(ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
}
}
// Error logs a message at level Error on the standard logger.
func (l clogger) Error(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Error(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
}
// Errorln logs a message at level Error on the standard logger with a line feed. Default in any case.
func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Error(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
}
// Errorf logs a message at level Error on the standard logger.
func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Errorf(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
}
// Errorw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
if l.V(ErrorLevel) {
- l.log.With(ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
}
}
// Fatal logs a message at level Fatal on the standard logger.
func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Fatal(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
}
// Fatalln logs a message at level Fatal on the standard logger with a line feed. Default in any case.
func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Fatal(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
}
// Fatalw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
if l.V(FatalLevel) {
- l.log.With(ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
}
}
// Warning logs a message at level Warn on the standard logger.
func (l clogger) Warning(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
}
// Warningln logs a message at level Warn on the standard logger with a line feed. Default in any case.
func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
}
// Warningf logs a message at level Warn on the standard logger.
func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
- l.log.With(ExtractContextAttributes(ctx)...).Warnf(format, args...)
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
}
// V reports whether verbosity level l is at least the requested verbose level.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
deleted file mode 100644
index b47b562..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Older Version of Logger interface without support of Context Injection
-// This is Depreciated and should not be used anymore. Instead use CLogger
-// defined in log.go file.
-// This file will be deleted once all code files of voltha compopnents have been
-// changed to use new CLogger interface methods supporting context injection
-package log
-
-import (
- zp "go.uber.org/zap"
-)
-
-// Logger represents an abstract logging interface. Any logging implementation used
-// will need to abide by this interface
-type Logger interface {
- Debug(...interface{})
- Debugln(...interface{})
- Debugf(string, ...interface{})
- Debugw(string, Fields)
-
- Info(...interface{})
- Infoln(...interface{})
- Infof(string, ...interface{})
- Infow(string, Fields)
-
- Warn(...interface{})
- Warnln(...interface{})
- Warnf(string, ...interface{})
- Warnw(string, Fields)
-
- Error(...interface{})
- Errorln(...interface{})
- Errorf(string, ...interface{})
- Errorw(string, Fields)
-
- Fatal(...interface{})
- Fatalln(...interface{})
- Fatalf(string, ...interface{})
- Fatalw(string, Fields)
-
- With(Fields) Logger
-
- // The following are added to be able to use this logger as a gRPC LoggerV2 if needed
- //
- Warning(...interface{})
- Warningln(...interface{})
- Warningf(string, ...interface{})
-
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l LogLevel) bool
-
- //Returns the log level of this specific logger
- GetLogLevel() LogLevel
-}
-
-// logger has been refactored to be a thin wrapper on clogger implementation to support
-// all existing log statements during transition to new clogger
-type logger struct {
- cl *clogger
-}
-
-func AddPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (Logger, error) {
- // Get package name of caller method and pass further on; else this method is considered caller
- pkgName, _, _, _ := getCallerInfo()
-
- pkgNames = append(pkgNames, pkgName)
- clg, err := RegisterPackage(outputType, level, defaultFields, pkgNames...)
- if err != nil {
- return nil, err
- }
-
- return logger{cl: clg.(*clogger)}, nil
-}
-
-func getPackageLevelSugaredLogger() *zp.SugaredLogger {
- pkgName, _, _, _ := getCallerInfo()
- if _, exist := loggers[pkgName]; exist {
- return loggers[pkgName].log
- }
- return defaultLogger.log
-}
-
-func getPackageLevelLogger() CLogger {
- pkgName, _, _, _ := getCallerInfo()
- if _, exist := loggers[pkgName]; exist {
- return loggers[pkgName]
- }
- return defaultLogger
-}
-
-// With returns a logger initialized with the key-value pairs
-func (l logger) With(keysAndValues Fields) Logger {
- return logger{cl: &clogger{log: l.cl.log.With(serializeMap(keysAndValues)...), parent: l.cl.parent}}
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func (l logger) Debug(args ...interface{}) {
- l.cl.log.Debug(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger with a line feed. Default in any case.
-func (l logger) Debugln(args ...interface{}) {
- l.cl.log.Debug(args...)
-}
-
-// Debugw logs a message at level Debug on the standard logger.
-func (l logger) Debugf(format string, args ...interface{}) {
- l.cl.log.Debugf(format, args...)
-}
-
-// Debugw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Debugw(msg string, keysAndValues Fields) {
- if l.V(DebugLevel) {
- l.cl.log.Debugw(msg, serializeMap(keysAndValues)...)
- }
-}
-
-// Info logs a message at level Info on the standard logger.
-func (l logger) Info(args ...interface{}) {
- l.cl.log.Info(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger with a line feed. Default in any case.
-func (l logger) Infoln(args ...interface{}) {
- l.cl.log.Info(args...)
- //msg := fmt.Sprintln(args...)
- //l.sourced().Info(msg[:len(msg)-1])
-}
-
-// Infof logs a message at level Info on the standard logger.
-func (l logger) Infof(format string, args ...interface{}) {
- l.cl.log.Infof(format, args...)
-}
-
-// Infow logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Infow(msg string, keysAndValues Fields) {
- if l.V(InfoLevel) {
- l.cl.log.Infow(msg, serializeMap(keysAndValues)...)
- }
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func (l logger) Warn(args ...interface{}) {
- l.cl.log.Warn(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger with a line feed. Default in any case.
-func (l logger) Warnln(args ...interface{}) {
- l.cl.log.Warn(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func (l logger) Warnf(format string, args ...interface{}) {
- l.cl.log.Warnf(format, args...)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Warnw(msg string, keysAndValues Fields) {
- if l.V(WarnLevel) {
- l.cl.log.Warnw(msg, serializeMap(keysAndValues)...)
- }
-}
-
-// Error logs a message at level Error on the standard logger.
-func (l logger) Error(args ...interface{}) {
- l.cl.log.Error(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger with a line feed. Default in any case.
-func (l logger) Errorln(args ...interface{}) {
- l.cl.log.Error(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func (l logger) Errorf(format string, args ...interface{}) {
- l.cl.log.Errorf(format, args...)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Errorw(msg string, keysAndValues Fields) {
- if l.V(ErrorLevel) {
- l.cl.log.Errorw(msg, serializeMap(keysAndValues)...)
- }
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func (l logger) Fatal(args ...interface{}) {
- l.cl.log.Fatal(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger with a line feed. Default in any case.
-func (l logger) Fatalln(args ...interface{}) {
- l.cl.log.Fatal(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func (l logger) Fatalf(format string, args ...interface{}) {
- l.cl.log.Fatalf(format, args...)
-}
-
-// Fatalw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Fatalw(msg string, keysAndValues Fields) {
- if l.V(FatalLevel) {
- l.cl.log.Fatalw(msg, serializeMap(keysAndValues)...)
- }
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func (l logger) Warning(args ...interface{}) {
- l.cl.log.Warn(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger with a line feed. Default in any case.
-func (l logger) Warningln(args ...interface{}) {
- l.cl.log.Warn(args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func (l logger) Warningf(format string, args ...interface{}) {
- l.cl.log.Warnf(format, args...)
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func (l logger) V(level LogLevel) bool {
- return l.cl.parent.Core().Enabled(logLevelToLevel(level))
-}
-
-// GetLogLevel returns the current level of the logger
-func (l logger) GetLogLevel() LogLevel {
- return levelToLogLevel(cfgs[l.cl.packageName].Level.Level())
-}
-
-// With returns a logger initialized with the key-value pairs
-func With(keysAndValues Fields) Logger {
- return logger{cl: &clogger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}}
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
- getPackageLevelSugaredLogger().Debug(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
- getPackageLevelSugaredLogger().Debug(args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Debugf(format, args...)
-}
-
-// Debugw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Debugw(msg string, keysAndValues Fields) {
- getPackageLevelSugaredLogger().Debugw(msg, serializeMap(keysAndValues)...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
- getPackageLevelSugaredLogger().Info(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
- getPackageLevelSugaredLogger().Info(args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Infof(format, args...)
-}
-
-//Infow logs a message with some additional context. The variadic key-value
-//pairs are treated as they are in With.
-func Infow(msg string, keysAndValues Fields) {
- getPackageLevelSugaredLogger().Infow(msg, serializeMap(keysAndValues)...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
- getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
- getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Warnf(format, args...)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Warnw(msg string, keysAndValues Fields) {
- getPackageLevelSugaredLogger().Warnw(msg, serializeMap(keysAndValues)...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
- getPackageLevelSugaredLogger().Error(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
- getPackageLevelSugaredLogger().Error(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Errorf(format, args...)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Errorw(msg string, keysAndValues Fields) {
- getPackageLevelSugaredLogger().Errorw(msg, serializeMap(keysAndValues)...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func Fatal(args ...interface{}) {
- getPackageLevelSugaredLogger().Fatal(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger.
-func Fatalln(args ...interface{}) {
- getPackageLevelSugaredLogger().Fatal(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func Fatalf(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Fatalf(format, args...)
-}
-
-// Fatalw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Fatalw(msg string, keysAndValues Fields) {
- getPackageLevelSugaredLogger().Fatalw(msg, serializeMap(keysAndValues)...)
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func Warning(args ...interface{}) {
- getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger.
-func Warningln(args ...interface{}) {
- getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func Warningf(format string, args ...interface{}) {
- getPackageLevelSugaredLogger().Warnf(format, args...)
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func V(level LogLevel) bool {
- return getPackageLevelLogger().V(level)
-}
-
-//GetLogLevel returns the log level of the invoking package
-func GetLogLevel() LogLevel {
- return getPackageLevelLogger().GetLogLevel()
-}
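
With log_classic.go deleted outright, any call site still on the classic package-level helpers has to move to the CLogger API; a hedged before/after sketch (identifiers, messages and field names below are illustrative):

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// Before this change a call site could use the classic API, e.g.:
//     l, _ := log.AddPackage(log.JSON, log.DebugLevel, nil)
//     l.Warnw("device-unreachable", log.Fields{"device-id": id})
// The equivalent context-aware form is:

var logger, _ = log.RegisterPackage(log.JSON, log.DebugLevel, log.Fields{})

func reportUnreachable(ctx context.Context, deviceID string) {
	logger.Warnw(ctx, "device-unreachable", log.Fields{"device-id": deviceID})
}
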
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go
index b22ca14..82c3d7d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go
@@ -22,24 +22,53 @@
import (
"context"
"errors"
+ "fmt"
"github.com/opentracing/opentracing-go"
jtracing "github.com/uber/jaeger-client-go"
jcfg "github.com/uber/jaeger-client-go/config"
"io"
- "io/ioutil"
"os"
"strings"
+ "sync"
)
const (
RootSpanNameKey = "op-name"
)
-// Flag indicating whether to extract Log Fields from Span embedded in the received Context
-var extractLogFieldsFromContext bool = true
+// Global Settings governing the Log Correlation and Tracing features. Should only
+// be updated through the exposed public methods
+type LogFeaturesManager struct {
+ isTracePublishingEnabled bool
+ isLogCorrelationEnabled bool
+ componentName string // Name of component extracted from ENV variable
+ activeTraceAgentAddress string
+ lock sync.Mutex
+}
-// Flag indicating whether to process Span related operations; to save CPU cycles when disabled
-var processSpanOperations bool = true
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+ return globalLFM
+}
+
+// A wrapper that always delegates to the currently active Tracer instance. The middleware library used for generating
+// Spans for gRPC API calls does not support changing the active Tracer dynamically, unlike the SetGlobalTracer method
+// provided by the OpenTracing API
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+ return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+ return opentracing.GlobalTracer().Extract(format, carrier)
+}
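
The comment above is the design point behind ActiveTracerProxy: a consumer that captures a Tracer value once would never observe a later SetGlobalTracer call, so the proxy defers the lookup to call time. A minimal sketch of handing it to such a consumer (the function and span name are illustrative):

package example

import (
	"github.com/opentracing/opentracing-go"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func traceOneCall() {
	// Hand the proxy to anything that keeps a fixed opentracing.Tracer value
	// (e.g. tracing middleware configured at server start-up); every call is delegated
	// to whatever tracer is globally registered at that moment, so a later
	// SetTracePublishingStatus toggle takes effect without re-wiring the consumer.
	var tracer opentracing.Tracer = log.ActiveTracerProxy{}
	span := tracer.StartSpan("adapter-request")
	defer span.Finish()
}
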
// Jaeger complaint Logger instance to redirect logs to Default Logger
type traceLogger struct {
@@ -55,54 +84,189 @@
tl.logger.Debugf(context.Background(), msg, args...)
}
-// This method will start the Tracing for a component using Component name injected from the Chart
-// The close() method on returned Closer instance should be called in defer mode to gracefully
-// terminate tracing on component shutdown
-func InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
- if !tracePublishEnabled && !logCorrelationEnabled {
- defaultLogger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
- extractLogFieldsFromContext = false
- processSpanOperations = false
- return ioutil.NopCloser(strings.NewReader("")), nil
+// Wrapper to handle correct Closer call at the time of Process Termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+ currentActiveTracer := opentracing.GlobalTracer()
+ if currentActiveTracer != nil {
+ if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+ jTracer.Close()
+ }
}
- if !logCorrelationEnabled {
- defaultLogger.Info(context.Background(), "Disabling Log Fields extraction from context as configured")
- extractLogFieldsFromContext = false
- }
+ return nil
+}
- componentName := os.Getenv("COMPONENT_NAME")
- if componentName == "" {
+// Method to initialize the Jaeger-based Tracing client according to the initial status of Trace Publishing and Log Correlation
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+ lfm.componentName = os.Getenv("COMPONENT_NAME")
+ if lfm.componentName == "" {
return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
}
- // Use basic configuration to start with; will extend later to support dynamic config updates
- cfg := jcfg.Configuration{}
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ // Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+ if !tracePublishEnabled && !logCorrelationEnabled {
+ logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+ lfm.isTracePublishingEnabled = false
+ lfm.isLogCorrelationEnabled = false
+ opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+ return traceCloser{}, nil
+ }
+
+ tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize variables representing Active Status
+ opentracing.SetGlobalTracer(tracer)
+ lfm.isTracePublishingEnabled = tracePublishEnabled
+ lfm.activeTraceAgentAddress = traceAgentAddress
+ lfm.isLogCorrelationEnabled = logCorrelationEnabled
+ return traceCloser{}, nil
+}
+
+// Method to replace the Active Tracer and gracefully close the previous tracer
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+ currentActiveTracer := opentracing.GlobalTracer()
+ opentracing.SetGlobalTracer(tracer)
+
+ if currentActiveTracer != nil {
+ if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+ jTracer.Close()
+ }
+ }
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ if isEnabled == lfm.isLogCorrelationEnabled {
+ logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+ return
+ }
+
+ if isEnabled {
+ // Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+ // Continue using the earlier tracer instance in case of any error
+ if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+ tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+ return
+ }
+
+ lfm.replaceActiveTracer(tracer)
+ }
+
+ lfm.isLogCorrelationEnabled = true
+ logger.Info(context.Background(), "Log Correlation has been enabled")
+
+ } else {
+ // Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+ if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+ lfm.replaceActiveTracer(opentracing.NoopTracer{})
+ }
+
+ lfm.isLogCorrelationEnabled = false
+ logger.Info(context.Background(), "Log Correlation has been disabled")
+ }
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ if isEnabled == lfm.isTracePublishingEnabled {
+ logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+ return
+ }
+
+ if isEnabled {
+ // Construct a new Tracer instance when Trace Publishing is enabled, even if a Jaeger instance is already active.
+ // This ensures a fresh lookup of the Jaeger Agent address is performed on every Disable-Enable cycle
+ // of Tracing
+ tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+ return
+ }
+ lfm.replaceActiveTracer(tracer)
+
+ lfm.isTracePublishingEnabled = true
+ logger.Info(context.Background(), "Tracing Publishing has been enabled")
+ } else {
+ // Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+ if !lfm.isLogCorrelationEnabled {
+ lfm.replaceActiveTracer(opentracing.NoopTracer{})
+ } else {
+ // Else construct a new Jaeger Instance with publishing disabled
+ tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+ return
+ }
+ lfm.replaceActiveTracer(tracer)
+ }
+
+ lfm.isTracePublishingEnabled = false
+ logger.Info(context.Background(), "Tracing Publishing has been disabled")
+ }
+}
+
+// Method to construct a new Jaeger Tracer instance based on the given Trace Agent address and Publish status.
+// The last attribute indicates whether to use the Loopback IP for creating the Jaeger Client when the DNS lookup
+// of the supplied Trace Agent address has failed. It is fine to fall back during the initialization step, but
+// not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+ cfg := jcfg.Configuration{ServiceName: lfm.componentName}
var err error
var jReporterConfig jcfg.ReporterConfig
var jReporterCfgOption jtracing.Reporter
- // Attempt Trace Agent Address only if Trace Publishing is enabled; else directly use Loopback IP
- if tracePublishEnabled {
- jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
- jReporterCfgOption, err = jReporterConfig.NewReporter(componentName, jtracing.NewNullMetrics(), traceLogger{logger: defaultLogger})
+ logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
- if err != nil {
- defaultLogger.Errorw(context.Background(), "Unable to create Reporter with given Trace Agent address",
- Fields{"error": err, "address": traceAgentAddress})
- // The Reporter initialization may fail due to Invalid Agent address or non-existent Agent (DNS lookup failure).
- // It is essential for Tracer Instance to still start for correct Span propagation needed for log correlation.
- // Thus, falback to use loopback IP for Reporter initialization before throwing back any error
- tracePublishEnabled = false
+ // Attempt the Trace Agent Address first; will fall back to the Loopback IP if it fails
+ jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+ jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+ if err != nil {
+ if !fallbackToLoopbackAllowed {
+ return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
}
- }
- if !tracePublishEnabled {
+ logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+ Fields{"error": err, "address": traceAgentAddress})
+ // The Reporter initialization may fail due to Invalid Agent address or non-existent Agent (DNS lookup failure).
+ // It is essential for Tracer Instance to still start for correct Span propagation needed for log correlation.
+ // Thus, fall back to the loopback IP for Reporter initialization before throwing back any error
+ tracePublishEnabled = false
+
jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
- jReporterCfgOption, err = jReporterConfig.NewReporter(componentName, jtracing.NewNullMetrics(), traceLogger{logger: defaultLogger})
+ jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
if err != nil {
- return nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+ return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
}
}
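
A hedged sketch of how a component could drive the new setters at runtime; the desired-state flags would typically come from a configuration watch, which is assumed here and is not part of this change:

package example

import "github.com/opencord/voltha-lib-go/v3/pkg/log"

// applyLogFeatureConfig pushes desired feature state into the global LogFeaturesManager.
// The setters ignore a no-op change, so it is safe to call this on every config notification.
func applyLogFeatureConfig(wantTracing, wantCorrelation bool) {
	lfm := log.GetGlobalLFM()
	lfm.SetTracePublishingStatus(wantTracing)
	lfm.SetLogCorrelationStatus(wantCorrelation)
}
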
@@ -112,18 +276,18 @@
samplerParam = 1
}
jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
- jSamplerCfgOption, err := jSamplerConfig.NewSampler(componentName, jtracing.NewNullMetrics())
+ jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
if err != nil {
- return nil, errors.New("Unable to create Sampler : " + err.Error())
+ return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
}
- return cfg.InitGlobalTracer(componentName, jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+ return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
}
func TerminateTracing(c io.Closer) {
err := c.Close()
if err != nil {
- defaultLogger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+ logger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
}
}
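
Putting start-up and shutdown together, a minimal sketch assuming COMPONENT_NAME is injected by the deployment; the agent address, message keys and helper name are illustrative:

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// setupTracing initializes tracing/log correlation and returns a cleanup function the
// caller is expected to defer. COMPONENT_NAME must be set in the environment, otherwise
// InitTracingAndLogCorrelation returns an error.
func setupTracing(ctx context.Context, logger log.CLogger, tracePublish, logCorrelation bool) func() {
	closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(tracePublish, "jaeger-agent.voltha.svc:6831", logCorrelation)
	if err != nil {
		logger.Fatalw(ctx, "failed-to-init-tracing", log.Fields{"error": err})
	}
	return func() { log.TerminateTracing(closer) }
}
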
@@ -135,8 +299,8 @@
// Additionally, any tags present in Span are also extracted to use as log fields e.g. device-id.
//
// If no Span is found associated with context, blank slice is returned without any log fields
-func ExtractContextAttributes(ctx context.Context) []interface{} {
- if !extractLogFieldsFromContext {
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+ if !lfm.isLogCorrelationEnabled {
return make([]interface{}, 0)
}
@@ -146,10 +310,10 @@
if span := opentracing.SpanFromContext(ctx); span != nil {
if jspan, ok := span.(*jtracing.Span); ok {
// Add Log fields for operation identified by Root Level Span (Trace)
- opId := jspan.SpanContext().TraceID().String()
+ opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
opName := jspan.BaggageItem(RootSpanNameKey)
- taskId := jspan.SpanContext().SpanID().String()
+ taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
taskName := jspan.OperationName()
if opName == "" {
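
The switch from the String() helpers to fmt.Sprintf("%016x", ...) matters because default hex formatting drops leading zeros, so the logged op-id/task-id would not always match the fixed-width IDs shown by Jaeger; a small standalone illustration:

package main

import "fmt"

func main() {
	id := uint64(0x0000abcd1234ef56)
	fmt.Printf("%x\n", id)    // abcd1234ef56     (leading zeros dropped)
	fmt.Printf("%016x\n", id) // 0000abcd1234ef56 (fixed 16-digit form used for correlation)
}
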
@@ -220,8 +384,10 @@
// Method to inject Error into the Span in event of any operation failure
func MarkSpanError(ctx context.Context, err error) {
span := opentracing.SpanFromContext(ctx)
- span.SetTag("error", true)
- span.SetTag("err", err)
+ if span != nil {
+ span.SetTag("error", true)
+ span.SetTag("err", err)
+ }
}
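
With the nil guard added above, MarkSpanError can be called unconditionally from error paths, even when the active tracer is a NoopTracer and the context carries no span; a hedged usage sketch (runAndRecord is illustrative):

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// runAndRecord runs one operation and tags the span (if any) on failure. MarkSpanError
// is now a no-op when ctx carries no span, instead of panicking on a nil span.
func runAndRecord(ctx context.Context, op func(context.Context) error) error {
	if err := op(ctx); err != nil {
		log.MarkSpanError(ctx, err)
		return err
	}
	return nil
}
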
// Creates a Child Span from Parent Span embedded in passed context. Should be used before starting a new major
@@ -231,7 +397,7 @@
// 3. In start of a Go Routine responsible for performing a major task involving significant duration
// 4. Any method which is suspected to be time consuming...
func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
- if !processSpanOperations {
+ if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
return opentracing.NoopTracer{}.StartSpan(taskName), ctx
}
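
A hedged sketch of instrumenting a long-running task with CreateChildSpan; when both tracing features are disabled, the guard above hands back a NoopTracer span, so the instrumentation costs close to nothing (the task name, field and wrapper function are illustrative):

package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func processFlowUpdate(ctx context.Context, deviceID string, apply func(context.Context) error) error {
	span, ctx := log.CreateChildSpan(ctx, "process-flow-update", log.Fields{"device-id": deviceID})
	defer span.Finish()
	// Work done through the returned ctx is attributed to the new child span.
	return apply(ctx)
}
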
@@ -254,7 +420,7 @@
// Some situations where this method would be suitable includes Kafka Async RPC call, Propagation of Event across
// a channel etc.
func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
- if !processSpanOperations {
+ if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
return opentracing.NoopTracer{}.StartSpan(taskName), ctx
}
@@ -265,7 +431,7 @@
// We should always be creating Aysnc span from a Valid parent span. If not, create a Child span instead
if parentSpan == nil {
- defaultLogger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+ logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
} else {
// Use Background context as the base for Follows-from case; else new span is getting both Child and FollowsFrom relationship
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9c5f761..2fd46fe 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -102,7 +102,7 @@
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
-# github.com/opencord/voltha-lib-go/v3 v3.2.5
+# github.com/opencord/voltha-lib-go/v3 v3.2.8
github.com/opencord/voltha-lib-go/v3/pkg/adapters
github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif
github.com/opencord/voltha-lib-go/v3/pkg/adapters/common