[VOL-1667] Fix instance Id and other minor fixes
This commit fixes the following:
1) Removes the instance ID as an input parameter and lets the code
set it to the hostname
2) Removes log settings in the rw_core that were overwriting the
input parameter setting
3) Removes unnecessary device loading at creation time (applies to
the core in a core-pair that is only monitoring the transaction).
The device ID is not known by that Core at that time.
4) Some minor cleanups
Change-Id: If781103bfb449dcae5421284456c4b0fe67704fd
diff --git a/compose/rw_core.yml b/compose/rw_core.yml
index 6948546..4600b1a 100644
--- a/compose/rw_core.yml
+++ b/compose/rw_core.yml
@@ -32,8 +32,9 @@
- -kv_store_data_prefix=service/voltha
- -in_competing_mode=false
- -timeout_long_request=3000
- - -timeout_request=300
- - -log_level=0
+ - -timeout_request=500
+ - -core_timeout=500
+ - -log_level=2
ports:
- 50057:50057
volumes:
diff --git a/rw_core/config/config.go b/rw_core/config/config.go
index 34b5867..9671f1a 100644
--- a/rw_core/config/config.go
+++ b/rw_core/config/config.go
@@ -19,14 +19,12 @@
"flag"
"fmt"
"github.com/opencord/voltha-go/common/log"
- "os"
)
// RW Core service default constants
const (
ConsulStoreName = "consul"
EtcdStoreName = "etcd"
- default_InstanceID = "rwcore001"
default_GrpcPort = 50057
default_GrpcHost = ""
default_KafkaAdapterHost = "127.0.0.1"
@@ -58,7 +56,6 @@
// RWCoreFlags represents the set of configurations used by the read-write core service
type RWCoreFlags struct {
// Command line parameters
- InstanceID string
RWCoreEndpoint string
GrpcHost string
GrpcPort int
@@ -94,7 +91,6 @@
// NewRWCoreFlags returns a new RWCore config
func NewRWCoreFlags() *RWCoreFlags {
var rwCoreFlag = RWCoreFlags{ // Default values
- InstanceID: default_InstanceID,
RWCoreEndpoint: default_RWCoreEndpoint,
GrpcHost: default_GrpcHost,
GrpcPort: default_GrpcPort,
@@ -130,9 +126,6 @@
var help string
- help = fmt.Sprintf("RW instance id")
- flag.StringVar(&(cf.InstanceID), "instance-id", default_InstanceID, help)
-
help = fmt.Sprintf("RW core endpoint address")
flag.StringVar(&(cf.RWCoreEndpoint), "vcore-endpoint", default_RWCoreEndpoint, help)
@@ -203,14 +196,4 @@
flag.StringVar(&(cf.CoreBindingKey), "core_binding_key", default_CoreBindingKey, help)
flag.Parse()
-
- containerName := getContainerInfo()
- if len(containerName) > 0 {
- cf.InstanceID = containerName
- }
-
-}
-
-func getContainerInfo() string {
- return os.Getenv("HOSTNAME")
}
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index f03c7d2..938d8e9 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -89,7 +89,7 @@
if err := core.startKafkaMessagingProxy(ctx); err != nil {
log.Fatal("Failure-starting-kafkaMessagingProxy")
}
- log.Info("values", log.Fields{"kmp": core.kmp})
+ log.Debugw("values", log.Fields{"kmp": core.kmp})
core.adapterMgr = newAdapterManager(core.clusterDataProxy, core.instanceId)
core.deviceMgr = newDeviceManager(core)
core.logicalDeviceMgr = newLogicalDeviceManager(core, core.deviceMgr, core.kmp, core.clusterDataProxy, core.config.DefaultCoreTimeout)
@@ -120,14 +120,12 @@
log.Info("adaptercore-stopped")
}
-//startGRPCService creates the grpc service handlers, registers it to the grpc server
-// and starts the server
+//startGRPCService creates the grpc service handlers, registers it to the grpc server and starts the server
func (core *Core) startGRPCService(ctx context.Context) {
// create an insecure gserver server
core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false)
log.Info("grpc-server-created")
- //core.grpcNBIAPIHandler = NewAPIHandler(core.deviceMgr, core.logicalDeviceMgr, core.adapterMgr, core.config.InCompetingMode, core.config.LongRunningRequestTimeout, core.config.DefaultRequestTimeout)
core.grpcNBIAPIHandler = NewAPIHandler(core)
log.Infow("grpc-handler", log.Fields{"core_binding_key": core.config.CoreBindingKey})
core.logicalDeviceMgr.setGrpcNbiHandler(core.grpcNBIAPIHandler)
@@ -230,9 +228,6 @@
}
func (core *Core) startDeviceManager(ctx context.Context) {
- // TODO: Interaction between the logicaldevicemanager and devicemanager should mostly occur via
- // callbacks. For now, until the model is ready, devicemanager will keep a reference to the
- // logicaldevicemanager to initiate the creation of logical devices
log.Info("DeviceManager-Starting...")
core.deviceMgr.start(ctx, core.logicalDeviceMgr)
log.Info("DeviceManager-Started")
diff --git a/rw_core/core/device_ownership.go b/rw_core/core/device_ownership.go
index 49d860a..6efbdb8 100644
--- a/rw_core/core/device_ownership.go
+++ b/rw_core/core/device_ownership.go
@@ -47,10 +47,10 @@
deviceMgr *DeviceManager
logicalDeviceMgr *LogicalDeviceManager
deviceMap map[string]*ownership
- deviceMapLock *sync.RWMutex
+ deviceMapLock sync.RWMutex
deviceToKeyMap map[string]string
- deviceToKeyMapLock *sync.RWMutex
- ownershipLock *sync.RWMutex
+ deviceToKeyMapLock sync.RWMutex
+ ownershipLock sync.RWMutex
}
func NewDeviceOwnership(id string, kvClient kvstore.Client, deviceMgr *DeviceManager, logicalDeviceMgr *LogicalDeviceManager, ownershipPrefix string, reservationTimeout int64) *DeviceOwnership {
@@ -63,10 +63,10 @@
deviceOwnership.ownershipPrefix = ownershipPrefix
deviceOwnership.reservationTimeout = reservationTimeout
deviceOwnership.deviceMap = make(map[string]*ownership)
- deviceOwnership.deviceMapLock = &sync.RWMutex{}
+ deviceOwnership.deviceMapLock = sync.RWMutex{}
deviceOwnership.deviceToKeyMap = make(map[string]string)
- deviceOwnership.deviceToKeyMapLock = &sync.RWMutex{}
- deviceOwnership.ownershipLock = &sync.RWMutex{}
+ deviceOwnership.deviceToKeyMapLock = sync.RWMutex{}
+ deviceOwnership.ownershipLock = sync.RWMutex{}
return &deviceOwnership
}
diff --git a/rw_core/core/grpc_nbi_api_handler.go b/rw_core/core/grpc_nbi_api_handler.go
index 8f7b328..d5e0305 100755
--- a/rw_core/core/grpc_nbi_api_handler.go
+++ b/rw_core/core/grpc_nbi_api_handler.go
@@ -121,8 +121,9 @@
return handler.coreInCompetingMode
}
-// acquireRequestForList handles transaction processing for list requests, i.e. when there are no specific id requested.
-func (handler *APIHandler) acquireRequestForList(ctx context.Context, maxTimeout ...int64) (*KVTransaction, error) {
+// acquireRequest handles transaction processing for device creation and list requests, i.e. when there are no
+// specific id requested (list scenario) or id present in the request (creation use case).
+func (handler *APIHandler) acquireRequest(ctx context.Context, maxTimeout ...int64) (*KVTransaction, error) {
timeout := handler.defaultRequestTimeout
if len(maxTimeout) > 0 {
timeout = maxTimeout[0]
@@ -138,37 +139,6 @@
}
}
-// acquireRequest handles transaction processing for creation of new devices
-func (handler *APIHandler) acquireRequest(ctx context.Context, id interface{}, maxTimeout ...int64) (*KVTransaction, error) {
- timeout := handler.defaultRequestTimeout
- if len(maxTimeout) > 0 {
- timeout = maxTimeout[0]
- }
- log.Debugw("transaction-timeout", log.Fields{"timeout": timeout})
- txn, err := handler.createKvTransaction(ctx)
- if txn == nil {
- return nil, err
- } else if txn.Acquired(timeout) {
- return txn, nil
- } else {
- if id != nil {
- // The id can either be a device Id or a logical device id.
- if dId, ok := id.(*utils.DeviceID); ok {
- // Since this core has not processed this request, let's load the device, along with its extended
- // family (parents and children) in memory. This will keep this core in-sync with its paired core as
- // much as possible. The watch feature in the core model will ensure that the contents of those objects in
- // memory are in sync.
- time.Sleep(2 * time.Second)
- go handler.deviceMgr.load(dId.Id)
- } else if ldId, ok := id.(*utils.LogicalDeviceID); ok {
- // This will load the logical device along with its children and grandchildren
- go handler.logicalDeviceMgr.load(ldId.Id)
- }
- }
- return nil, errors.New("failed-to-seize-request")
- }
-}
-
// takeRequestOwnership creates a transaction in the dB for this request and handles the logic of transaction
// acquisition. If the device is owned by this Core (in a core-pair) then acquire the transaction with a
// timeout value (in the event of a timeout the other Core in the core-pair will proceed with the transaction). If the
@@ -404,7 +374,7 @@
func (handler *APIHandler) ListLogicalDevices(ctx context.Context, empty *empty.Empty) (*voltha.LogicalDevices, error) {
log.Debug("ListLogicalDevices-request")
if handler.competeForTransaction() {
- if txn, err := handler.acquireRequestForList(ctx); err != nil {
+ if txn, err := handler.acquireRequest(ctx); err != nil {
return &voltha.LogicalDevices{}, err
} else {
defer txn.Close()
@@ -468,7 +438,8 @@
}
if handler.competeForTransaction() {
- if txn, err := handler.acquireRequest(ctx, &utils.DeviceID{Id: device.Id}); err != nil {
+ // There are no device Id present in this function.
+ if txn, err := handler.acquireRequest(ctx); err != nil {
return &voltha.Device{}, err
} else {
defer txn.Close()
diff --git a/rw_core/main.go b/rw_core/main.go
index fc135f6..9ace2fb 100644
--- a/rw_core/main.go
+++ b/rw_core/main.go
@@ -26,6 +26,7 @@
"github.com/opencord/voltha-go/kafka"
"github.com/opencord/voltha-go/rw_core/config"
c "github.com/opencord/voltha-go/rw_core/core"
+ "github.com/opencord/voltha-go/rw_core/utils"
ic "github.com/opencord/voltha-protos/go/inter_container"
"os"
"os/signal"
@@ -117,7 +118,7 @@
}
}
-func (rw *rwCore) start(ctx context.Context) {
+func (rw *rwCore) start(ctx context.Context, instanceId string) {
log.Info("Starting RW Core components")
// Setup KV Client
@@ -126,7 +127,7 @@
if err == nil {
// Setup KV transaction context
txnPrefix := rw.config.KVStoreDataPrefix + "/transactions/"
- if err = c.SetTransactionContext(rw.config.InstanceID,
+ if err = c.SetTransactionContext(instanceId,
txnPrefix,
rw.kvClient,
rw.config.KVStoreTimeout,
@@ -137,12 +138,12 @@
}
// Setup Kafka Client
- if rw.kafkaClient, err = newKafkaClient("sarama", rw.config.KafkaAdapterHost, rw.config.KafkaAdapterPort, rw.config.InstanceID); err != nil {
+ if rw.kafkaClient, err = newKafkaClient("sarama", rw.config.KafkaAdapterHost, rw.config.KafkaAdapterPort, instanceId); err != nil {
log.Fatal("Unsupported-kafka-client")
}
// Create the core service
- rw.core = c.NewCore(rw.config.InstanceID, rw.config, rw.kvClient, rw.kafkaClient)
+ rw.core = c.NewCore(instanceId, rw.config, rw.kvClient, rw.kafkaClient)
// start the core
rw.core.Start(ctx)
@@ -222,29 +223,33 @@
cf := config.NewRWCoreFlags()
cf.ParseCommandArguments()
- // Setup logging
+ // Set the instance ID as the hostname
+ var instanceId string
+ hostName := utils.GetHostName()
+ if len(hostName) > 0 {
+ instanceId = hostName
+ } else {
+ log.Fatal("HOSTNAME not set")
+ }
//Setup default logger - applies for packages that do not have specific logger set
- if _, err := log.SetDefaultLogger(log.JSON, cf.LogLevel, log.Fields{"instanceId": cf.InstanceID}); err != nil {
+ if _, err := log.SetDefaultLogger(log.JSON, cf.LogLevel, log.Fields{"instanceId": instanceId}); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
}
- // Update all loggers (provisionned via init) with a common field
- if err := log.UpdateAllLoggers(log.Fields{"instanceId": cf.InstanceID}); err != nil {
+ // Update all loggers (provisioned via init) with a common field
+ if err := log.UpdateAllLoggers(log.Fields{"instanceId": instanceId}); err != nil {
log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
}
- //log.SetAllLogLevel(log.ErrorLevel)
+ // Update all loggers to log level specified as input parameter
+ log.SetAllLogLevel(cf.LogLevel)
- log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/core", log.DebugLevel)
- log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/flow_decomposition", log.DebugLevel)
- log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/graph", log.DebugLevel)
- //log.SetPackageLogLevel("github.com/opencord/voltha-go/kafka", log.DebugLevel)
- //log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
+ //log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/core", log.DebugLevel)
defer log.CleanUp()
- // Print verison / build information and exit
+ // Print version / build information and exit
if cf.DisplayVersionOnly {
printVersion()
return
@@ -261,7 +266,7 @@
defer cancel()
rw := newRWCore(cf)
- go rw.start(ctx)
+ go rw.start(ctx, instanceId)
code := waitForExit()
log.Infow("received-a-closing-signal", log.Fields{"code": code})
@@ -270,5 +275,5 @@
rw.stop()
elapsed := time.Since(start)
- log.Infow("rw-core-run-time", log.Fields{"core": rw.config.InstanceID, "time": elapsed / time.Second})
+ log.Infow("rw-core-run-time", log.Fields{"core": instanceId, "time": elapsed / time.Second})
}
diff --git a/rw_core/utils/core_utils.go b/rw_core/utils/core_utils.go
index 813c978..c9cd56d 100644
--- a/rw_core/utils/core_utils.go
+++ b/rw_core/utils/core_utils.go
@@ -18,6 +18,7 @@
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+ "os"
"reflect"
"time"
)
@@ -30,6 +31,10 @@
Id string
}
+func GetHostName() string {
+ return os.Getenv("HOSTNAME")
+}
+
//WaitForNilOrErrorResponses waits on a variadic number of channels for either a nil response or an error
//response. If an error is received from a given channel then the returned error array will contain that error.
//The error will be at the index corresponding to the order in which the channel appear in the parameter list.