/*
 * Copyright 2018-2024 Open Networking Foundation (ONF) and the ONF Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package core

import (
	"context"
	"errors"
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/db"
	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v7/pkg/log"
	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// newKVClient returns a KV client for the requested store type ("etcd",
// "redis" or "redis-sentinel") connected to the given address.
func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
	switch storeType {
	case "etcd":
		return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
	case "redis":
		return kvstore.NewRedisClient(address, timeout, false)
	case "redis-sentinel":
		return kvstore.NewRedisClient(address, timeout, true)
	}
	return nil, errors.New("unsupported-kv-store")
}
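
// A minimal usage sketch (the store type, address, and timeout below are
// illustrative, not taken from the actual rw_core configuration):
//
//	kvClient, err := newKVClient(ctx, "etcd", "voltha-etcd:2379", 5*time.Second)
//	if err != nil {
//		return err
//	}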

// stopKVClient releases all reservations held by the client and closes the
// connection to the KV store.
func stopKVClient(ctx context.Context, kvClient kvstore.Client) {
	// Release all reservations
	if err := kvClient.ReleaseAllReservations(ctx); err != nil {
		logger.Infow(ctx, "fail-to-release-all-reservations", log.Fields{"error": err})
	}
	// Close the DB connection
	kvClient.Close(ctx)
}
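
// Callers typically pair stopKVClient with newKVClient via defer, e.g. (a
// sketch, assuming kvClient was created as above):
//
//	defer stopKVClient(ctx, kvClient)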

// waitUntilKVStoreReachableOrMaxTries waits until it can connect to the KV
// store or until maxRetries attempts have been made; a maxRetries of -1 means
// retry indefinitely.
func waitUntilKVStoreReachableOrMaxTries(ctx context.Context, kvClient kvstore.Client, maxRetries int, retryInterval time.Duration, serviceName string) error {
	logger.Infow(ctx, "verifying-KV-store-connectivity", log.Fields{"retries": maxRetries, "retryInterval": retryInterval})
	count := 0
	for !kvClient.IsConnectionUp(ctx) {
		logger.Info(ctx, "KV-store-unreachable")
		if maxRetries != -1 && count >= maxRetries {
			return status.Error(codes.Unavailable, "kv store unreachable")
		}
		count++
		// Take a nap before retrying
		select {
		case <-ctx.Done():
			// ctx canceled
			return ctx.Err()
		case <-time.After(retryInterval):
		}
		logger.Infow(ctx, "retry-KV-store-connectivity", log.Fields{"retryCount": count, "maxRetries": maxRetries, "retryInterval": retryInterval})
	}
	probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusRunning)
	logger.Info(ctx, "KV-store-reachable")
	return nil
}
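
// Startup code typically blocks here before marking the service ready, e.g.
// (a sketch; the retry values and service name are illustrative):
//
//	if err := waitUntilKVStoreReachableOrMaxTries(ctx, kvClient, -1, 2*time.Second, "kv-store"); err != nil {
//		return err
//	}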

/*
 * Thread to monitor kvstore liveness (connection status)
 *
 * This function constantly monitors the liveness state of the kvstore, as
 * reported periodically by the backend, and updates the status of the
 * kv-store service registered with the rw_core probe.
 *
 * If no liveness event has been seen within a timeout, the thread performs a
 * liveness-check attempt, which in turn triggers a liveness event on the
 * liveness channel: true or false depending on whether the attempt succeeded.
 *
 * The gRPC server in turn monitors the state of the readiness probe and
 * starts issuing UNAVAILABLE responses while the probe is not ready.
 */
func monitorKVStoreLiveness(ctx context.Context, backend *db.Backend, serviceName string, liveProbeInterval, notLiveProbeInterval time.Duration) {
	logger.Info(ctx, "start-monitoring-kvstore-liveness")

	// Instruct the backend to create a liveness channel for transporting state updates
	livenessChannel := backend.EnableLivenessChannel(ctx)
	logger.Infow(ctx, "enabled-liveness-channel", log.Fields{"service-name": serviceName})

	// Default state for the kvstore is alive for rw_core
	timeout := liveProbeInterval

loop:
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {
		case liveness := <-livenessChannel:
			logger.Debugw(ctx, "received-liveness-change-notification", log.Fields{"liveness": liveness, "service-name": serviceName})
			if !liveness {
				probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusNotReady)
				logger.Infow(ctx, "service-not-ready", log.Fields{"service-name": serviceName})
				timeout = notLiveProbeInterval
			} else {
				probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusRunning)
				logger.Infow(ctx, "service-ready", log.Fields{"service-name": serviceName})
				timeout = liveProbeInterval
			}
			// Stop and drain the timer so the next iteration starts from a clean state
			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}
		case <-ctx.Done():
			break loop
		case <-timeoutTimer.C:
			logger.Infow(ctx, "perform-liveness-check-on-timeout", log.Fields{"service-name": serviceName})
			// Trigger a liveness check if no liveness update was received within
			// the timeout period. The liveness check pushes the live state to the
			// same channel this routine is reading and processing. Thus, do it
			// asynchronously to avoid blocking on the backend response and any
			// possibility of deadlock.
			go backend.PerformLivenessCheck(ctx)
		}
	}
}
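
// The monitor runs for the lifetime of the process, so callers launch it as a
// goroutine, e.g. (a sketch; the backend fields and interval values shown are
// illustrative):
//
//	backend := &db.Backend{Client: kvClient, StoreType: "etcd", Address: address, Timeout: timeout}
//	go monitorKVStoreLiveness(ctx, backend, "kv-store", 5*time.Second, 250*time.Millisecond)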