/*
 * Copyright 2018-present Open Networking Foundation

 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at

 * http://www.apache.org/licenses/LICENSE-2.0

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package core

import (
	"context"
	"sync"
	"time"

	"github.com/opencord/voltha-go/db/model"
	"github.com/opencord/voltha-go/rw_core/config"
	"github.com/opencord/voltha-go/rw_core/core/adapter"
	"github.com/opencord/voltha-go/rw_core/core/api"
	"github.com/opencord/voltha-go/rw_core/core/device"
	"github.com/opencord/voltha-lib-go/v3/pkg/db"
	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	grpcserver "github.com/opencord/voltha-lib-go/v3/pkg/grpc"
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
	"github.com/opencord/voltha-lib-go/v3/pkg/probe"
	"github.com/opencord/voltha-protos/v3/go/voltha"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Core represents the read/write core attributes
type Core struct {
	instanceID        string
	deviceMgr         *device.Manager
	logicalDeviceMgr  *device.LogicalManager
	grpcServer        *grpcserver.GrpcServer
	grpcNBIAPIHandler *api.NBIHandler
	adapterMgr        *adapter.Manager
	config            *config.RWCoreFlags
	kmp               kafka.InterContainerProxy
	clusterDataProxy  *model.Proxy
	localDataProxy    *model.Proxy
	exitChannel       chan struct{}
	stopOnce          sync.Once
	kvClient          kvstore.Client
	backend           db.Backend
	kafkaClient       kafka.Client
}

// NewCore creates an instance of the rw core
func NewCore(ctx context.Context, id string, cf *config.RWCoreFlags, kvClient kvstore.Client, kafkaClient kafka.Client) *Core {
	var core Core
	core.instanceID = id
	core.exitChannel = make(chan struct{})
	core.config = cf
	core.kvClient = kvClient
	core.kafkaClient = kafkaClient

	// Configure the backend to push liveness status at least every (cf.LiveProbeInterval / 2) seconds
	// so that a liveness check (due to a liveness timeout) is not triggered while the backend is alive
	livenessChannelInterval := cf.LiveProbeInterval / 2

	// Setup the KV store
	core.backend = db.Backend{
		Client:                  kvClient,
		StoreType:               cf.KVStoreType,
		Host:                    cf.KVStoreHost,
		Port:                    cf.KVStorePort,
		Timeout:                 cf.KVStoreTimeout,
		LivenessChannelInterval: livenessChannelInterval,
		PathPrefix:              cf.KVStoreDataPrefix}
	return &core
}

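// Illustrative wiring only (a sketch, not the actual rw_core entry point,
// which is not shown in this file): assuming a probe-carrying ctx, parsed
// config flags cf, an instanceID, and already-constructed KV and Kafka
// clients, a caller would typically do:
//
//	core := NewCore(ctx, instanceID, cf, kvClient, kafkaClient)
//	if err := core.Start(ctx); err != nil {
//		logger.Fatal("failed-to-start-core-services")
//	}
//	defer core.Stop(ctx)
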
// Start brings up the core services
func (core *Core) Start(ctx context.Context) error {

	// If the context has a probe then fetch it and register our services
	var p *probe.Probe
	if value := ctx.Value(probe.ProbeContextKey); value != nil {
		if _, ok := value.(*probe.Probe); ok {
			p = value.(*probe.Probe)
			p.RegisterService(
				"message-bus",
				"kv-store",
				"device-manager",
				"logical-device-manager",
				"adapter-manager",
				"grpc-service",
			)
		}
	}

	logger.Info("starting-core-services", log.Fields{"coreId": core.instanceID})

	// Wait until the connection to the KV store is up
	if err := core.waitUntilKVStoreReachableOrMaxTries(ctx, core.config.MaxConnectionRetries, core.config.ConnectionRetryInterval); err != nil {
		logger.Fatal("Unable-to-connect-to-KV-store")
	}
	if p != nil {
		p.UpdateStatus("kv-store", probe.ServiceStatusRunning)
	}

	endpointMgr := kafka.NewEndpointManager(&core.backend)

	core.clusterDataProxy = model.NewProxy(&core.backend, "/")
	core.localDataProxy = model.NewProxy(&core.backend, "/")

	// core.kmp must be created before deviceMgr and adapterMgr, as they will make
	// private copies of the pointer to core.kmp.
	core.initKafkaManager(ctx)

	logger.Debugw("values", log.Fields{"kmp": core.kmp})
	core.adapterMgr = adapter.NewAdapterManager(core.clusterDataProxy, core.instanceID, core.kafkaClient)
	core.deviceMgr, core.logicalDeviceMgr = device.NewManagers(core.clusterDataProxy, core.adapterMgr, core.kmp, endpointMgr, core.config.CorePairTopic, core.instanceID, core.config.DefaultCoreTimeout)

	// Start the KafkaManager. This must be done after the deviceMgr, adapterMgr, and
	// logicalDeviceMgr have been created, as once the kmp is started, it will register
	// the above with the kmp.

	go core.startKafkaManager(ctx,
		core.config.ConnectionRetryInterval,
		core.config.LiveProbeInterval,
		core.config.NotLiveProbeInterval)

	go core.startDeviceManager(ctx)
	go core.startLogicalDeviceManager(ctx)
	go core.startGRPCService(ctx)
	go core.startAdapterManager(ctx)
	go core.monitorKvstoreLiveness(ctx)

	logger.Info("core-services-started")
	return nil
}

// Stop brings down the core services
func (core *Core) Stop(ctx context.Context) {
	core.stopOnce.Do(func() {
		logger.Info("stopping-adaptercore")
		// Signal to the KVStoreMonitor that we are stopping.
		close(core.exitChannel)
		// Stop all the started services
		if core.grpcServer != nil {
			core.grpcServer.Stop()
		}
		if core.logicalDeviceMgr != nil {
			core.logicalDeviceMgr.Stop(ctx)
		}
		if core.deviceMgr != nil {
			core.deviceMgr.Stop(ctx)
		}
		if core.kmp != nil {
			core.kmp.Stop()
		}
		logger.Info("adaptercore-stopped")
	})
}

// startGRPCService creates the grpc service handlers, registers them with the grpc server and starts the server
func (core *Core) startGRPCService(ctx context.Context) {
	// create an insecure gRPC server
	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false, probe.GetProbeFromContext(ctx))
	logger.Info("grpc-server-created")

	core.grpcNBIAPIHandler = api.NewNBIHandler(core.deviceMgr, core.logicalDeviceMgr, core.adapterMgr)
	logger.Infow("grpc-handler", log.Fields{"core_binding_key": core.config.CoreBindingKey})
	// Create a function to register the core GRPC service with the GRPC server
	f := func(gs *grpc.Server) {
		voltha.RegisterVolthaServiceServer(
			gs,
			core.grpcNBIAPIHandler,
		)
	}

	core.grpcServer.AddService(f)
	logger.Info("grpc-service-added")

	/*
	 * Start the GRPC server
	 *
	 * This is a bit sub-optimal here as the grpcServer.Start call does not return (blocks)
	 * until something fails, but we want to send a "start" status update. As written this
	 * means that we are actually sending the "start" status update before the server is
	 * started, which means it is possible that the status is "running" before it actually is.
	 *
	 * This means that there is a small window in which the core could return its status as
	 * ready, when it really isn't.
	 */
	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusRunning)
	logger.Info("grpc-server-started")
	core.grpcServer.Start(ctx)
	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusStopped)
}

// Initialize the kafka manager, but we will start it later
func (core *Core) initKafkaManager(ctx context.Context) {
	logger.Infow("initialize-kafka-manager", log.Fields{"host": core.config.KafkaAdapterHost,
		"port": core.config.KafkaAdapterPort, "topic": core.config.CoreTopic})

	probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPreparing)

	// create the proxy
	core.kmp = kafka.NewInterContainerProxy(
		kafka.InterContainerHost(core.config.KafkaAdapterHost),
		kafka.InterContainerPort(core.config.KafkaAdapterPort),
		kafka.MsgClient(core.kafkaClient),
		kafka.DefaultTopic(&kafka.Topic{Name: core.config.CoreTopic}),
		kafka.DeviceDiscoveryTopic(&kafka.Topic{Name: core.config.AffinityRouterTopic}))

	probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPrepared)
}

/*
 * KafkaMonitorThread
 *
 * Responsible for starting the Kafka Interadapter Proxy and monitoring its liveness
 * state.
 *
 * Any producer that fails to send will cause KafkaInterContainerProxy to
 * post a false event on its liveness channel. Any producer that succeeds in sending
 * will cause KafkaInterContainerProxy to post a true event on its liveness
 * channel. Group receivers also update liveness state, and a receiver will typically
 * indicate a loss of liveness within 3-5 seconds of Kafka going down. Receivers
 * only indicate restoration of liveness if a message is received. During normal
 * operation, messages will be routinely produced and received, automatically
 * indicating liveness state. These routine liveness indications are rate-limited
 * inside sarama_client.
 *
 * This thread monitors the status of KafkaInterContainerProxy's liveness and pushes
 * that state to the core's readiness probes. If no liveness event has been seen
 * within a timeout, then the thread will make an attempt to produce a "liveness"
 * message, which will in turn trigger a liveness event on the liveness channel, true
 * or false depending on whether the attempt succeeded.
 *
 * The gRPC server in turn monitors the state of the readiness probe and will
 * start issuing UNAVAILABLE responses while the probe is not ready.
 *
 * startupRetryInterval -- interval between attempts to start
 * liveProbeInterval -- interval between liveness checks when in a live state
 * notLiveProbeInterval -- interval between liveness checks when in a notLive state
 *
 * liveProbeInterval and notLiveProbeInterval can be configured separately,
 * though the current default is that both are set to 60 seconds.
 */

func (core *Core) startKafkaManager(ctx context.Context, startupRetryInterval time.Duration, liveProbeInterval time.Duration, notLiveProbeInterval time.Duration) {
	logger.Infow("starting-kafka-manager-thread", log.Fields{"host": core.config.KafkaAdapterHost,
		"port": core.config.KafkaAdapterPort, "topic": core.config.CoreTopic})

	started := false
	for !started {
		// If we haven't started yet, then try to start
		logger.Infow("starting-kafka-proxy", log.Fields{})
		if err := core.kmp.Start(); err != nil {
			// We failed to start. Delay and then try again later.
			// Don't worry about liveness, as we can't be live until we've started.
			probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
			logger.Infow("error-starting-kafka-messaging-proxy", log.Fields{"error": err})
			time.Sleep(startupRetryInterval)
		} else {
			// We started. We only need to do this once.
			// Next we'll fall through and start checking liveness.
			logger.Infow("started-kafka-proxy", log.Fields{})

			// cannot do this until after the kmp is started
			if err := core.registerAdapterRequestHandlers(ctx, core.instanceID, core.deviceMgr, core.logicalDeviceMgr, core.adapterMgr, core.clusterDataProxy, core.localDataProxy); err != nil {
				logger.Fatal("Failure-registering-adapterRequestHandler")
			}

			started = true
		}
	}

	logger.Info("started-kafka-message-proxy")

	livenessChannel := core.kmp.EnableLivenessChannel(true)

	logger.Info("enabled-kafka-liveness-channel")

	timeout := liveProbeInterval
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {
		case liveness := <-livenessChannel:
			logger.Infow("kafka-manager-thread-liveness-event", log.Fields{"liveness": liveness})
			// there was a state change in Kafka liveness
			if !liveness {
				probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)

				if core.grpcServer != nil {
					logger.Info("kafka-manager-thread-set-server-notready")
				}

				// retry frequently while life is bad
				timeout = notLiveProbeInterval
			} else {
				probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusRunning)

				if core.grpcServer != nil {
					logger.Info("kafka-manager-thread-set-server-ready")
				}

				// retry infrequently while life is good
				timeout = liveProbeInterval
			}
			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}
		case <-timeoutTimer.C:
			logger.Info("kafka-proxy-liveness-recheck")
			// send the liveness probe in a goroutine; we don't want to deadlock ourselves as
			// the liveness probe may wait (and block) writing to our channel.
			go func() {
				err := core.kmp.SendLiveness()
				if err != nil {
					// Catch possible error case if sending liveness after Sarama has been stopped.
					logger.Warnw("error-kafka-send-liveness", log.Fields{"error": err})
				}
			}()
		}
	}
}

// waitUntilKVStoreReachableOrMaxTries will wait until it can connect to a KV store or until maxRetries has been reached
func (core *Core) waitUntilKVStoreReachableOrMaxTries(ctx context.Context, maxRetries int, retryInterval time.Duration) error {
	logger.Infow("verifying-KV-store-connectivity", log.Fields{"host": core.config.KVStoreHost,
		"port": core.config.KVStorePort, "retries": maxRetries, "retryInterval": retryInterval})
	count := 0
	for {
		if !core.kvClient.IsConnectionUp(ctx) {
			logger.Info("KV-store-unreachable")
			if maxRetries != -1 {
				if count >= maxRetries {
					return status.Error(codes.Unavailable, "kv store unreachable")
				}
			}
			count++
			// Take a nap before retrying
			time.Sleep(retryInterval)
			logger.Infow("retry-KV-store-connectivity", log.Fields{"retryCount": count, "maxRetries": maxRetries, "retryInterval": retryInterval})

		} else {
			break
		}
	}
	logger.Info("KV-store-reachable")
	return nil
}

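// registerAdapterRequestHandlers creates the adapter request handler proxy and
// subscribes it to the core topic (core-bound broadcast requests) and to the
// core-pair topic (requests destined for this core pair).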
func (core *Core) registerAdapterRequestHandlers(ctx context.Context, coreInstanceID string, dMgr *device.Manager,
	ldMgr *device.LogicalManager, aMgr *adapter.Manager, cdProxy *model.Proxy, ldProxy *model.Proxy,
) error {
	requestProxy := api.NewAdapterRequestHandlerProxy(coreInstanceID, dMgr, aMgr, cdProxy, ldProxy,
		core.config.LongRunningRequestTimeout, core.config.DefaultRequestTimeout)

	// Register the broadcast topic to handle any core-bound broadcast requests
	if err := core.kmp.SubscribeWithRequestHandlerInterface(kafka.Topic{Name: core.config.CoreTopic}, requestProxy); err != nil {
		logger.Fatalw("Failed-registering-broadcast-handler", log.Fields{"topic": core.config.CoreTopic})
		return err
	}

	// Register the core-pair topic to handle core-bound requests destined to the core pair
	if err := core.kmp.SubscribeWithDefaultRequestHandler(kafka.Topic{Name: core.config.CorePairTopic}, kafka.OffsetNewest); err != nil {
		logger.Fatalw("Failed-registering-pair-handler", log.Fields{"topic": core.config.CorePairTopic})
		return err
	}

	logger.Info("request-handler-registered")
	return nil
}

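// startDeviceManager starts the device manager; it is run as a goroutine from Start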
func (core *Core) startDeviceManager(ctx context.Context) {
	logger.Info("DeviceManager-Starting...")
	core.deviceMgr.Start(ctx)
	logger.Info("DeviceManager-Started")
}

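// startLogicalDeviceManager starts the logical device manager; it is run as a goroutine from Start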
func (core *Core) startLogicalDeviceManager(ctx context.Context) {
	logger.Info("Logical-DeviceManager-Starting...")
	core.logicalDeviceMgr.Start(ctx)
	logger.Info("Logical-DeviceManager-Started")
}

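// startAdapterManager starts the adapter manager; it is run as a goroutine from Start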
func (core *Core) startAdapterManager(ctx context.Context) {
	logger.Info("Adapter-Manager-Starting...")
	err := core.adapterMgr.Start(ctx)
	if err != nil {
		logger.Fatalf("failed-to-start-adapter-manager: error %v ", err)
	}
	logger.Info("Adapter-Manager-Started")
}

/*
 * Thread to monitor kvstore liveness (connection status)
 *
 * This function constantly monitors the liveness state of the kvstore as reported
 * periodically by the backend and updates the status of the kv-store service registered
 * with the rw_core probe.
 *
 * If no liveness event has been seen within a timeout, then the thread will
 * perform a "liveness" check attempt, which will in turn trigger a liveness event on
 * the liveness channel, true or false depending on whether the attempt succeeded.
 *
 * The gRPC server in turn monitors the state of the readiness probe and will
 * start issuing UNAVAILABLE responses while the probe is not ready.
 */
func (core *Core) monitorKvstoreLiveness(ctx context.Context) {
	logger.Info("start-monitoring-kvstore-liveness")

	// Instruct the backend to create a liveness channel for transporting state updates
	livenessChannel := core.backend.EnableLivenessChannel()

	logger.Debug("enabled-kvstore-liveness-channel")

	// Default state for the kvstore is alive for rw_core
	timeout := core.config.LiveProbeInterval
loop:
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {

		case liveness := <-livenessChannel:
			logger.Debugw("received-liveness-change-notification", log.Fields{"liveness": liveness})

			if !liveness {
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusNotReady)

				if core.grpcServer != nil {
					logger.Info("kvstore-set-server-notready")
				}

				timeout = core.config.NotLiveProbeInterval

			} else {
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)

				if core.grpcServer != nil {
					logger.Info("kvstore-set-server-ready")
				}

				timeout = core.config.LiveProbeInterval
			}

			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}

		case <-core.exitChannel:
			break loop

		case <-timeoutTimer.C:
			logger.Info("kvstore-perform-liveness-check-on-timeout")

			// Trigger a liveness check if no liveness update was received within the timeout period.
			// The liveness check will push the live state to the same channel which this routine is
			// reading and processing. Thus, do it asynchronously to avoid blocking on the
			// backend response and to avoid any possibility of deadlock.
			go core.backend.PerformLivenessCheck(ctx)
		}
	}
}