blob: 5043d476c2fbd490f85384b7420f75ee6fb8005d [file] [log] [blame]
khenaidoob9203542018-09-17 22:56:37 -04001/*
2 * Copyright 2018-present Open Networking Foundation
3
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7
8 * http://www.apache.org/licenses/LICENSE-2.0
9
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
npujar1d86a522019-11-14 17:11:16 +053016
khenaidoob9203542018-09-17 22:56:37 -040017package core
18
19import (
20 "context"
Scott Baker2d87ee32020-03-03 13:04:01 -080021 "sync"
npujar1d86a522019-11-14 17:11:16 +053022 "time"
23
sbarbari17d7e222019-11-05 10:02:29 -050024 "github.com/opencord/voltha-go/db/model"
khenaidoob9203542018-09-17 22:56:37 -040025 "github.com/opencord/voltha-go/rw_core/config"
serkant.uluderya2ae470f2020-01-21 11:13:09 -080026 "github.com/opencord/voltha-lib-go/v3/pkg/db"
27 "github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
28 grpcserver "github.com/opencord/voltha-lib-go/v3/pkg/grpc"
29 "github.com/opencord/voltha-lib-go/v3/pkg/kafka"
30 "github.com/opencord/voltha-lib-go/v3/pkg/log"
31 "github.com/opencord/voltha-lib-go/v3/pkg/probe"
32 "github.com/opencord/voltha-protos/v3/go/voltha"
khenaidoob9203542018-09-17 22:56:37 -040033 "google.golang.org/grpc"
khenaidoob3244212019-08-27 14:32:27 -040034 "google.golang.org/grpc/codes"
35 "google.golang.org/grpc/status"
khenaidoob9203542018-09-17 22:56:37 -040036)
37
// Core represent read,write core attributes
type Core struct {
	instanceID        string                     // unique identifier of this core instance
	deviceMgr         *DeviceManager             // manages physical devices
	logicalDeviceMgr  *LogicalDeviceManager      // manages logical (openflow) devices
	grpcServer        *grpcserver.GrpcServer     // northbound gRPC server
	grpcNBIAPIHandler *APIHandler                // handler registered with the gRPC server
	adapterMgr        *AdapterManager            // manages adapter registration/state
	config            *config.RWCoreFlags        // command-line/runtime configuration
	kmp               kafka.InterContainerProxy  // inter-container kafka messaging proxy
	clusterDataProxy  *model.Proxy               // proxy over the cluster-shared KV data
	localDataProxy    *model.Proxy               // proxy over core-local KV data
	exitChannel       chan struct{}              // closed in Stop to signal monitor goroutines
	stopOnce          sync.Once                  // guarantees Stop's shutdown runs once
	kvClient          kvstore.Client             // raw KV store client
	backend           db.Backend                 // KV backend wrapping kvClient (liveness-aware)
	kafkaClient       kafka.Client               // raw kafka client handed to kmp/adapterMgr
}
56
npujar1d86a522019-11-14 17:11:16 +053057// NewCore creates instance of rw core
Thomas Lee Se5a44012019-11-07 20:32:24 +053058func NewCore(ctx context.Context, id string, cf *config.RWCoreFlags, kvClient kvstore.Client, kafkaClient kafka.Client) *Core {
khenaidoob9203542018-09-17 22:56:37 -040059 var core Core
npujar1d86a522019-11-14 17:11:16 +053060 core.instanceID = id
Scott Baker2d87ee32020-03-03 13:04:01 -080061 core.exitChannel = make(chan struct{})
khenaidoob9203542018-09-17 22:56:37 -040062 core.config = cf
Richard Jankowskie4d77662018-10-17 13:53:21 -040063 core.kvClient = kvClient
khenaidoo43c82122018-11-22 18:38:28 -050064 core.kafkaClient = kafkaClient
Richard Jankowskie4d77662018-10-17 13:53:21 -040065
Girish Kumar4d3887d2019-11-22 14:22:05 +000066 // Configure backend to push Liveness Status at least every (cf.LiveProbeInterval / 2) seconds
67 // so as to avoid trigger of Liveness check (due to Liveness timeout) when backend is alive
68 livenessChannelInterval := cf.LiveProbeInterval / 2
69
Richard Jankowskie4d77662018-10-17 13:53:21 -040070 // Setup the KV store
Girish Kumar4d3887d2019-11-22 14:22:05 +000071 core.backend = db.Backend{
72 Client: kvClient,
73 StoreType: cf.KVStoreType,
74 Host: cf.KVStoreHost,
75 Port: cf.KVStorePort,
76 Timeout: cf.KVStoreTimeout,
77 LivenessChannelInterval: livenessChannelInterval,
78 PathPrefix: cf.KVStoreDataPrefix}
khenaidoob9203542018-09-17 22:56:37 -040079 return &core
80}
81
npujar1d86a522019-11-14 17:11:16 +053082// Start brings up core services
Thomas Lee Se5a44012019-11-07 20:32:24 +053083func (core *Core) Start(ctx context.Context) error {
David K. Bainbridgeb4a9ab02019-09-20 15:12:16 -070084
85 // If the context has a probe then fetch it and register our services
86 var p *probe.Probe
87 if value := ctx.Value(probe.ProbeContextKey); value != nil {
88 if _, ok := value.(*probe.Probe); ok {
89 p = value.(*probe.Probe)
90 p.RegisterService(
91 "message-bus",
92 "kv-store",
93 "device-manager",
94 "logical-device-manager",
95 "adapter-manager",
96 "grpc-service",
97 )
98 }
99 }
100
Girish Kumarf56a4682020-03-20 20:07:46 +0000101 logger.Info("starting-core-services", log.Fields{"coreId": core.instanceID})
khenaidoob3244212019-08-27 14:32:27 -0400102
103 // Wait until connection to KV Store is up
104 if err := core.waitUntilKVStoreReachableOrMaxTries(ctx, core.config.MaxConnectionRetries, core.config.ConnectionRetryInterval); err != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000105 logger.Fatal("Unable-to-connect-to-KV-store")
khenaidoob3244212019-08-27 14:32:27 -0400106 }
David K. Bainbridgeb4a9ab02019-09-20 15:12:16 -0700107 if p != nil {
108 p.UpdateStatus("kv-store", probe.ServiceStatusRunning)
109 }
Thomas Lee Se5a44012019-11-07 20:32:24 +0530110
Kent Hagerman4f355f52020-03-30 16:01:33 -0400111 core.clusterDataProxy = model.NewProxy(&core.backend, "/")
112 core.localDataProxy = model.NewProxy(&core.backend, "/")
khenaidoob3244212019-08-27 14:32:27 -0400113
Scott Bakeree6a0872019-10-29 15:59:52 -0700114 // core.kmp must be created before deviceMgr and adapterMgr, as they will make
115 // private copies of the poiner to core.kmp.
npujar467fe752020-01-16 20:17:45 +0530116 core.initKafkaManager(ctx)
khenaidoob3244212019-08-27 14:32:27 -0400117
Girish Kumarf56a4682020-03-20 20:07:46 +0000118 logger.Debugw("values", log.Fields{"kmp": core.kmp})
Richard Jankowski199fd862019-03-18 14:49:51 -0400119 core.deviceMgr = newDeviceManager(core)
Kent Hagerman16ce36a2019-12-17 13:40:53 -0500120 core.adapterMgr = newAdapterManager(core.clusterDataProxy, core.instanceID, core.kafkaClient, core.deviceMgr)
khenaidooba6b6c42019-08-02 09:11:56 -0400121 core.deviceMgr.adapterMgr = core.adapterMgr
khenaidoo2c6a0992019-04-29 13:46:56 -0400122 core.logicalDeviceMgr = newLogicalDeviceManager(core, core.deviceMgr, core.kmp, core.clusterDataProxy, core.config.DefaultCoreTimeout)
khenaidoo54e0ddf2019-02-27 16:21:33 -0500123
Scott Bakeree6a0872019-10-29 15:59:52 -0700124 // Start the KafkaManager. This must be done after the deviceMgr, adapterMgr, and
125 // logicalDeviceMgr have been created, as once the kmp is started, it will register
126 // the above with the kmp.
127
128 go core.startKafkaManager(ctx,
129 core.config.ConnectionRetryInterval,
130 core.config.LiveProbeInterval,
131 core.config.NotLiveProbeInterval)
khenaidoob3244212019-08-27 14:32:27 -0400132
khenaidoob9203542018-09-17 22:56:37 -0400133 go core.startDeviceManager(ctx)
134 go core.startLogicalDeviceManager(ctx)
135 go core.startGRPCService(ctx)
khenaidoo21d51152019-02-01 13:48:37 -0500136 go core.startAdapterManager(ctx)
Girish Kumar4d3887d2019-11-22 14:22:05 +0000137 go core.monitorKvstoreLiveness(ctx)
khenaidoob9203542018-09-17 22:56:37 -0400138
Girish Kumarf56a4682020-03-20 20:07:46 +0000139 logger.Info("core-services-started")
Thomas Lee Se5a44012019-11-07 20:32:24 +0530140 return nil
khenaidoob9203542018-09-17 22:56:37 -0400141}
142
// Stop brings down core services
func (core *Core) Stop(ctx context.Context) {
	// stopOnce guarantees the shutdown sequence below runs at most once,
	// even if Stop is called concurrently or repeatedly.
	core.stopOnce.Do(func() {
		logger.Info("stopping-adaptercore")
		// Signal to the KVStoreMonitor that we are stopping.
		close(core.exitChannel)
		// Stop all the started services. Each may be nil if Start never
		// reached the point of creating it, so guard every stop call.
		if core.grpcServer != nil {
			core.grpcServer.Stop()
		}
		if core.logicalDeviceMgr != nil {
			core.logicalDeviceMgr.stop(ctx)
		}
		if core.deviceMgr != nil {
			core.deviceMgr.stop(ctx)
		}
		if core.kmp != nil {
			core.kmp.Stop()
		}
		logger.Info("adaptercore-stopped")
	})
}
165
khenaidoo631fe542019-05-31 15:44:43 -0400166//startGRPCService creates the grpc service handlers, registers it to the grpc server and starts the server
khenaidoob9203542018-09-17 22:56:37 -0400167func (core *Core) startGRPCService(ctx context.Context) {
168 // create an insecure gserver server
Scott Bakeree6a0872019-10-29 15:59:52 -0700169 core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false, probe.GetProbeFromContext(ctx))
Girish Kumarf56a4682020-03-20 20:07:46 +0000170 logger.Info("grpc-server-created")
khenaidoob9203542018-09-17 22:56:37 -0400171
khenaidoo54e0ddf2019-02-27 16:21:33 -0500172 core.grpcNBIAPIHandler = NewAPIHandler(core)
Girish Kumarf56a4682020-03-20 20:07:46 +0000173 logger.Infow("grpc-handler", log.Fields{"core_binding_key": core.config.CoreBindingKey})
Richard Jankowskidbab94a2018-12-06 16:20:25 -0500174 core.logicalDeviceMgr.setGrpcNbiHandler(core.grpcNBIAPIHandler)
khenaidoob9203542018-09-17 22:56:37 -0400175 // Create a function to register the core GRPC service with the GRPC server
176 f := func(gs *grpc.Server) {
177 voltha.RegisterVolthaServiceServer(
178 gs,
Richard Jankowskidbab94a2018-12-06 16:20:25 -0500179 core.grpcNBIAPIHandler,
khenaidoob9203542018-09-17 22:56:37 -0400180 )
181 }
182
183 core.grpcServer.AddService(f)
Girish Kumarf56a4682020-03-20 20:07:46 +0000184 logger.Info("grpc-service-added")
khenaidoob9203542018-09-17 22:56:37 -0400185
David K. Bainbridgeb4a9ab02019-09-20 15:12:16 -0700186 /*
187 * Start the GRPC server
188 *
189 * This is a bit sub-optimal here as the grpcServer.Start call does not return (blocks)
190 * until something fails, but we want to send a "start" status update. As written this
191 * means that we are actually sending the "start" status update before the server is
192 * started, which means it is possible that the status is "running" before it actually is.
193 *
194 * This means that there is a small window in which the core could return its status as
195 * ready, when it really isn't.
196 */
197 probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusRunning)
Girish Kumarf56a4682020-03-20 20:07:46 +0000198 logger.Info("grpc-server-started")
npujar467fe752020-01-16 20:17:45 +0530199 core.grpcServer.Start(ctx)
David K. Bainbridgeb4a9ab02019-09-20 15:12:16 -0700200 probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusStopped)
khenaidoob9203542018-09-17 22:56:37 -0400201}
202
Scott Bakeree6a0872019-10-29 15:59:52 -0700203// Initialize the kafka manager, but we will start it later
npujar467fe752020-01-16 20:17:45 +0530204func (core *Core) initKafkaManager(ctx context.Context) {
Girish Kumarf56a4682020-03-20 20:07:46 +0000205 logger.Infow("initialize-kafka-manager", log.Fields{"host": core.config.KafkaAdapterHost,
khenaidoob9203542018-09-17 22:56:37 -0400206 "port": core.config.KafkaAdapterPort, "topic": core.config.CoreTopic})
Scott Bakeree6a0872019-10-29 15:59:52 -0700207
208 probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPreparing)
209
210 // create the proxy
npujar467fe752020-01-16 20:17:45 +0530211 core.kmp = kafka.NewInterContainerProxy(
khenaidoo43c82122018-11-22 18:38:28 -0500212 kafka.InterContainerHost(core.config.KafkaAdapterHost),
213 kafka.InterContainerPort(core.config.KafkaAdapterPort),
214 kafka.MsgClient(core.kafkaClient),
khenaidoo79232702018-12-04 11:00:41 -0500215 kafka.DefaultTopic(&kafka.Topic{Name: core.config.CoreTopic}),
npujar467fe752020-01-16 20:17:45 +0530216 kafka.DeviceDiscoveryTopic(&kafka.Topic{Name: core.config.AffinityRouterTopic}))
Scott Bakeree6a0872019-10-29 15:59:52 -0700217
218 probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPrepared)
Scott Bakeree6a0872019-10-29 15:59:52 -0700219}
220
221/*
222 * KafkaMonitorThread
223 *
npujar1d86a522019-11-14 17:11:16 +0530224 * Responsible for starting the Kafka Interadapter Proxy and monitoring its liveness
Scott Bakeree6a0872019-10-29 15:59:52 -0700225 * state.
226 *
227 * Any producer that fails to send will cause KafkaInterContainerProxy to
228 * post a false event on its liveness channel. Any producer that succeeds in sending
229 * will cause KafkaInterContainerProxy to post a true event on its liveness
npujar1d86a522019-11-14 17:11:16 +0530230 * channel. Group receivers also update liveness state, and a receiver will typically
Scott Bakeree6a0872019-10-29 15:59:52 -0700231 * indicate a loss of liveness within 3-5 seconds of Kafka going down. Receivers
232 * only indicate restoration of liveness if a message is received. During normal
233 * operation, messages will be routinely produced and received, automatically
234 * indicating liveness state. These routine liveness indications are rate-limited
235 * inside sarama_client.
236 *
237 * This thread monitors the status of KafkaInterContainerProxy's liveness and pushes
238 * that state to the core's readiness probes. If no liveness event has been seen
239 * within a timeout, then the thread will make an attempt to produce a "liveness"
240 * message, which will in turn trigger a liveness event on the liveness channel, true
241 * or false depending on whether the attempt succeeded.
242 *
243 * The gRPC server in turn monitors the state of the readiness probe and will
244 * start issuing UNAVAILABLE response while the probe is not ready.
245 *
246 * startupRetryInterval -- interval between attempts to start
247 * liveProbeInterval -- interval between liveness checks when in a live state
248 * notLiveProbeInterval -- interval between liveness checks when in a notLive state
249 *
250 * liveProbeInterval and notLiveProbeInterval can be configured separately,
251 * though the current default is that both are set to 60 seconds.
252 */
253
Girish Kumar4d3887d2019-11-22 14:22:05 +0000254func (core *Core) startKafkaManager(ctx context.Context, startupRetryInterval time.Duration, liveProbeInterval time.Duration, notLiveProbeInterval time.Duration) {
Girish Kumarf56a4682020-03-20 20:07:46 +0000255 logger.Infow("starting-kafka-manager-thread", log.Fields{"host": core.config.KafkaAdapterHost,
Scott Bakeree6a0872019-10-29 15:59:52 -0700256 "port": core.config.KafkaAdapterPort, "topic": core.config.CoreTopic})
257
258 started := false
259 for !started {
260 // If we haven't started yet, then try to start
Girish Kumarf56a4682020-03-20 20:07:46 +0000261 logger.Infow("starting-kafka-proxy", log.Fields{})
Scott Bakeree6a0872019-10-29 15:59:52 -0700262 if err := core.kmp.Start(); err != nil {
263 // We failed to start. Delay and then try again later.
264 // Don't worry about liveness, as we can't be live until we've started.
265 probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
Girish Kumarf56a4682020-03-20 20:07:46 +0000266 logger.Infow("error-starting-kafka-messaging-proxy", log.Fields{"error": err})
Girish Kumar4d3887d2019-11-22 14:22:05 +0000267 time.Sleep(startupRetryInterval)
khenaidoob3244212019-08-27 14:32:27 -0400268 } else {
Scott Bakeree6a0872019-10-29 15:59:52 -0700269 // We started. We only need to do this once.
270 // Next we'll fall through and start checking liveness.
Girish Kumarf56a4682020-03-20 20:07:46 +0000271 logger.Infow("started-kafka-proxy", log.Fields{})
Scott Bakeree6a0872019-10-29 15:59:52 -0700272
273 // cannot do this until after the kmp is started
npujar1d86a522019-11-14 17:11:16 +0530274 if err := core.registerAdapterRequestHandlers(ctx, core.instanceID, core.deviceMgr, core.logicalDeviceMgr, core.adapterMgr, core.clusterDataProxy, core.localDataProxy); err != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000275 logger.Fatal("Failure-registering-adapterRequestHandler")
Scott Bakeree6a0872019-10-29 15:59:52 -0700276 }
277
278 started = true
khenaidoob3244212019-08-27 14:32:27 -0400279 }
khenaidoob9203542018-09-17 22:56:37 -0400280 }
Scott Bakeree6a0872019-10-29 15:59:52 -0700281
Girish Kumarf56a4682020-03-20 20:07:46 +0000282 logger.Info("started-kafka-message-proxy")
Scott Bakeree6a0872019-10-29 15:59:52 -0700283
284 livenessChannel := core.kmp.EnableLivenessChannel(true)
285
Girish Kumarf56a4682020-03-20 20:07:46 +0000286 logger.Info("enabled-kafka-liveness-channel")
Scott Bakeree6a0872019-10-29 15:59:52 -0700287
Girish Kumar4d3887d2019-11-22 14:22:05 +0000288 timeout := liveProbeInterval
Scott Bakeree6a0872019-10-29 15:59:52 -0700289 for {
290 timeoutTimer := time.NewTimer(timeout)
291 select {
292 case liveness := <-livenessChannel:
Girish Kumarf56a4682020-03-20 20:07:46 +0000293 logger.Infow("kafka-manager-thread-liveness-event", log.Fields{"liveness": liveness})
Scott Bakeree6a0872019-10-29 15:59:52 -0700294 // there was a state change in Kafka liveness
295 if !liveness {
296 probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
297
298 if core.grpcServer != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000299 logger.Info("kafka-manager-thread-set-server-notready")
Scott Bakeree6a0872019-10-29 15:59:52 -0700300 }
301
302 // retry frequently while life is bad
Girish Kumar4d3887d2019-11-22 14:22:05 +0000303 timeout = notLiveProbeInterval
Scott Bakeree6a0872019-10-29 15:59:52 -0700304 } else {
305 probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusRunning)
306
307 if core.grpcServer != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000308 logger.Info("kafka-manager-thread-set-server-ready")
Scott Bakeree6a0872019-10-29 15:59:52 -0700309 }
310
311 // retry infrequently while life is good
Girish Kumar4d3887d2019-11-22 14:22:05 +0000312 timeout = liveProbeInterval
Scott Bakeree6a0872019-10-29 15:59:52 -0700313 }
314 if !timeoutTimer.Stop() {
315 <-timeoutTimer.C
316 }
317 case <-timeoutTimer.C:
Girish Kumarf56a4682020-03-20 20:07:46 +0000318 logger.Info("kafka-proxy-liveness-recheck")
Scott Bakeree6a0872019-10-29 15:59:52 -0700319 // send the liveness probe in a goroutine; we don't want to deadlock ourselves as
320 // the liveness probe may wait (and block) writing to our channel.
321 go func() {
322 err := core.kmp.SendLiveness()
323 if err != nil {
324 // Catch possible error case if sending liveness after Sarama has been stopped.
Girish Kumarf56a4682020-03-20 20:07:46 +0000325 logger.Warnw("error-kafka-send-liveness", log.Fields{"error": err})
Scott Bakeree6a0872019-10-29 15:59:52 -0700326 }
327 }()
328 }
329 }
khenaidoob9203542018-09-17 22:56:37 -0400330}
331
khenaidoob3244212019-08-27 14:32:27 -0400332// waitUntilKVStoreReachableOrMaxTries will wait until it can connect to a KV store or until maxtries has been reached
Girish Kumar4d3887d2019-11-22 14:22:05 +0000333func (core *Core) waitUntilKVStoreReachableOrMaxTries(ctx context.Context, maxRetries int, retryInterval time.Duration) error {
Girish Kumarf56a4682020-03-20 20:07:46 +0000334 logger.Infow("verifying-KV-store-connectivity", log.Fields{"host": core.config.KVStoreHost,
khenaidoob3244212019-08-27 14:32:27 -0400335 "port": core.config.KVStorePort, "retries": maxRetries, "retryInterval": retryInterval})
khenaidoob3244212019-08-27 14:32:27 -0400336 count := 0
337 for {
npujar467fe752020-01-16 20:17:45 +0530338 if !core.kvClient.IsConnectionUp(ctx) {
Girish Kumarf56a4682020-03-20 20:07:46 +0000339 logger.Info("KV-store-unreachable")
khenaidoob3244212019-08-27 14:32:27 -0400340 if maxRetries != -1 {
341 if count >= maxRetries {
342 return status.Error(codes.Unavailable, "kv store unreachable")
343 }
344 }
npujar1d86a522019-11-14 17:11:16 +0530345 count++
khenaidoob3244212019-08-27 14:32:27 -0400346 // Take a nap before retrying
Girish Kumar4d3887d2019-11-22 14:22:05 +0000347 time.Sleep(retryInterval)
Girish Kumarf56a4682020-03-20 20:07:46 +0000348 logger.Infow("retry-KV-store-connectivity", log.Fields{"retryCount": count, "maxRetries": maxRetries, "retryInterval": retryInterval})
khenaidoob3244212019-08-27 14:32:27 -0400349
350 } else {
351 break
352 }
353 }
Girish Kumarf56a4682020-03-20 20:07:46 +0000354 logger.Info("KV-store-reachable")
khenaidoob3244212019-08-27 14:32:27 -0400355 return nil
356}
357
npujar1d86a522019-11-14 17:11:16 +0530358func (core *Core) registerAdapterRequestHandlers(ctx context.Context, coreInstanceID string, dMgr *DeviceManager,
khenaidoo297cd252019-02-07 22:10:23 -0500359 ldMgr *LogicalDeviceManager, aMgr *AdapterManager, cdProxy *model.Proxy, ldProxy *model.Proxy,
khenaidoo54e0ddf2019-02-27 16:21:33 -0500360) error {
npujar1d86a522019-11-14 17:11:16 +0530361 requestProxy := NewAdapterRequestHandlerProxy(core, coreInstanceID, dMgr, ldMgr, aMgr, cdProxy, ldProxy,
David Bainbridged1afd662020-03-26 18:27:41 -0700362 core.config.LongRunningRequestTimeout, core.config.DefaultRequestTimeout)
khenaidoob9203542018-09-17 22:56:37 -0400363
khenaidoo54e0ddf2019-02-27 16:21:33 -0500364 // Register the broadcast topic to handle any core-bound broadcast requests
365 if err := core.kmp.SubscribeWithRequestHandlerInterface(kafka.Topic{Name: core.config.CoreTopic}, requestProxy); err != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000366 logger.Fatalw("Failed-registering-broadcast-handler", log.Fields{"topic": core.config.CoreTopic})
khenaidoo54e0ddf2019-02-27 16:21:33 -0500367 return err
368 }
369
Kent Hagermana6d0c362019-07-30 12:50:21 -0400370 // Register the core-pair topic to handle core-bound requests destined to the core pair
371 if err := core.kmp.SubscribeWithDefaultRequestHandler(kafka.Topic{Name: core.config.CorePairTopic}, kafka.OffsetNewest); err != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000372 logger.Fatalw("Failed-registering-pair-handler", log.Fields{"topic": core.config.CorePairTopic})
Kent Hagermana6d0c362019-07-30 12:50:21 -0400373 return err
374 }
375
Girish Kumarf56a4682020-03-20 20:07:46 +0000376 logger.Info("request-handler-registered")
khenaidoob9203542018-09-17 22:56:37 -0400377 return nil
378}
379
// startDeviceManager starts the device manager; blocks until the manager's
// start routine returns.
func (core *Core) startDeviceManager(ctx context.Context) {
	logger.Info("DeviceManager-Starting...")
	core.deviceMgr.start(ctx, core.logicalDeviceMgr)
	logger.Info("DeviceManager-Started")
}
385
// startLogicalDeviceManager starts the logical device manager; blocks until
// the manager's start routine returns.
func (core *Core) startLogicalDeviceManager(ctx context.Context) {
	logger.Info("Logical-DeviceManager-Starting...")
	core.logicalDeviceMgr.start(ctx)
	logger.Info("Logical-DeviceManager-Started")
}
khenaidoo21d51152019-02-01 13:48:37 -0500391
392func (core *Core) startAdapterManager(ctx context.Context) {
Girish Kumarf56a4682020-03-20 20:07:46 +0000393 logger.Info("Adapter-Manager-Starting...")
Thomas Lee Se5a44012019-11-07 20:32:24 +0530394 err := core.adapterMgr.start(ctx)
395 if err != nil {
Girish Kumarf56a4682020-03-20 20:07:46 +0000396 logger.Fatalf("failed-to-start-adapter-manager: error %v ", err)
Thomas Lee Se5a44012019-11-07 20:32:24 +0530397 }
Girish Kumarf56a4682020-03-20 20:07:46 +0000398 logger.Info("Adapter-Manager-Started")
William Kurkiandaa6bb22019-03-07 12:26:28 -0500399}
Girish Kumar4d3887d2019-11-22 14:22:05 +0000400
/*
* Thread to monitor kvstore Liveness (connection status)
*
* This function constantly monitors Liveness State of kvstore as reported
* periodically by backend and updates the Status of kv-store service registered
* with rw_core probe.
*
* If no liveness event has been seen within a timeout, then the thread will
* perform a "liveness" check attempt, which will in turn trigger a liveness event on
* the liveness channel, true or false depending on whether the attempt succeeded.
*
* The gRPC server in turn monitors the state of the readiness probe and will
* start issuing UNAVAILABLE response while the probe is not ready.
 */
func (core *Core) monitorKvstoreLiveness(ctx context.Context) {
	logger.Info("start-monitoring-kvstore-liveness")

	// Instruct backend to create Liveness channel for transporting state updates
	livenessChannel := core.backend.EnableLivenessChannel()

	logger.Debug("enabled-kvstore-liveness-channel")

	// Default state for kvstore is alive for rw_core
	timeout := core.config.LiveProbeInterval
loop:
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {

		case liveness := <-livenessChannel:
			logger.Debugw("received-liveness-change-notification", log.Fields{"liveness": liveness})

			if !liveness {
				// KV store went not-live: mark probe not-ready and poll faster.
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusNotReady)

				if core.grpcServer != nil {
					logger.Info("kvstore-set-server-notready")
				}

				timeout = core.config.NotLiveProbeInterval

			} else {
				// KV store is live: mark probe running and poll at the slower rate.
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)

				if core.grpcServer != nil {
					logger.Info("kvstore-set-server-ready")
				}

				timeout = core.config.LiveProbeInterval
			}

			// Stop and drain the timer so a stale expiry cannot fire on the
			// next iteration.
			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}

		case <-core.exitChannel:
			// exitChannel is closed by Stop(); terminate the monitor loop.
			break loop

		case <-timeoutTimer.C:
			logger.Info("kvstore-perform-liveness-check-on-timeout")

			// Trigger Liveness check if no liveness update received within the timeout period.
			// The Liveness check will push Live state to same channel which this routine is
			// reading and processing. This, do it asynchronously to avoid blocking for
			// backend response and avoid any possibility of deadlock
			go core.backend.PerformLivenessCheck(ctx)
		}
	}
}
469}