/*
 * Copyright 2018-present Open Networking Foundation

 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at

 * http://www.apache.org/licenses/LICENSE-2.0

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package core

import (
	"context"
	"time"

	"github.com/opencord/voltha-go/rw_core/core/adapter"
	"github.com/opencord/voltha-go/rw_core/core/api"
	"github.com/opencord/voltha-go/rw_core/core/device"
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
	"github.com/opencord/voltha-lib-go/v3/pkg/probe"
)

// startKafkInterContainerProxy is responsible for starting the Kafka Interadapter Proxy
func startKafkInterContainerProxy(ctx context.Context, kafkaClient kafka.Client, host string, port int, coreTopic, affinityRouterTopic string, connectionRetryInterval time.Duration) (kafka.InterContainerProxy, error) {
	logger.Infow("initialize-kafka-manager", log.Fields{"host": host, "port": port, "topic": coreTopic})

	probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPreparing)

	// create the kafka RPC proxy
	kmp := kafka.NewInterContainerProxy(
		kafka.InterContainerHost(host),
		kafka.InterContainerPort(port),
		kafka.MsgClient(kafkaClient),
		kafka.DefaultTopic(&kafka.Topic{Name: coreTopic}),
		kafka.DeviceDiscoveryTopic(&kafka.Topic{Name: affinityRouterTopic}))

	probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusPrepared)

	// wait for connectivity
	logger.Infow("starting-kafka-manager", log.Fields{"host": host,
		"port": port, "topic": coreTopic})

	for {
		// If we haven't started yet, then try to start
		logger.Infow("starting-kafka-proxy", log.Fields{})
		if err := kmp.Start(); err != nil {
			// We failed to start. Delay and then try again later.
			// Don't worry about liveness, as we can't be live until we've started.
			probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
			logger.Infow("error-starting-kafka-messaging-proxy", log.Fields{"error": err})
			select {
			case <-time.After(connectionRetryInterval):
			case <-ctx.Done():
				return nil, ctx.Err()
			}
			continue
		}
		// We started. We only need to do this once.
		// Next we'll fall through and start checking liveness.
		logger.Infow("started-kafka-proxy", log.Fields{})
		break
	}
	return kmp, nil
}
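
// startKafkaProxySketch is a minimal, hypothetical usage sketch (not part of the
// package's real wiring) showing how a caller typically drives the retry loop
// above: pass a cancellable context so a broker outage never blocks shutdown, and
// stop the proxy when it is no longer needed. The broker address, topic names and
// retry interval below are illustrative assumptions only.
func startKafkaProxySketch(kafkaClient kafka.Client) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Blocks until the proxy has started or ctx has been cancelled.
	kmp, err := startKafkInterContainerProxy(ctx, kafkaClient, "voltha-kafka", 9092, "rwcore", "affinityRouter", 5*time.Second)
	if err != nil {
		return err // ctx was cancelled before Kafka became reachable
	}
	// Tear the proxy down again when the caller is done with it.
	defer kmp.Stop()

	// ... use kmp: register request handlers, monitor liveness, invoke RPCs ...
	return nil
}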

/*
 * monitorKafkaLiveness is responsible for monitoring the Kafka Interadapter Proxy connectivity state
 *
 * Any producer that fails to send will cause KafkaInterContainerProxy to
 * post a false event on its liveness channel. Any producer that succeeds in sending
 * will cause KafkaInterContainerProxy to post a true event on its liveness
 * channel. Group receivers also update liveness state, and a receiver will typically
 * indicate a loss of liveness within 3-5 seconds of Kafka going down. Receivers
 * only indicate restoration of liveness if a message is received. During normal
 * operation, messages will be routinely produced and received, automatically
 * indicating liveness state. These routine liveness indications are rate-limited
 * inside sarama_client.
 *
 * This thread monitors the status of KafkaInterContainerProxy's liveness and pushes
 * that state to the core's readiness probes. If no liveness event has been seen
 * within a timeout, then the thread will make an attempt to produce a "liveness"
 * message, which will in turn trigger a liveness event on the liveness channel, true
 * or false depending on whether the attempt succeeded.
 *
 * The gRPC server in turn monitors the state of the readiness probe and will
 * start issuing UNAVAILABLE responses while the probe is not ready.
 *
 * liveProbeInterval -- interval between liveness checks when in a live state
 * notLiveProbeInterval -- interval between liveness checks when in a not-live state
 *
 * liveProbeInterval and notLiveProbeInterval can be configured separately,
 * though the current default is that both are set to 60 seconds.
 */
func monitorKafkaLiveness(ctx context.Context, kmp kafka.InterContainerProxy, liveProbeInterval time.Duration, notLiveProbeInterval time.Duration) {
	logger.Info("started-kafka-message-proxy")

	livenessChannel := kmp.EnableLivenessChannel(true)

	logger.Info("enabled-kafka-liveness-channel")

	timeout := liveProbeInterval
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {
		case liveness := <-livenessChannel:
			logger.Infow("kafka-manager-thread-liveness-event", log.Fields{"liveness": liveness})
			// there was a state change in Kafka liveness
			if !liveness {
				probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
				logger.Info("kafka-manager-thread-set-server-notready")

				// retry frequently while life is bad
				timeout = notLiveProbeInterval
			} else {
				probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusRunning)
				logger.Info("kafka-manager-thread-set-server-ready")

				// retry infrequently while life is good
				timeout = liveProbeInterval
			}
			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}
		case <-timeoutTimer.C:
			logger.Info("kafka-proxy-liveness-recheck")
			// send the liveness probe in a goroutine; we don't want to deadlock ourselves as
			// the liveness probe may wait (and block) writing to our channel.
			go func() {
				err := kmp.SendLiveness()
				if err != nil {
					// Catch the possible error case of sending liveness after Sarama has been stopped.
					logger.Warnw("error-kafka-send-liveness", log.Fields{"error": err})
				}
			}()
		case <-ctx.Done():
			return // just exit
		}
	}
}
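
// monitorKafkaLivenessSketch is a minimal, hypothetical sketch (not part of the
// package's real wiring) of how the monitor above is typically launched. It assumes
// that probe.UpdateStatusFromContext only takes effect when a probe is attached to
// the supplied context, so a probe is registered and embedded here before the
// monitor goroutine starts; the 60-second intervals are illustrative defaults.
func monitorKafkaLivenessSketch(ctx context.Context, kmp kafka.InterContainerProxy) {
	// Register the service whose readiness this monitor will update, and hand the
	// probe to the monitor through its context.
	p := &probe.Probe{}
	p.RegisterService("message-bus")
	probeCtx := context.WithValue(ctx, probe.ProbeContextKey, p)

	// Run until ctx is cancelled; liveness events and periodic rechecks flip the
	// "message-bus" status between running and not-ready.
	go monitorKafkaLiveness(probeCtx, kmp, 60*time.Second, 60*time.Second)
}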

func registerAdapterRequestHandlers(kmp kafka.InterContainerProxy, dMgr *device.Manager, aMgr *adapter.Manager, coreTopic, corePairTopic string) {
	requestProxy := api.NewAdapterRequestHandlerProxy(dMgr, aMgr)

	// Register the broadcast topic to handle any core-bound broadcast requests
	if err := kmp.SubscribeWithRequestHandlerInterface(kafka.Topic{Name: coreTopic}, requestProxy); err != nil {
		logger.Fatalw("Failed-registering-broadcast-handler", log.Fields{"topic": coreTopic})
	}

	// Register the core-pair topic to handle core-bound requests destined to the core pair
	if err := kmp.SubscribeWithDefaultRequestHandler(kafka.Topic{Name: corePairTopic}, kafka.OffsetNewest); err != nil {
		logger.Fatalw("Failed-registering-pair-handler", log.Fields{"topic": corePairTopic})
	}

	logger.Info("request-handler-registered")
}
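
// wireKafkaSketch is a minimal, hypothetical sketch (not the package's actual
// startup code) showing the order in which the helpers in this file are used
// together: start the proxy, register the handlers for core-bound requests, then
// keep the readiness probe up to date in the background. Topic names and intervals
// are illustrative assumptions.
func wireKafkaSketch(ctx context.Context, kafkaClient kafka.Client, dMgr *device.Manager, aMgr *adapter.Manager) (kafka.InterContainerProxy, error) {
	kmp, err := startKafkInterContainerProxy(ctx, kafkaClient, "voltha-kafka", 9092, "rwcore", "affinityRouter", 5*time.Second)
	if err != nil {
		return nil, err
	}

	// Subscriptions should be in place before adapters start sending requests.
	registerAdapterRequestHandlers(kmp, dMgr, aMgr, "rwcore", "rwcore_pair")

	// Track Kafka liveness for the lifetime of ctx (assumes ctx carries the probe).
	go monitorKafkaLiveness(ctx, kmp, 60*time.Second, 60*time.Second)

	return kmp, nil
}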