/*
 * Copyright 2018-present Open Networking Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package core

import (
	"context"
	"time"

	"github.com/opencord/voltha-go/db/model"
	"github.com/opencord/voltha-go/ro_core/config"
	"github.com/opencord/voltha-lib-go/v2/pkg/db"
	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
	grpcserver "github.com/opencord/voltha-lib-go/v2/pkg/grpc"
	"github.com/opencord/voltha-lib-go/v2/pkg/log"
	"github.com/opencord/voltha-lib-go/v2/pkg/probe"
	"github.com/opencord/voltha-protos/v2/go/voltha"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
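
// Core holds the state of a single read-only VOLTHA core instance: the
// cluster and local data model roots and proxies, the managers built on top
// of them, the northbound gRPC server, and the KV store client and backend.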
type Core struct {
	instanceId        string
	genericMgr        *ModelProxyManager
	deviceMgr         *DeviceManager
	logicalDeviceMgr  *LogicalDeviceManager
	grpcServer        *grpcserver.GrpcServer
	grpcNBIAPIHandler *APIHandler
	config            *config.ROCoreFlags
	clusterDataRoot   model.Root
	localDataRoot     model.Root
	clusterDataProxy  *model.Proxy
	localDataProxy    *model.Proxy
	exitChannel       chan int
	kvClient          kvstore.Client
	backend           db.Backend
}

func init() {
	log.AddPackage(log.JSON, log.DebugLevel, nil)
}
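
// NewCore builds a core instance from the given id, configuration flags and
// an already-created KV store client, wiring up the KV backend and the
// cluster/local data model roots and proxies.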
func NewCore(id string, cf *config.ROCoreFlags, kvClient kvstore.Client) *Core {
	var core Core
	core.instanceId = id
	core.exitChannel = make(chan int, 1)
	core.config = cf
	core.kvClient = kvClient

	// Configure the backend to push a liveness status at least every
	// cf.LiveProbeInterval / 2 seconds, so that a liveness timeout is not
	// triggered while the backend is actually alive.
	livenessChannelInterval := cf.LiveProbeInterval / 2

	// Set up the KV store backend directly.
	// Do not call the NewBackend constructor; it creates its own KV client,
	// whereas this core must share the client passed in.
	core.backend = db.Backend{
		Client:                  kvClient,
		StoreType:               cf.KVStoreType,
		Host:                    cf.KVStoreHost,
		Port:                    cf.KVStorePort,
		Timeout:                 cf.KVStoreTimeout,
		LivenessChannelInterval: livenessChannelInterval,
		PathPrefix:              "service/voltha"}
	core.clusterDataRoot = model.NewRoot(&voltha.Voltha{}, &core.backend)
	core.localDataRoot = model.NewRoot(&voltha.CoreInstance{}, &core.backend)
	core.clusterDataProxy = core.clusterDataRoot.CreateProxy(context.Background(), "/", false)
	core.localDataProxy = core.localDataRoot.CreateProxy(context.Background(), "/", false)
	return &core
}
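
// A minimal usage sketch (hypothetical, for illustration only; the real
// wiring lives in ro_core's main). The etcd address, the instance id, and
// the NewEtcdClient signature are assumptions based on voltha-lib-go v2:
//
//	kvClient, err := kvstore.NewEtcdClient("127.0.0.1:2379", 5)
//	if err != nil {
//		log.Fatal("failed-to-create-kv-client")
//	}
//	core := NewCore("ro-core-0", cf, kvClient)
//	core.Start(context.Background())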

// waitUntilKVStoreReachableOrMaxTries waits until it can connect to the KV
// store or until maxRetries attempts have been exhausted; a maxRetries of -1
// retries forever.
func (core *Core) waitUntilKVStoreReachableOrMaxTries(ctx context.Context, maxRetries int, retryInterval time.Duration) error {
	log.Infow("verifying-KV-store-connectivity", log.Fields{"host": core.config.KVStoreHost,
		"port": core.config.KVStorePort, "retries": maxRetries, "retryInterval": retryInterval})

	// Get the timeout in seconds, with 1 second as the minimum
	timeout := int(core.config.CoreTimeout.Seconds())
	if timeout < 1 {
		timeout = 1
	}
	count := 0
	for {
		if core.kvClient.IsConnectionUp(timeout) {
			break
		}
		log.Info("KV-store-unreachable")
		if maxRetries != -1 && count >= maxRetries {
			return status.Error(codes.Unavailable, "kv store unreachable")
		}
		count++
		// Take a nap before retrying
		time.Sleep(retryInterval)
		log.Infow("retry-KV-store-connectivity", log.Fields{"retryCount": count, "maxRetries": maxRetries, "retryInterval": retryInterval})
	}
	log.Info("KV-store-reachable")
	return nil
}
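
// Start brings the core up: it waits for the KV store to become reachable,
// marks the kv-store probe service as running, creates the managers, and
// launches the device manager, logical device manager, gRPC service and KV
// store liveness monitor, each in its own goroutine.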
func (core *Core) Start(ctx context.Context) {
	log.Infow("starting-adaptercore", log.Fields{"coreId": core.instanceId})

	// Wait until the connection to the KV store is up
	if err := core.waitUntilKVStoreReachableOrMaxTries(ctx, core.config.MaxConnectionRetries, core.config.ConnectionRetryInterval); err != nil {
		log.Fatal("Unable-to-connect-to-KV-store")
	}

	probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)

	core.genericMgr = newModelProxyManager(core.clusterDataProxy)
	core.deviceMgr = newDeviceManager(core.clusterDataProxy, core.instanceId)
	core.logicalDeviceMgr = newLogicalDeviceManager(core.deviceMgr, core.clusterDataProxy)
	go core.startDeviceManager(ctx)
	go core.startLogicalDeviceManager(ctx)
	go core.startGRPCService(ctx)
	go core.monitorKvstoreLiveness(ctx)

	log.Info("adaptercore-started")
}
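
// Stop signals the core to exit via its exit channel, then shuts down the
// started services in reverse order of their startup.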
func (core *Core) Stop(ctx context.Context) {
	log.Info("stopping-adaptercore")
	if core.exitChannel != nil {
		core.exitChannel <- 1
	}
	// Stop all the started services
	if core.grpcServer != nil {
		core.grpcServer.Stop()
	}
	if core.logicalDeviceMgr != nil {
		core.logicalDeviceMgr.stop(ctx)
	}
	if core.deviceMgr != nil {
		core.deviceMgr.stop(ctx)
	}
	log.Info("adaptercore-stopped")
}

// startGRPCService creates the gRPC service handler, registers it with the
// gRPC server, and starts the server.
func (core *Core) startGRPCService(ctx context.Context) {
	// Create an insecure gRPC server
	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false, probe.GetProbeFromContext(ctx))
	log.Info("grpc-server-created")

	core.grpcNBIAPIHandler = NewAPIHandler(core.genericMgr, core.deviceMgr, core.logicalDeviceMgr)
	core.logicalDeviceMgr.setGrpcNbiHandler(core.grpcNBIAPIHandler)
	// Create a function to register the core gRPC service with the gRPC server
	f := func(gs *grpc.Server) {
		voltha.RegisterVolthaServiceServer(
			gs,
			core.grpcNBIAPIHandler,
		)
	}

	core.grpcServer.AddService(f)
	log.Info("grpc-service-added")

	/*
	 * Start the gRPC server.
	 *
	 * This is a bit sub-optimal, as the grpcServer.Start call blocks and does
	 * not return until something fails, yet we want to send a "running" status
	 * update first. As written, the "running" status update is sent before the
	 * server has actually started, so there is a small window in which the
	 * core could report itself as ready when it is not yet serving.
	 */
	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusRunning)

	// Start the server; this call blocks until the server exits
	log.Info("grpc-server-started")
	core.grpcServer.Start(context.Background())

	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusStopped)
}
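
// A hypothetical client-side sketch (illustrative only; the port and the
// generated binding names are assumptions based on voltha-protos v2): dial
// the server and issue a call against the registered VolthaService.
//
//	conn, err := grpc.Dial("localhost:50057", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal("failed-to-dial-grpc-server")
//	}
//	defer conn.Close()
//	client := voltha.NewVolthaServiceClient(conn)
//	res, err := client.GetVoltha(context.Background(), &empty.Empty{})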

func (core *Core) startDeviceManager(ctx context.Context) {
	// TODO: Interaction between the logical device manager and the device
	// manager should mostly occur via callbacks. For now, until the model is
	// ready, the device manager keeps a reference to the logical device
	// manager to initiate the creation of logical devices.
	log.Info("starting-DeviceManager")
	core.deviceMgr.start(ctx, core.logicalDeviceMgr)
	log.Info("started-DeviceManager")
}

func (core *Core) startLogicalDeviceManager(ctx context.Context) {
	log.Info("starting-Logical-DeviceManager")
	core.logicalDeviceMgr.start(ctx)
	log.Info("started-Logical-DeviceManager")
}

/*
 * monitorKvstoreLiveness runs as a goroutine that monitors KV store liveness
 * (connection status).
 *
 * It constantly watches the liveness state of the KV store as reported
 * periodically by the backend and updates the status of the kv-store service
 * registered with the ro_core probe.
 *
 * If no liveness event is seen within a timeout, the goroutine triggers an
 * explicit liveness check, which in turn pushes a liveness event onto the
 * liveness channel: true or false, depending on whether the attempt
 * succeeded.
 *
 * The gRPC server monitors the state of the readiness probe and starts
 * issuing UNAVAILABLE responses while the probe is not ready.
 */
func (core *Core) monitorKvstoreLiveness(ctx context.Context) {
	log.Info("start-monitoring-kvstore-liveness")

	// Instruct the backend to create a liveness channel for transporting state updates
	livenessChannel := core.backend.EnableLivenessChannel()

	log.Debug("enabled-kvstore-liveness-channel")

	// Default state for the KV store is not alive, so start with the more
	// aggressive polling interval
	timeout := core.config.NotLiveProbeInterval
	for {
		timeoutTimer := time.NewTimer(timeout)
		select {

		case liveness := <-livenessChannel:
			log.Debugw("received-liveness-change-notification", log.Fields{"liveness": liveness})

			if !liveness {
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusNotReady)

				if core.grpcServer != nil {
					log.Info("kvstore-set-server-notready")
				}

				timeout = core.config.NotLiveProbeInterval
			} else {
				probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)

				if core.grpcServer != nil {
					log.Info("kvstore-set-server-ready")
				}

				timeout = core.config.LiveProbeInterval
			}

			// Stop and drain the timer so its channel does not fire later
			if !timeoutTimer.Stop() {
				<-timeoutTimer.C
			}

		case <-timeoutTimer.C:
			log.Info("kvstore-perform-liveness-check-on-timeout")

			// Trigger a liveness check if no liveness update was received within
			// the timeout period. The liveness check pushes the live state onto
			// the same channel this routine reads, so run it asynchronously to
			// avoid blocking on the backend response and any possibility of
			// deadlock.
			go core.backend.PerformLivenessCheck(core.config.KVStoreTimeout)
		}
	}
}