blob: d022266517351e787c777e200a2508f538cd3c4f [file] [log] [blame]
Stephane Barbariea75791c2019-01-24 10:58:06 -05001/*
2 * Copyright 2018-present Open Networking Foundation
3
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7
8 * http://www.apache.org/licenses/LICENSE-2.0
9
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16package core
17
18import (
19 "context"
sbarbari17d7e222019-11-05 10:02:29 -050020 "github.com/opencord/voltha-go/db/model"
Stephane Barbariea75791c2019-01-24 10:58:06 -050021 "github.com/opencord/voltha-go/ro_core/config"
sbarbari17d7e222019-11-05 10:02:29 -050022 "github.com/opencord/voltha-lib-go/v2/pkg/db"
Scott Baker807addd2019-10-24 15:16:21 -070023 "github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
Scott Baker807addd2019-10-24 15:16:21 -070024 grpcserver "github.com/opencord/voltha-lib-go/v2/pkg/grpc"
25 "github.com/opencord/voltha-lib-go/v2/pkg/log"
26 "github.com/opencord/voltha-lib-go/v2/pkg/probe"
Scott Baker555307d2019-11-04 08:58:01 -080027 "github.com/opencord/voltha-protos/v2/go/voltha"
Stephane Barbariea75791c2019-01-24 10:58:06 -050028 "google.golang.org/grpc"
Girish Kumar91482642019-11-08 11:38:03 +000029 "time"
Stephane Barbariea75791c2019-01-24 10:58:06 -050030)
31
// Core represents a single ro_core (read-only core) instance. It owns the
// in-memory data-model roots and proxies, the device managers, the
// northbound gRPC server and the KV-store client/backend used for
// persistence and liveness monitoring.
type Core struct {
	instanceId        string                 // unique identifier of this core instance
	genericMgr        *ModelProxyManager     // generic model-proxy access for NBI queries
	deviceMgr         *DeviceManager         // manages physical device state
	logicalDeviceMgr  *LogicalDeviceManager  // manages logical device state; references deviceMgr
	grpcServer        *grpcserver.GrpcServer // northbound gRPC server
	grpcNBIAPIHandler *APIHandler            // NBI handler registered with grpcServer
	config            *config.ROCoreFlags    // runtime configuration flags
	clusterDataRoot   model.Root             // root of the cluster-wide data model
	localDataRoot     model.Root             // root of this instance's local data model
	clusterDataProxy  *model.Proxy           // proxy rooted at "/" of clusterDataRoot
	localDataProxy    *model.Proxy           // proxy rooted at "/" of localDataRoot
	exitChannel       chan int               // signalled (buffered, cap 1) by Stop()
	kvClient          kvstore.Client         // raw KV-store client supplied by the caller
	backend           db.Backend             // KV backend wrapping kvClient; source of liveness events
}
48
// init registers this package with the shared logger, emitting JSON-formatted
// records at debug level.
func init() {
	log.AddPackage(log.JSON, log.DebugLevel, nil)
}
52
53func NewCore(id string, cf *config.ROCoreFlags, kvClient kvstore.Client) *Core {
54 var core Core
55 core.instanceId = id
56 core.exitChannel = make(chan int, 1)
57 core.config = cf
58 core.kvClient = kvClient
59
Girish Kumar91482642019-11-08 11:38:03 +000060 // Configure backend to push Liveness Status at least every cf.LiveProbeInterval / 2 seconds
61 // so as to avoid trigger of Liveness check (due to Liveness timeout) when backend is alive
62 livenessChannelInterval := cf.LiveProbeInterval / 2
63
Stephane Barbariea75791c2019-01-24 10:58:06 -050064 // Setup the KV store
65 // Do not call NewBackend constructor; it creates its own KV client
66 // Commented the backend for now until the issue between the model and the KV store
67 // is resolved.
Girish Kumar91482642019-11-08 11:38:03 +000068 core.backend = db.Backend{
69 Client: kvClient,
70 StoreType: cf.KVStoreType,
71 Host: cf.KVStoreHost,
72 Port: cf.KVStorePort,
73 Timeout: cf.KVStoreTimeout,
74 LivenessChannelInterval: livenessChannelInterval,
75 PathPrefix: "service/voltha"}
76 core.clusterDataRoot = model.NewRoot(&voltha.Voltha{}, &core.backend)
77 core.localDataRoot = model.NewRoot(&voltha.CoreInstance{}, &core.backend)
Stephane Barbarieef6650d2019-07-18 12:15:09 -040078 core.clusterDataProxy = core.clusterDataRoot.CreateProxy(context.Background(), "/", false)
79 core.localDataProxy = core.localDataRoot.CreateProxy(context.Background(), "/", false)
Stephane Barbariea75791c2019-01-24 10:58:06 -050080 return &core
81}
82
83func (core *Core) Start(ctx context.Context) {
84 log.Info("starting-adaptercore", log.Fields{"coreId": core.instanceId})
85 core.genericMgr = newModelProxyManager(core.clusterDataProxy)
86 core.deviceMgr = newDeviceManager(core.clusterDataProxy, core.instanceId)
87 core.logicalDeviceMgr = newLogicalDeviceManager(core.deviceMgr, core.clusterDataProxy)
88 go core.startDeviceManager(ctx)
89 go core.startLogicalDeviceManager(ctx)
90 go core.startGRPCService(ctx)
Girish Kumar91482642019-11-08 11:38:03 +000091 go core.monitorKvstoreLiveness(ctx)
Stephane Barbariea75791c2019-01-24 10:58:06 -050092
93 log.Info("adaptercore-started")
94}
95
// Stop shuts the core down: it signals the exit channel, then stops the
// started services in reverse dependency order (gRPC server first, then the
// logical-device manager, then the device manager). Each component is
// nil-checked so Stop is safe to call even if Start never ran (or only
// partially completed).
func (core *Core) Stop(ctx context.Context) {
	log.Info("stopping-adaptercore")
	if core.exitChannel != nil {
		// Buffered (cap 1) in NewCore, so this send does not block on the
		// first Stop even with no receiver.
		core.exitChannel <- 1
	}
	// Stop all the started services
	if core.grpcServer != nil {
		core.grpcServer.Stop()
	}
	if core.logicalDeviceMgr != nil {
		core.logicalDeviceMgr.stop(ctx)
	}
	if core.deviceMgr != nil {
		core.deviceMgr.stop(ctx)
	}
	log.Info("adaptercore-stopped")
}
113
//startGRPCService creates the grpc service handlers, registers it to the grpc server
// and starts the server
func (core *Core) startGRPCService(ctx context.Context) {
	// create an insecure gserver server
	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false, probe.GetProbeFromContext(ctx))
	log.Info("grpc-server-created")

	// Wire the NBI handler to the managers and let the logical-device
	// manager call back into it.
	core.grpcNBIAPIHandler = NewAPIHandler(core.genericMgr, core.deviceMgr, core.logicalDeviceMgr)
	core.logicalDeviceMgr.setGrpcNbiHandler(core.grpcNBIAPIHandler)
	// Create a function to register the core GRPC service with the GRPC server
	f := func(gs *grpc.Server) {
		voltha.RegisterVolthaServiceServer(
			gs,
			core.grpcNBIAPIHandler,
		)
	}

	core.grpcServer.AddService(f)
	log.Info("grpc-service-added")

	/*
	 * Start the GRPC server
	 *
	 * This is a bit sub-optimal here as the grpcServer.Start call does not return (blocks)
	 * until something fails, but we want to send a "start" status update. As written this
	 * means that we are actually sending the "start" status update before the server is
	 * started, which means it is possible that the status is "running" before it actually is.
	 *
	 * This means that there is a small window in which the core could return its status as
	 * ready, when it really isn't.
	 */
	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusRunning)

	// Start the server; this call blocks until the server fails or is stopped.
	log.Info("grpc-server-started")
	core.grpcServer.Start(context.Background())

	// Start returned, so the server is no longer serving: reflect that in the probe.
	probe.UpdateStatusFromContext(ctx, "grpc-service", probe.ServiceStatusStopped)
}
153
// startDeviceManager starts the device manager, handing it a reference to
// the logical-device manager.
func (core *Core) startDeviceManager(ctx context.Context) {
	// TODO: Interaction between the logicaldevicemanager and devicemanager should mostly occur via
	// callbacks.  For now, until the model is ready, devicemanager will keep a reference to the
	// logicaldevicemanager to initiate the creation of logical devices
	log.Info("starting-DeviceManager")
	core.deviceMgr.start(ctx, core.logicalDeviceMgr)
	log.Info("started-DeviceManager")
}
162
// startLogicalDeviceManager starts the logical-device manager.
func (core *Core) startLogicalDeviceManager(ctx context.Context) {
	log.Info("starting-Logical-DeviceManager")
	core.logicalDeviceMgr.start(ctx)
	log.Info("started-Logical-DeviceManager")
}
Girish Kumar91482642019-11-08 11:38:03 +0000168
169/*
170* Thread to monitor kvstore Liveness (connection status)
171*
172* This function constantly monitors Liveness State of kvstore as reported
173* periodically by backend and updates the Status of kv-store service registered
174* with ro_core probe.
175*
176* If no liveness event has been seen within a timeout, then the thread will make
177* an trigger a "liveness" check, which will in turn trigger a liveness event on
178* the liveness channel, true or false depending on whether the attempt succeeded.
179*
180* The gRPC server in turn monitors the state of the readiness probe and will
181* start issuing UNAVAILABLE response while the probe is not ready.
182 */
183func (core *Core) monitorKvstoreLiveness(ctx context.Context) {
184 log.Info("start-monitoring-kvstore-liveness")
185
186 // Instruct backend to create Liveness channel for transporting state updates
187 livenessChannel := core.backend.EnableLivenessChannel()
188
189 log.Debug("enabled-kvstore-liveness-channel")
190
191 // Default state for kvstore is not alive
192 timeout := core.config.NotLiveProbeInterval
193 for {
194 timeoutTimer := time.NewTimer(timeout)
195 select {
196
197 case liveness := <-livenessChannel:
198 log.Debugw("received-liveness-change-notification", log.Fields{"liveness": liveness})
199
200 if !liveness {
201 probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusNotReady)
202
203 if core.grpcServer != nil {
204 log.Info("kvstore-set-server-notready")
205 }
206
207 timeout = core.config.NotLiveProbeInterval
208 } else {
209 probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)
210
211 if core.grpcServer != nil {
212 log.Info("kvstore-set-server-ready")
213 }
214
215 timeout = core.config.LiveProbeInterval
216 }
217
218 if !timeoutTimer.Stop() {
219 <-timeoutTimer.C
220 }
221
222 case <-timeoutTimer.C:
223 log.Info("kvstore-perform-liveness-check-on-timeout")
224
225 // Trigger Liveness check if no liveness update received within the timeout period.
226 // The Liveness check will push Live state to same channel which this routine is
227 // reading and processing. This, do it asynchronously to avoid blocking for
228 // backend response and avoid any possibility of deadlock
229 go core.backend.PerformLivenessCheck(core.config.KVStoreTimeout)
230 }
231 }
232}