/*
 * Copyright 2018-present Open Networking Foundation

 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at

 * http://www.apache.org/licenses/LICENSE-2.0

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package afrouter

// Backend manager handles redundant connections per backend

import (
	"errors"
	"fmt"
	"github.com/opencord/voltha-lib-go/v2/pkg/log"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
	"net/url"
	"strconv"
	"strings"
	"sync"
)

// backend represents a single backend and its redundant connections in an HA configuration
type backend struct {
	mutex             sync.Mutex
	name              string
	beType            backendType
	activeAssociation association
	connections       map[string]*connection
	openConns         map[*connection]*grpc.ClientConn
	activeRequests    map[*request]struct{}
}

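// association describes how transactions are tied to a specific connection of
// an active/active backend, either via a metadata header key or a protobuf field.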
type association struct {
	strategy associationStrategy
	location associationLocation
	field    string // Used only if location is protobuf
	key      string
}

// splitActiveStreamsUnsafe expects the caller to have already locked the backend mutex
func (be *backend) splitActiveStreamsUnsafe(cn *connection, conn *grpc.ClientConn) {
	if len(be.activeRequests) != 0 {
		log.Debugf("Creating new streams for %d existing requests", len(be.activeRequests))
	}
	for r := range be.activeRequests {
		r.mutex.Lock()
		if _, have := r.streams[cn.name]; !have {
			log.Debugf("Opening southbound stream for existing request '%s'", r.methodInfo.method)
			if stream, err := grpc.NewClientStream(r.ctx, clientStreamDescForProxying, conn, r.methodInfo.all); err != nil {
				log.Debugf("Failed to create a client stream '%s', %v", cn.name, err)
			} else {
				go r.catchupRequestStreamThenForwardResponseStream(cn.name, stream)
				// new thread will unlock the request mutex
				continue
			}
		}
		r.mutex.Unlock()
	}
}

// openSouthboundStreams opens a client stream on each open southbound connection for the incoming request
func (be *backend) openSouthboundStreams(srv interface{}, serverStream grpc.ServerStream, nf *requestFrame, sf *responseFrame) (*request, error) {
	be.mutex.Lock()
	defer be.mutex.Unlock()

	isStreamingRequest, isStreamingResponse := nf.router.IsStreaming(nf.methodInfo.method)

	// Get the metadata from the incoming message on the server
	md, ok := metadata.FromIncomingContext(serverStream.Context())
	if !ok {
		return nil, errors.New("could not get metadata from the server stream")
	}

	r := &request{
		// Create an outgoing context that includes the incoming metadata and that will cancel if the server's context is canceled
		ctx: metadata.AppendToOutgoingContext(metadata.NewOutgoingContext(serverStream.Context(), md.Copy()), "voltha_serial_number", nf.serialNo),

		streams:         make(map[string]grpc.ClientStream),
		responseErrChan: make(chan error, 1),

		backend:             be,
		serverStream:        serverStream,
		methodInfo:          nf.methodInfo,
		requestFrame:        nf,
		responseFrame:       sf,
		isStreamingRequest:  isStreamingRequest,
		isStreamingResponse: isStreamingResponse,
	}

	log.Debugf("Opening southbound request for method '%s'", nf.methodInfo.method)

	// TODO: Need to check if this is an active/active backend cluster
	// with a serial number in the header.
	log.Debugf("Serial number for transaction allocated: %s", nf.serialNo)
	// If even one stream can be created then proceed. If none can be
	// created then report an error because both the primary and redundant
	// connections are non-existent.
	var atLeastOne = false
	var errStr strings.Builder

	log.Debugf("There are %d/%d streams to open", len(be.openConns), len(be.connections))
	if nf.connection != nil {
		// Debug statement triggered by source router. Other routers have no connection preference.
		log.Debugf("Looking for connection %s", nf.connection.name)
	}
	for cn, conn := range be.openConns {
		// If source-router was used, it will indicate a specific connection to be used
		if nf.connection != nil && nf.connection != cn {
			continue
		}

		log.Debugf("Opening stream for connection '%s'", cn.name)
		if stream, err := grpc.NewClientStream(r.ctx, clientStreamDescForProxying, conn, r.methodInfo.all); err != nil {
			log.Debugf("Failed to create a client stream '%s', %v", cn.name, err)
		} else {
			r.streams[cn.name] = stream
			go r.forwardResponseStream(cn.name, stream)
			atLeastOne = true
		}
	}
	if atLeastOne {
		be.activeRequests[r] = struct{}{}
		return r, nil
	}
	fmt.Fprintf(&errStr, "{{No open connections for backend '%s', unable to send}} ", be.name)
	log.Error(errStr.String())
	return nil, errors.New(errStr.String())
}

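// handler opens a southbound stream on each open connection for the incoming
// request, forwards the northbound request stream to those streams, and then
// waits for the response stream to complete.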
func (be *backend) handler(srv interface{}, serverStream grpc.ServerStream, nf *requestFrame, sf *responseFrame) error {
	// Set up streams for each open connection
	request, err := be.openSouthboundStreams(srv, serverStream, nf, sf)
	if err != nil {
		log.Errorf("openSouthboundStreams failed: %v", err)
		return err
	}

	log.Debug("Starting request stream forwarding")
	if s2cErr := request.forwardRequestStream(serverStream); s2cErr != nil {
		// exit with an error to the stack
		return status.Errorf(codes.Internal, "failed proxying s2c: %v", s2cErr)
	}
	// wait for response stream to complete
	return <-request.responseErrChan
}

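// newBackend validates the supplied backend configuration, creates the backend
// along with its connections, and starts the initial connection attempts. For an
// active/active backend an association strategy and location must be provided
// (plus a field for the protobuf location or a key for the header location).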
func newBackend(conf *BackendConfig, clusterName string) (*backend, error) {
	rtrn_err := false

	log.Debugf("Configuring the backend with %v", *conf)
	// Validate the config and configure the backend
	be := &backend{
		name:           conf.Name,
		connections:    make(map[string]*connection),
		openConns:      make(map[*connection]*grpc.ClientConn),
		activeRequests: make(map[*request]struct{}),
	}
	if conf.Type == BackendUndefined {
		log.Errorf("Invalid type specified for backend %s in cluster %s", conf.Name, clusterName)
		rtrn_err = true
	}
	be.beType = conf.Type

	if conf.Association.Strategy == AssociationStrategyUndefined && be.beType == BackendActiveActive {
		log.Errorf("An association strategy must be provided if the backend "+
			"type is active/active for backend %s in cluster %s", conf.Name, clusterName)
		rtrn_err = true
	}
	be.activeAssociation.strategy = conf.Association.Strategy

	if conf.Association.Location == AssociationLocationUndefined && be.beType == BackendActiveActive {
		log.Errorf("An association location must be provided if the backend "+
			"type is active/active for backend %s in cluster %s", conf.Name, clusterName)
		rtrn_err = true
	}
	be.activeAssociation.location = conf.Association.Location

	if conf.Association.Field == "" && be.activeAssociation.location == AssociationLocationProtobuf {
		log.Errorf("An association field must be provided if the backend "+
			"type is active/active and the location is set to protobuf "+
			"for backend %s in cluster %s", conf.Name, clusterName)
		rtrn_err = true
	}
	be.activeAssociation.field = conf.Association.Field

	if conf.Association.Key == "" && be.activeAssociation.location == AssociationLocationHeader {
		log.Errorf("An association key must be provided if the backend "+
			"type is active/active and the location is set to header "+
			"for backend %s in cluster %s", conf.Name, clusterName)
		rtrn_err = true
	}
	be.activeAssociation.key = conf.Association.Key
	if rtrn_err {
		return nil, errors.New("Backend configuration failed")
	}
	// Configure the connections
	// Connections can consist of just a name. This allows for dynamic configuration
	// at a later time.
	// TODO: validate that there is one connection for all but active/active backends
	if len(conf.Connections) > 1 && be.beType != BackendActiveActive {
		log.Errorf("Only one connection may be specified unless the backend " +
			"type is set to active/active")
		rtrn_err = true
	}
	if len(conf.Connections) == 0 {
		log.Errorf("At least one connection must be specified")
		rtrn_err = true
	}
	for _, cnConf := range conf.Connections {
		if cnConf.Name == "" {
			log.Errorf("A connection must have a name for backend %s in cluster %s",
				conf.Name, clusterName)
		} else {
			ctx, cancelFunc := context.WithCancel(context.Background())
			be.connections[cnConf.Name] = &connection{name: cnConf.Name, addr: cnConf.Addr, port: cnConf.Port, backend: be, ctx: ctx, close: cancelFunc}
			if _, err := url.Parse(cnConf.Addr); err != nil {
				log.Errorf("The address for connection %s in backend %s in cluster %s is invalid: %s",
					cnConf.Name, conf.Name, clusterName, err)
				rtrn_err = true
			}
			// Validate the port number. This just validates that it's an integer in the range 1-65535
			if n, err := strconv.Atoi(cnConf.Port); err != nil || n <= 0 || n > 65535 {
				log.Errorf("Port %s for connection %s in backend %s in cluster %s is invalid",
					cnConf.Port, cnConf.Name, conf.Name, clusterName)
				rtrn_err = true
			}
		}
	}

	if rtrn_err {
		return nil, errors.New("Connection configuration failed")
	}
	// All is well, start the backend cluster connections
	be.connectAll()

	return be, nil
}

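// incConn records a newly opened gRPC client connection and attaches it to any
// requests that are already in flight.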
func (be *backend) incConn(cn *connection, conn *grpc.ClientConn) {
	be.mutex.Lock()
	defer be.mutex.Unlock()

	be.openConns[cn] = conn
	be.splitActiveStreamsUnsafe(cn, conn)
}

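// decConn removes a connection from the set of open connections once it is no
// longer usable.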
func (be *backend) decConn(cn *connection) {
	be.mutex.Lock()
	defer be.mutex.Unlock()

	delete(be.openConns, cn)
}

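// NumOpenConnections returns the number of currently open connections for this backend.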
func (be *backend) NumOpenConnections() int {
	be.mutex.Lock()
	defer be.mutex.Unlock()

	return len(be.openConns)
}

// connectAll attempts to establish all the connections for a backend; any
// failures result in an abort. This should only be called on the first
// attempt to connect. Individual connections should be handled after that.
func (be *backend) connectAll() {
	for _, cn := range be.connections {
		go cn.connect()
	}
}