// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
// fine-grained error checking required by the write-at-most-once retry semantics of etcd.

package clientv3

import (
	"context"
	"io"
	"sync"
	"time"

	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// unaryClientInterceptor returns a new retrying unary client interceptor.
//
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
// changed through options (e.g. withMax) on creation of the interceptor or on call (through grpc.CallOptions).
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		grpcOpts, retryOpts := filterCallOptions(opts)
		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
		// Short circuit for simplicity, and to avoid allocations.
		if callOpts.max == 0 {
			return invoker(ctx, method, req, reply, cc, grpcOpts...)
		}
		var lastErr error
		for attempt := uint(0); attempt < callOpts.max; attempt++ {
			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
				return err
			}
			logger.Debug(
				"retrying of unary invoker",
				zap.String("target", cc.Target()),
				zap.Uint("attempt", attempt),
			)
			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
			if lastErr == nil {
				return nil
			}
			logger.Warn(
				"retrying of unary invoker failed",
				zap.String("target", cc.Target()),
				zap.Uint("attempt", attempt),
				zap.Error(lastErr),
			)
			if isContextError(lastErr) {
				if ctx.Err() != nil {
					// It's the context deadline or cancellation.
					return lastErr
				}
				// It's the callCtx deadline or cancellation, in which case try again.
				continue
			}
			if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
				gterr := c.getToken(ctx)
				if gterr != nil {
					logger.Warn(
						"retrying of unary invoker failed to fetch new auth token",
						zap.String("target", cc.Target()),
						zap.Error(gterr),
					)
					return gterr // lastErr must be invalid auth token
				}
				continue
			}
			if !isSafeRetry(c.lg, lastErr, callOpts) {
				return lastErr
			}
		}
		return lastErr
	}
}
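
// exampleUnaryRetryWiring is an illustrative sketch added for clarity, not
// part of the original file: it shows how the interceptor above might be
// attached to a connection with a raised retry budget. The function name and
// endpoint address are hypothetical.
func exampleUnaryRetryWiring(c *Client, logger *zap.Logger) (*grpc.ClientConn, error) {
	// Allow up to 3 attempts instead of the default max of 0 (no retries).
	// Individual calls can still override this through retryOption values
	// passed as grpc.CallOptions; filterCallOptions below peels them off.
	interceptor := c.unaryClientInterceptor(logger, withMax(3))
	return grpc.Dial(
		"127.0.0.1:2379", // hypothetical endpoint
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(interceptor),
	)
}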

// streamClientInterceptor returns a new retrying stream client interceptor for server-side streaming calls.
//
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
// changed through options (e.g. withMax) on creation of the interceptor or on call (through grpc.CallOptions).
//
// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
// BidiStreams), the retry interceptor will fail the call.
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		grpcOpts, retryOpts := filterCallOptions(opts)
		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
		// Short circuit for simplicity, and to avoid allocations.
		if callOpts.max == 0 {
			return streamer(ctx, desc, cc, method, grpcOpts...)
		}
		if desc.ClientStreams {
			return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
		}
		newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
		if err != nil {
			logger.Warn("retry stream intercept failed to create new stream", zap.Error(err))
			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
			return nil, err
		}
		retryingStreamer := &serverStreamingRetryingStream{
			client:       c,
			ClientStream: newStreamer,
			callOpts:     callOpts,
			ctx:          ctx,
			streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
				return streamer(ctx, desc, cc, method, grpcOpts...)
			},
		}
		return retryingStreamer, nil
	}
}
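
// exampleStreamRetryWiring is an illustrative sketch, not part of the original
// file: it wires the stream interceptor at dial time. Per the contract above,
// only server-side streams (desc.ClientStreams == false) are retried. The
// function name and endpoint are hypothetical.
func exampleStreamRetryWiring(c *Client, logger *zap.Logger) (*grpc.ClientConn, error) {
	return grpc.Dial(
		"127.0.0.1:2379", // hypothetical endpoint
		grpc.WithInsecure(),
		grpc.WithStreamInterceptor(c.streamClientInterceptor(logger, withMax(3))),
	)
}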

// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
type serverStreamingRetryingStream struct {
	grpc.ClientStream
	client        *Client
	bufferedSends []interface{} // single message that the client can send
	receivedGood  bool          // indicates whether any prior receives were successful
	wasClosedSend bool          // indicates that CloseSend was called
	ctx           context.Context
	callOpts      *options
	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
	mu            sync.RWMutex
}

func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
	s.mu.Lock()
	s.ClientStream = clientStream
	s.mu.Unlock()
}

func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.ClientStream
}

func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
	s.mu.Lock()
	s.bufferedSends = append(s.bufferedSends, m)
	s.mu.Unlock()
	return s.getStream().SendMsg(m)
}

func (s *serverStreamingRetryingStream) CloseSend() error {
	s.mu.Lock()
	s.wasClosedSend = true
	s.mu.Unlock()
	return s.getStream().CloseSend()
}

func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
	return s.getStream().Header()
}

func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
	return s.getStream().Trailer()
}

func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
	if !attemptRetry {
		return lastErr // success or hard failure
	}
	// We start off from attempt 1, because the zeroth attempt was already made on the normal SendMsg().
	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
			return err
		}
		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
		if err != nil {
			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
			return err
		}
		s.setStream(newStream)
		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
		if !attemptRetry {
			return lastErr
		}
	}
	return lastErr
}

func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
	s.mu.RLock()
	wasGood := s.receivedGood
	s.mu.RUnlock()
	err := s.getStream().RecvMsg(m)
	if err == nil || err == io.EOF {
		s.mu.Lock()
		s.receivedGood = true
		s.mu.Unlock()
		return false, err
	} else if wasGood {
		// A previous RecvMsg in the stream succeeded, so no retry logic should interfere.
		return false, err
	}
	if isContextError(err) {
		if s.ctx.Err() != nil {
			return false, err
		}
		// It's the callCtx deadline or cancellation, in which case try again.
		return true, err
	}
	if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
		gterr := s.client.getToken(s.ctx)
		if gterr != nil {
			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
			return false, err // return the original error for simplicity
		}
		return true, err
	}
	return isSafeRetry(s.client.lg, err, s.callOpts), err
}

func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
	s.mu.RLock()
	bufferedSends := s.bufferedSends
	s.mu.RUnlock()
	newStream, err := s.streamerCall(callCtx)
	if err != nil {
		return nil, err
	}
	for _, msg := range bufferedSends {
		if err := newStream.SendMsg(msg); err != nil {
			return nil, err
		}
	}
	if err := newStream.CloseSend(); err != nil {
		return nil, err
	}
	return newStream, nil
}

func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
	waitTime := time.Duration(0)
	if attempt > 0 {
		waitTime = callOpts.backoffFunc(attempt)
	}
	if waitTime > 0 {
		timer := time.NewTimer(waitTime)
		select {
		case <-ctx.Done():
			timer.Stop()
			return contextErrToGrpcErr(ctx.Err())
		case <-timer.C:
		}
	}
	return nil
}

// isSafeRetry returns "true" when an RPC is safe to retry with the given error.
func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
	if isContextError(err) {
		return false
	}
	switch callOpts.retryPolicy {
	case repeatable:
		return isSafeRetryImmutableRPC(err)
	case nonRepeatable:
		return isSafeRetryMutableRPC(err)
	default:
		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
		return false
	}
}

func isContextError(err error) bool {
	code := status.Code(err)
	return code == codes.DeadlineExceeded || code == codes.Canceled
}

func contextErrToGrpcErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	default:
		return status.Error(codes.Unknown, err.Error())
	}
}

var (
	defaultOptions = &options{
		retryPolicy: nonRepeatable,
		max:         0, // retries disabled by default
		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*waitBetween*/, 0.10 /*jitterFraction*/),
		retryAuth:   true,
	}
)

// backoffFunc denotes a family of functions that control the backoff duration between call retries.
//
// They are called with an identifier of the attempt, and should return the duration the client should
// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request,
// the deadline of the request takes precedence and the wait will be interrupted before proceeding
// with the next iteration.
type backoffFunc func(attempt uint) time.Duration
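
// exampleExponentialBackoff is an illustrative sketch, not part of the
// original file: a custom backoffFunc that doubles the wait per attempt,
// capped at maxWait, pluggable via withBackoff below. The function name and
// the base/maxWait parameters are hypothetical.
func exampleExponentialBackoff(base, maxWait time.Duration) backoffFunc {
	return func(attempt uint) time.Duration {
		wait := base << attempt // base * 2^attempt
		if wait <= 0 || wait > maxWait {
			// The shift can overflow (or zero out) for large attempts; clamp to the cap.
			wait = maxWait
		}
		return wait
	}
}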

// withRetryPolicy sets the retry policy of this call.
func withRetryPolicy(rp retryPolicy) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.retryPolicy = rp
	}}
}

// withMax sets the maximum number of retries on this call, or this interceptor.
func withMax(maxRetries uint) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.max = maxRetries
	}}
}

// withBackoff sets the `backoffFunc` used to control time between retries.
func withBackoff(bf backoffFunc) retryOption {
	return retryOption{applyFunc: func(o *options) {
		o.backoffFunc = bf
	}}
}

type options struct {
	retryPolicy retryPolicy
	max         uint
	backoffFunc backoffFunc
	retryAuth   bool
}

// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
type retryOption struct {
	grpc.EmptyCallOption // make sure we implement private after() and before() methods so we don't panic.
	applyFunc            func(opt *options)
}

func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
	if len(retryOptions) == 0 {
		return opt
	}
	optCopy := &options{}
	*optCopy = *opt
	for _, f := range retryOptions {
		f.applyFunc(optCopy)
	}
	return optCopy
}

func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
	for _, opt := range callOptions {
		if co, ok := opt.(retryOption); ok {
			retryOptions = append(retryOptions, co)
		} else {
			grpcOptions = append(grpcOptions, opt)
		}
	}
	return grpcOptions, retryOptions
}
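
// exampleFilterCallOptions is an illustrative sketch, not part of the original
// file: retryOption values ride alongside ordinary grpc.CallOptions on a call
// and are peeled off by filterCallOptions, while everything else is forwarded
// to gRPC untouched. The function name is hypothetical.
func exampleFilterCallOptions() {
	mixed := []grpc.CallOption{
		grpc.WaitForReady(true), // ordinary gRPC option, forwarded as-is
		withMax(3),              // retryOption, consumed by the retry interceptor
	}
	grpcOpts, retryOpts := filterCallOptions(mixed)
	_ = grpcOpts  // contains only grpc.WaitForReady(true)
	_ = retryOpts // contains only withMax(3)
}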

// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
//
// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
	return func(attempt uint) time.Duration {
		return jitterUp(waitBetween, jitterFraction)
	}
}
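
// exampleRetryOptions is an illustrative sketch, not part of the original
// file: it composes the option helpers above into a set that would typically
// be passed to unaryClientInterceptor or streamClientInterceptor at creation
// time. The function name and the chosen values are hypothetical.
func exampleRetryOptions() []retryOption {
	return []retryOption{
		withMax(5),
		withBackoff(backoffLinearWithJitter(100*time.Millisecond, 0.25)),
		withRetryPolicy(repeatable), // safe only for idempotent (read-only) RPCs
	}
}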