sslobodr | d046be8 | 2019-01-16 10:02:22 -0500 | [diff] [blame] | 1 | /* |
| 2 | * |
| 3 | * Copyright 2018 gRPC authors. |
| 4 | * |
| 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
| 8 | * |
| 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | * |
| 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
| 16 | * |
| 17 | */ |
| 18 | |
| 19 | package health |
| 20 | |
| 21 | import ( |
| 22 | "context" |
| 23 | "fmt" |
| 24 | "io" |
| 25 | "time" |
| 26 | |
| 27 | "google.golang.org/grpc" |
| 28 | "google.golang.org/grpc/codes" |
| 29 | healthpb "google.golang.org/grpc/health/grpc_health_v1" |
| 30 | "google.golang.org/grpc/internal" |
| 31 | "google.golang.org/grpc/internal/backoff" |
| 32 | "google.golang.org/grpc/status" |
| 33 | ) |
| 34 | |
| 35 | const maxDelay = 120 * time.Second |
| 36 | |
| 37 | var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay} |
| 38 | var backoffFunc = func(ctx context.Context, retries int) bool { |
| 39 | d := backoffStrategy.Backoff(retries) |
| 40 | timer := time.NewTimer(d) |
| 41 | select { |
| 42 | case <-timer.C: |
| 43 | return true |
| 44 | case <-ctx.Done(): |
| 45 | timer.Stop() |
| 46 | return false |
| 47 | } |
| 48 | } |
| 49 | |
// init publishes clientHealthCheck via the internal package so that other
// grpc packages can invoke it without importing grpc/health directly.
func init() {
	internal.HealthCheckFunc = clientHealthCheck
}
| 53 | |
// clientHealthCheck runs the client side of the health checking protocol for
// one connection: it repeatedly opens a Watch stream via newStream, receives
// HealthCheckResponse messages, and reports the serving status through
// reportHealth until ctx is canceled.
//
// newStream must return a value implementing grpc.ClientStream. reportHealth
// is called with true when the server reports SERVING — or when health
// checking is unimplemented on the server, so the address stays usable for
// load balancing — and false otherwise. service is the name sent in each
// HealthCheckRequest.
//
// Returns nil when ctx is canceled; returns a non-nil error only for
// non-retryable failures (wrong stream type, or an UNIMPLEMENTED server).
func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
	// Consecutive failed attempts since the last received message; drives
	// the exponential backoff below.
	tryCnt := 0

retryConnection:
	for {
		// Backs off if the connection has failed in some way without receiving a message in the previous retry.
		if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
			// backoffFunc returned false: ctx was canceled during the wait.
			return nil
		}
		tryCnt++

		if ctx.Err() != nil {
			return nil
		}
		rawS, err := newStream()
		if err != nil {
			// Stream creation failed; retry (with backoff) on the next pass.
			continue retryConnection
		}

		s, ok := rawS.(grpc.ClientStream)
		// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
		if !ok {
			reportHealth(true)
			return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
		}

		// io.EOF from SendMsg means the stream is already done; the concrete
		// error will be surfaced by RecvMsg below, so only non-EOF errors
		// trigger an immediate retry here.
		if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
			// Stream should have been closed, so we can safely continue to create a new stream.
			continue retryConnection
		}
		// No more requests are sent on this stream; we only receive from now on.
		s.CloseSend()

		resp := new(healthpb.HealthCheckResponse)
		for {
			err = s.RecvMsg(resp)

			// Reports healthy for the LBing purposes if health check is not implemented in the server.
			if status.Code(err) == codes.Unimplemented {
				reportHealth(true)
				return err
			}

			// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
			if err != nil {
				reportHealth(false)
				continue retryConnection
			}

			// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
			tryCnt = 0
			reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
		}
	}
}