VOL-2112 move to voltha-lib-go
Change-Id: Ic1af08003c1d2c698c0cce371e64f47b47b8d875
diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md
index 0b2dc9a..6c6fe7c 100644
--- a/vendor/go.etcd.io/etcd/clientv3/README.md
+++ b/vendor/go.etcd.io/etcd/clientv3/README.md
@@ -1,12 +1,10 @@
# etcd/clientv3
-[![Docs](https://readthedocs.org/projects/etcd/badge/?version=latest&style=flat-square)](https://etcd.readthedocs.io/en/latest/?badge=latest)
+[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.
-See https://etcd.readthedocs.io/en/latest for latest client architecture.
-
## Install
```bash
diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go
index 921f50f..c954f1b 100644
--- a/vendor/go.etcd.io/etcd/clientv3/auth.go
+++ b/vendor/go.etcd.io/etcd/clientv3/auth.go
@@ -52,6 +52,8 @@
PermReadWrite = authpb.READWRITE
)
+type UserAddOptions authpb.UserAddOptions
+
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
@@ -62,6 +64,9 @@
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+ // UserAddWithOptions adds a new user to an etcd cluster with some options.
+ UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
@@ -123,7 +128,12 @@
}
func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
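
The hunk above keeps `UserAdd` backward compatible (it always sends `NoPassword: false`) and exposes the new request options through `UserAddWithOptions`. Below is a minimal sketch of calling the new method from application code; the endpoint `localhost:2379` and the user name are assumptions for illustration.

```go
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Assumed endpoint; adjust to your cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// UserAddWithOptions exposes the new Options field, e.g. a password-less
	// user intended for CN-based (certificate) authentication.
	if _, err := cli.UserAddWithOptions(ctx, "cn-only-user", "",
		&clientv3.UserAddOptions{NoPassword: true}); err != nil {
		log.Fatal(err)
	}
}
```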
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
index 3c44e70..d02a7ee 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
@@ -12,24 +12,45 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package balancer implements client balancer.
package balancer
import (
- "fmt"
"strconv"
"sync"
"time"
+ "go.etcd.io/etcd/clientv3/balancer/connectivity"
"go.etcd.io/etcd/clientv3/balancer/picker"
"go.uber.org/zap"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
+ grpcconnectivity "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // register DNS resolver
_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
)
+// Config defines balancer configurations.
+type Config struct {
+ // Policy configures balancer policy.
+ Policy picker.Policy
+
+ // Picker implements gRPC picker.
+ // Leave empty if "Policy" field is not custom.
+ // TODO: currently custom policy is not supported.
+ // Picker picker.Picker
+
+ // Name defines an additional name for balancer.
+ // Useful for balancer testing to avoid register conflicts.
+ // If empty, defaults to policy name.
+ Name string
+
+ // Logger configures balancer logging.
+ // If nil, logs are discarded.
+ Logger *zap.Logger
+}
+
// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
// must be invoked at initialization time.
func RegisterBuilder(cfg Config) {
@@ -54,24 +75,18 @@
bb := &baseBalancer{
id: strconv.FormatInt(time.Now().UnixNano(), 36),
policy: b.cfg.Policy,
- name: b.cfg.Policy.String(),
+ name: b.cfg.Name,
lg: b.cfg.Logger,
addrToSc: make(map[resolver.Address]balancer.SubConn),
scToAddr: make(map[balancer.SubConn]resolver.Address),
- scToSt: make(map[balancer.SubConn]connectivity.State),
+ scToSt: make(map[balancer.SubConn]grpcconnectivity.State),
- currentConn: nil,
- csEvltr: &connectivityStateEvaluator{},
+ currentConn: nil,
+ connectivityRecorder: connectivity.New(b.cfg.Logger),
// initialize picker always returns "ErrNoSubConnAvailable"
- Picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
- }
- if b.cfg.Name != "" {
- bb.name = b.cfg.Name
- }
- if bb.lg == nil {
- bb.lg = zap.NewNop()
+ picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
}
// TODO: support multiple connections
@@ -115,13 +130,12 @@
addrToSc map[resolver.Address]balancer.SubConn
scToAddr map[balancer.SubConn]resolver.Address
- scToSt map[balancer.SubConn]connectivity.State
+ scToSt map[balancer.SubConn]grpcconnectivity.State
- currentConn balancer.ClientConn
- currentState connectivity.State
- csEvltr *connectivityStateEvaluator
+ currentConn balancer.ClientConn
+ connectivityRecorder connectivity.Recorder
- picker.Picker
+ picker picker.Picker
}
// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
@@ -131,7 +145,11 @@
bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
return
}
- bb.lg.Info("resolved", zap.String("balancer-id", bb.id), zap.Strings("addresses", addrsToStrings(addrs)))
+ bb.lg.Info("resolved",
+ zap.String("picker", bb.picker.String()),
+ zap.String("balancer-id", bb.id),
+ zap.Strings("addresses", addrsToStrings(addrs)),
+ )
bb.mu.Lock()
defer bb.mu.Unlock()
@@ -142,12 +160,13 @@
if _, ok := bb.addrToSc[addr]; !ok {
sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
if err != nil {
- bb.lg.Warn("NewSubConn failed", zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
+ bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
continue
}
+ bb.lg.Info("created subconn", zap.String("address", addr.Addr))
bb.addrToSc[addr] = sc
bb.scToAddr[sc] = addr
- bb.scToSt[sc] = connectivity.Idle
+ bb.scToSt[sc] = grpcconnectivity.Idle
sc.Connect()
}
}
@@ -160,6 +179,7 @@
bb.lg.Info(
"removed subconn",
+ zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("address", addr.Addr),
zap.String("subconn", scToString(sc)),
@@ -174,7 +194,7 @@
}
// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
-func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
bb.mu.Lock()
defer bb.mu.Unlock()
@@ -182,8 +202,10 @@
if !ok {
bb.lg.Warn(
"state change for an unknown subconn",
+ zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("subconn", scToString(sc)),
+ zap.Int("subconn-size", len(bb.scToAddr)),
zap.String("state", s.String()),
)
return
@@ -191,9 +213,11 @@
bb.lg.Info(
"state changed",
+ zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
- zap.Bool("connected", s == connectivity.Ready),
+ zap.Bool("connected", s == grpcconnectivity.Ready),
zap.String("subconn", scToString(sc)),
+ zap.Int("subconn-size", len(bb.scToAddr)),
zap.String("address", bb.scToAddr[sc].Addr),
zap.String("old-state", old.String()),
zap.String("new-state", s.String()),
@@ -201,69 +225,63 @@
bb.scToSt[sc] = s
switch s {
- case connectivity.Idle:
+ case grpcconnectivity.Idle:
sc.Connect()
- case connectivity.Shutdown:
+ case grpcconnectivity.Shutdown:
// When an address was removed by resolver, b called RemoveSubConn but
// kept the sc's state in scToSt. Remove state for this sc here.
delete(bb.scToAddr, sc)
delete(bb.scToSt, sc)
}
- oldAggrState := bb.currentState
- bb.currentState = bb.csEvltr.recordTransition(old, s)
+ oldAggrState := bb.connectivityRecorder.GetCurrentState()
+ bb.connectivityRecorder.RecordTransition(old, s)
- // Regenerate picker when one of the following happens:
+ // Update balancer picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
- if (s == connectivity.Ready) != (old == connectivity.Ready) ||
- (bb.currentState == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
- bb.regeneratePicker()
+ if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
+ (bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
+ bb.updatePicker()
}
- bb.currentConn.UpdateBalancerState(bb.currentState, bb.Picker)
- return
+ bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
}
-func (bb *baseBalancer) regeneratePicker() {
- if bb.currentState == connectivity.TransientFailure {
+func (bb *baseBalancer) updatePicker() {
+ if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
+ bb.picker = picker.NewErr(balancer.ErrTransientFailure)
bb.lg.Info(
- "generated transient error picker",
+ "updated picker to transient error picker",
+ zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("policy", bb.policy.String()),
)
- bb.Picker = picker.NewErr(balancer.ErrTransientFailure)
return
}
// only pass ready subconns to picker
- scs := make([]balancer.SubConn, 0)
- addrToSc := make(map[resolver.Address]balancer.SubConn)
scToAddr := make(map[balancer.SubConn]resolver.Address)
for addr, sc := range bb.addrToSc {
- if st, ok := bb.scToSt[sc]; ok && st == connectivity.Ready {
- scs = append(scs, sc)
- addrToSc[addr] = sc
+ if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
scToAddr[sc] = addr
}
}
- switch bb.policy {
- case picker.RoundrobinBalanced:
- bb.Picker = picker.NewRoundrobinBalanced(bb.lg, scs, addrToSc, scToAddr)
-
- default:
- panic(fmt.Errorf("invalid balancer picker policy (%d)", bb.policy))
- }
-
+ bb.picker = picker.New(picker.Config{
+ Policy: bb.policy,
+ Logger: bb.lg,
+ SubConnToResolverAddress: scToAddr,
+ })
bb.lg.Info(
- "generated picker",
+ "updated picker",
+ zap.String("picker", bb.picker.String()),
zap.String("balancer-id", bb.id),
zap.String("policy", bb.policy.String()),
- zap.Strings("subconn-ready", scsToStrings(addrToSc)),
- zap.Int("subconn-size", len(addrToSc)),
+ zap.Strings("subconn-ready", scsToStrings(scToAddr)),
+ zap.Int("subconn-size", len(scToAddr)),
)
}
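
With `Config` folded into this file (the standalone `config.go` is deleted below) and the name/logger defaulting removed from the builder, registration now looks roughly like the `init()` in `client.go` later in this diff. A sketch under that assumption; the balancer name is made up:

```go
package main

import (
	"go.etcd.io/etcd/clientv3/balancer"
	"go.etcd.io/etcd/clientv3/balancer/picker"

	"go.uber.org/zap"
)

func init() {
	// Register once at initialization time. Name avoids register conflicts
	// (useful in tests) and the logger is passed explicitly rather than
	// being defaulted inside newBalancer.
	balancer.RegisterBuilder(balancer.Config{
		Policy: picker.RoundrobinBalanced,
		Name:   "example-roundrobin-balancer", // hypothetical name
		Logger: zap.NewExample(),
	})
}

func main() {}
```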
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/config.go b/vendor/go.etcd.io/etcd/clientv3/balancer/config.go
deleted file mode 100644
index 0339a84..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/config.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import (
- "go.etcd.io/etcd/clientv3/balancer/picker"
-
- "go.uber.org/zap"
-)
-
-// Config defines balancer configurations.
-type Config struct {
- // Policy configures balancer policy.
- Policy picker.Policy
-
- // Name defines an additional name for balancer.
- // Useful for balancer testing to avoid register conflicts.
- // If empty, defaults to policy name.
- Name string
-
- // Logger configures balancer logging.
- // If nil, logs are discarded.
- Logger *zap.Logger
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go
deleted file mode 100644
index 6cdeb3f..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import "google.golang.org/grpc/connectivity"
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- }
- }
-
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- return connectivity.TransientFailure
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go
new file mode 100644
index 0000000..4c4ad36
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package connectivity implements client connectivity operations.
+package connectivity
+
+import (
+ "sync"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc/connectivity"
+)
+
+// Recorder records gRPC connectivity.
+type Recorder interface {
+ GetCurrentState() connectivity.State
+ RecordTransition(oldState, newState connectivity.State)
+}
+
+// New returns a new Recorder.
+func New(lg *zap.Logger) Recorder {
+ return &recorder{lg: lg}
+}
+
+// recorder takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+type recorder struct {
+ lg *zap.Logger
+
+ mu sync.RWMutex
+
+ cur connectivity.State
+
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+func (rc *recorder) GetCurrentState() (state connectivity.State) {
+ rc.mu.RLock()
+ defer rc.mu.RUnlock()
+ return rc.cur
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+// - If at least one SubConn in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+// - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+//
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
+ rc.mu.Lock()
+ defer rc.mu.Unlock()
+
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ rc.numReady += updateVal
+ case connectivity.Connecting:
+ rc.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ rc.numTransientFailure += updateVal
+ default:
+ rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
+ }
+ }
+
+ switch { // must be exclusive, no overlap
+ case rc.numReady > 0:
+ rc.cur = connectivity.Ready
+ case rc.numConnecting > 0:
+ rc.cur = connectivity.Connecting
+ default:
+ rc.cur = connectivity.TransientFailure
+ }
+}
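
The `Recorder` replaces the old package-private `connectivityStateEvaluator`, adding locking and a current-state accessor. A small sketch of the aggregation rule (Ready wins over Connecting, which wins over TransientFailure); the transitions are invented for illustration:

```go
package main

import (
	"fmt"

	etcdconnectivity "go.etcd.io/etcd/clientv3/balancer/connectivity"
	"go.uber.org/zap"
	"google.golang.org/grpc/connectivity"
)

func main() {
	rec := etcdconnectivity.New(zap.NewNop())

	// Two sub-connections start connecting; the aggregate is Connecting.
	rec.RecordTransition(connectivity.Idle, connectivity.Connecting)
	rec.RecordTransition(connectivity.Idle, connectivity.Connecting)
	fmt.Println(rec.GetCurrentState()) // CONNECTING

	// One becomes Ready; Ready dominates the aggregate state.
	rec.RecordTransition(connectivity.Connecting, connectivity.Ready)
	fmt.Println(rec.GetCurrentState()) // READY

	// Both fail; the aggregate falls back to TransientFailure.
	rec.RecordTransition(connectivity.Ready, connectivity.TransientFailure)
	rec.RecordTransition(connectivity.Connecting, connectivity.TransientFailure)
	fmt.Println(rec.GetCurrentState()) // TRANSIENT_FAILURE
}
```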
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go
deleted file mode 100644
index 45af5e9..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package balancer implements client balancer.
-package balancer
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go b/vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go
deleted file mode 100644
index 2153767..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import (
- "context"
- "errors"
- "io/ioutil"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- healthpb "google.golang.org/grpc/health/grpc_health_v1"
- "google.golang.org/grpc/status"
-)
-
-// TODO: replace with something better
-var lg = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
-
-const (
- minHealthRetryDuration = 3 * time.Second
- unknownService = "unknown service grpc.health.v1.Health"
-)
-
-// ErrNoAddrAvailable is returned by Get() when the balancer does not have
-// any active connection to endpoints at the time.
-// This error is returned only when opts.BlockingWait is true.
-var ErrNoAddrAvailable = status.Error(codes.Unavailable, "there is no address available")
-
-type NotifyMsg int
-
-const (
- NotifyReset NotifyMsg = iota
- NotifyNext
-)
-
-// GRPC17Health does the bare minimum to expose multiple eps
-// to the grpc reconnection code path
-type GRPC17Health struct {
- // addrs are the client's endpoint addresses for grpc
- addrs []grpc.Address
-
- // eps holds the raw endpoints from the client
- eps []string
-
- // notifyCh notifies grpc of the set of addresses for connecting
- notifyCh chan []grpc.Address
-
- // readyc closes once the first connection is up
- readyc chan struct{}
- readyOnce sync.Once
-
- // healthCheck checks an endpoint's health.
- healthCheck func(ep string) (bool, error)
- healthCheckTimeout time.Duration
-
- unhealthyMu sync.RWMutex
- unhealthyHostPorts map[string]time.Time
-
- // mu protects all fields below.
- mu sync.RWMutex
-
- // upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
- upc chan struct{}
-
- // downc closes when grpc calls down() on pinAddr
- downc chan struct{}
-
- // stopc is closed to signal updateNotifyLoop should stop.
- stopc chan struct{}
- stopOnce sync.Once
- wg sync.WaitGroup
-
- // donec closes when all goroutines are exited
- donec chan struct{}
-
- // updateAddrsC notifies updateNotifyLoop to update addrs.
- updateAddrsC chan NotifyMsg
-
- // grpc issues TLS cert checks using the string passed into dial so
- // that string must be the host. To recover the full scheme://host URL,
- // have a map from hosts to the original endpoint.
- hostPort2ep map[string]string
-
- // pinAddr is the currently pinned address; set to the empty string on
- // initialization and shutdown.
- pinAddr string
-
- closed bool
-}
-
-// DialFunc defines gRPC dial function.
-type DialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error)
-
-// NewGRPC17Health returns a new health balancer with gRPC v1.7.
-func NewGRPC17Health(
- eps []string,
- timeout time.Duration,
- dialFunc DialFunc,
-) *GRPC17Health {
- notifyCh := make(chan []grpc.Address)
- addrs := eps2addrs(eps)
- hb := &GRPC17Health{
- addrs: addrs,
- eps: eps,
- notifyCh: notifyCh,
- readyc: make(chan struct{}),
- healthCheck: func(ep string) (bool, error) { return grpcHealthCheck(ep, dialFunc) },
- unhealthyHostPorts: make(map[string]time.Time),
- upc: make(chan struct{}),
- stopc: make(chan struct{}),
- downc: make(chan struct{}),
- donec: make(chan struct{}),
- updateAddrsC: make(chan NotifyMsg),
- hostPort2ep: getHostPort2ep(eps),
- }
- if timeout < minHealthRetryDuration {
- timeout = minHealthRetryDuration
- }
- hb.healthCheckTimeout = timeout
-
- close(hb.downc)
- go hb.updateNotifyLoop()
- hb.wg.Add(1)
- go func() {
- defer hb.wg.Done()
- hb.updateUnhealthy()
- }()
- return hb
-}
-
-func (b *GRPC17Health) Start(target string, config grpc.BalancerConfig) error { return nil }
-
-func (b *GRPC17Health) ConnectNotify() <-chan struct{} {
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.upc
-}
-
-func (b *GRPC17Health) UpdateAddrsC() chan NotifyMsg { return b.updateAddrsC }
-func (b *GRPC17Health) StopC() chan struct{} { return b.stopc }
-
-func (b *GRPC17Health) Ready() <-chan struct{} { return b.readyc }
-
-func (b *GRPC17Health) Endpoint(hostPort string) string {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.hostPort2ep[hostPort]
-}
-
-func (b *GRPC17Health) Pinned() string {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.pinAddr
-}
-
-func (b *GRPC17Health) HostPortError(hostPort string, err error) {
- if b.Endpoint(hostPort) == "" {
- lg.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
- return
- }
-
- b.unhealthyMu.Lock()
- b.unhealthyHostPorts[hostPort] = time.Now()
- b.unhealthyMu.Unlock()
- lg.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
-}
-
-func (b *GRPC17Health) removeUnhealthy(hostPort, msg string) {
- if b.Endpoint(hostPort) == "" {
- lg.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
- return
- }
-
- b.unhealthyMu.Lock()
- delete(b.unhealthyHostPorts, hostPort)
- b.unhealthyMu.Unlock()
- lg.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
-}
-
-func (b *GRPC17Health) countUnhealthy() (count int) {
- b.unhealthyMu.RLock()
- count = len(b.unhealthyHostPorts)
- b.unhealthyMu.RUnlock()
- return count
-}
-
-func (b *GRPC17Health) isUnhealthy(hostPort string) (unhealthy bool) {
- b.unhealthyMu.RLock()
- _, unhealthy = b.unhealthyHostPorts[hostPort]
- b.unhealthyMu.RUnlock()
- return unhealthy
-}
-
-func (b *GRPC17Health) cleanupUnhealthy() {
- b.unhealthyMu.Lock()
- for k, v := range b.unhealthyHostPorts {
- if time.Since(v) > b.healthCheckTimeout {
- delete(b.unhealthyHostPorts, k)
- lg.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
- }
- }
- b.unhealthyMu.Unlock()
-}
-
-func (b *GRPC17Health) liveAddrs() ([]grpc.Address, map[string]struct{}) {
- unhealthyCnt := b.countUnhealthy()
-
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- hbAddrs := b.addrs
- if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
- liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
- for k := range b.hostPort2ep {
- liveHostPorts[k] = struct{}{}
- }
- return hbAddrs, liveHostPorts
- }
-
- addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
- liveHostPorts := make(map[string]struct{}, len(addrs))
- for _, addr := range b.addrs {
- if !b.isUnhealthy(addr.Addr) {
- addrs = append(addrs, addr)
- liveHostPorts[addr.Addr] = struct{}{}
- }
- }
- return addrs, liveHostPorts
-}
-
-func (b *GRPC17Health) updateUnhealthy() {
- for {
- select {
- case <-time.After(b.healthCheckTimeout):
- b.cleanupUnhealthy()
- pinned := b.Pinned()
- if pinned == "" || b.isUnhealthy(pinned) {
- select {
- case b.updateAddrsC <- NotifyNext:
- case <-b.stopc:
- return
- }
- }
- case <-b.stopc:
- return
- }
- }
-}
-
-// NeedUpdate returns true if all connections are down or
-// addresses do not include current pinned address.
-func (b *GRPC17Health) NeedUpdate() bool {
- // updating notifyCh can trigger new connections,
- // need update addrs if all connections are down
- // or addrs does not include pinAddr.
- b.mu.RLock()
- update := !hasAddr(b.addrs, b.pinAddr)
- b.mu.RUnlock()
- return update
-}
-
-func (b *GRPC17Health) UpdateAddrs(eps ...string) {
- np := getHostPort2ep(eps)
-
- b.mu.Lock()
- defer b.mu.Unlock()
-
- match := len(np) == len(b.hostPort2ep)
- if match {
- for k, v := range np {
- if b.hostPort2ep[k] != v {
- match = false
- break
- }
- }
- }
- if match {
- // same endpoints, so no need to update address
- return
- }
-
- b.hostPort2ep = np
- b.addrs, b.eps = eps2addrs(eps), eps
-
- b.unhealthyMu.Lock()
- b.unhealthyHostPorts = make(map[string]time.Time)
- b.unhealthyMu.Unlock()
-}
-
-func (b *GRPC17Health) Next() {
- b.mu.RLock()
- downc := b.downc
- b.mu.RUnlock()
- select {
- case b.updateAddrsC <- NotifyNext:
- case <-b.stopc:
- }
- // wait until disconnect so new RPCs are not issued on old connection
- select {
- case <-downc:
- case <-b.stopc:
- }
-}
-
-func (b *GRPC17Health) updateNotifyLoop() {
- defer close(b.donec)
-
- for {
- b.mu.RLock()
- upc, downc, addr := b.upc, b.downc, b.pinAddr
- b.mu.RUnlock()
- // downc or upc should be closed
- select {
- case <-downc:
- downc = nil
- default:
- }
- select {
- case <-upc:
- upc = nil
- default:
- }
- switch {
- case downc == nil && upc == nil:
- // stale
- select {
- case <-b.stopc:
- return
- default:
- }
- case downc == nil:
- b.notifyAddrs(NotifyReset)
- select {
- case <-upc:
- case msg := <-b.updateAddrsC:
- b.notifyAddrs(msg)
- case <-b.stopc:
- return
- }
- case upc == nil:
- select {
- // close connections that are not the pinned address
- case b.notifyCh <- []grpc.Address{{Addr: addr}}:
- case <-downc:
- case <-b.stopc:
- return
- }
- select {
- case <-downc:
- b.notifyAddrs(NotifyReset)
- case msg := <-b.updateAddrsC:
- b.notifyAddrs(msg)
- case <-b.stopc:
- return
- }
- }
- }
-}
-
-func (b *GRPC17Health) notifyAddrs(msg NotifyMsg) {
- if msg == NotifyNext {
- select {
- case b.notifyCh <- []grpc.Address{}:
- case <-b.stopc:
- return
- }
- }
- b.mu.RLock()
- pinAddr := b.pinAddr
- downc := b.downc
- b.mu.RUnlock()
- addrs, hostPorts := b.liveAddrs()
-
- var waitDown bool
- if pinAddr != "" {
- _, ok := hostPorts[pinAddr]
- waitDown = !ok
- }
-
- select {
- case b.notifyCh <- addrs:
- if waitDown {
- select {
- case <-downc:
- case <-b.stopc:
- }
- }
- case <-b.stopc:
- }
-}
-
-func (b *GRPC17Health) Up(addr grpc.Address) func(error) {
- if !b.mayPin(addr) {
- return func(err error) {}
- }
-
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // gRPC might call Up after it called Close. We add this check
- // to "fix" it up at application layer. Otherwise, will panic
- // if b.upc is already closed.
- if b.closed {
- return func(err error) {}
- }
-
- // gRPC might call Up on a stale address.
- // Prevent updating pinAddr with a stale address.
- if !hasAddr(b.addrs, addr.Addr) {
- return func(err error) {}
- }
-
- if b.pinAddr != "" {
- lg.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
- return func(err error) {}
- }
-
- // notify waiting Get()s and pin first connected address
- close(b.upc)
- b.downc = make(chan struct{})
- b.pinAddr = addr.Addr
- lg.Infof("clientv3/balancer: pin %q", addr.Addr)
-
- // notify client that a connection is up
- b.readyOnce.Do(func() { close(b.readyc) })
-
- return func(err error) {
- // If connected to a black hole endpoint or a killed server, the gRPC ping
- // timeout will induce a network I/O error, and retrying until success;
- // finding healthy endpoint on retry could take several timeouts and redials.
- // To avoid wasting retries, gray-list unhealthy endpoints.
- b.HostPortError(addr.Addr, err)
-
- b.mu.Lock()
- b.upc = make(chan struct{})
- close(b.downc)
- b.pinAddr = ""
- b.mu.Unlock()
- lg.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
- }
-}
-
-func (b *GRPC17Health) mayPin(addr grpc.Address) bool {
- if b.Endpoint(addr.Addr) == "" { // stale host:port
- return false
- }
-
- b.unhealthyMu.RLock()
- unhealthyCnt := len(b.unhealthyHostPorts)
- failedTime, bad := b.unhealthyHostPorts[addr.Addr]
- b.unhealthyMu.RUnlock()
-
- b.mu.RLock()
- skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
- b.mu.RUnlock()
- if skip || !bad {
- return true
- }
-
- // prevent isolated member's endpoint from being infinitely retried, as follows:
- // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
- // 2. balancer 'Up' unpins with grpc: failed with network I/O error
- // 3. grpc-healthcheck still SERVING, thus retry to pin
- // instead, return before grpc-healthcheck if failed within healthcheck timeout
- if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
- lg.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
- return false
- }
-
- if ok, _ := b.healthCheck(addr.Addr); ok {
- b.removeUnhealthy(addr.Addr, "health check success")
- return true
- }
-
- b.HostPortError(addr.Addr, errors.New("health check failed"))
- return false
-}
-
-func (b *GRPC17Health) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
- var (
- addr string
- closed bool
- )
-
- // If opts.BlockingWait is false (for fail-fast RPCs), it should return
- // an address it has notified via Notify immediately instead of blocking.
- if !opts.BlockingWait {
- b.mu.RLock()
- closed = b.closed
- addr = b.pinAddr
- b.mu.RUnlock()
- if closed {
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- }
- if addr == "" {
- return grpc.Address{Addr: ""}, nil, ErrNoAddrAvailable
- }
- return grpc.Address{Addr: addr}, func() {}, nil
- }
-
- for {
- b.mu.RLock()
- ch := b.upc
- b.mu.RUnlock()
- select {
- case <-ch:
- case <-b.donec:
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- case <-ctx.Done():
- return grpc.Address{Addr: ""}, nil, ctx.Err()
- }
- b.mu.RLock()
- closed = b.closed
- addr = b.pinAddr
- b.mu.RUnlock()
- // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
- if closed {
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- }
- if addr != "" {
- break
- }
- }
- return grpc.Address{Addr: addr}, func() {}, nil
-}
-
-func (b *GRPC17Health) Notify() <-chan []grpc.Address { return b.notifyCh }
-
-func (b *GRPC17Health) Close() error {
- b.mu.Lock()
- // In case gRPC calls close twice. TODO: remove the checking
- // when we are sure that gRPC wont call close twice.
- if b.closed {
- b.mu.Unlock()
- <-b.donec
- return nil
- }
- b.closed = true
- b.stopOnce.Do(func() { close(b.stopc) })
- b.pinAddr = ""
-
- // In the case of following scenario:
- // 1. upc is not closed; no pinned address
- // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
- // 3. client.conn.Close() calls balancer.Close(); closed = true
- // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
- // we must close upc so Get() exits from blocking on upc
- select {
- case <-b.upc:
- default:
- // terminate all waiting Get()s
- close(b.upc)
- }
-
- b.mu.Unlock()
- b.wg.Wait()
-
- // wait for updateNotifyLoop to finish
- <-b.donec
- close(b.notifyCh)
-
- return nil
-}
-
-func grpcHealthCheck(ep string, dialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error)) (bool, error) {
- conn, err := dialFunc(ep)
- if err != nil {
- return false, err
- }
- defer conn.Close()
- cli := healthpb.NewHealthClient(conn)
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
- cancel()
- if err != nil {
- if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
- if s.Message() == unknownService { // etcd < v3.3.0
- return true, nil
- }
- }
- return false, err
- }
- return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
-}
-
-func hasAddr(addrs []grpc.Address, targetAddr string) bool {
- for _, addr := range addrs {
- if targetAddr == addr.Addr {
- return true
- }
- }
- return false
-}
-
-func getHost(ep string) string {
- url, uerr := url.Parse(ep)
- if uerr != nil || !strings.Contains(ep, "://") {
- return ep
- }
- return url.Host
-}
-
-func eps2addrs(eps []string) []grpc.Address {
- addrs := make([]grpc.Address, len(eps))
- for i := range eps {
- addrs[i].Addr = getHost(eps[i])
- }
- return addrs
-}
-
-func getHostPort2ep(eps []string) map[string]string {
- hm := make(map[string]string, len(eps))
- for i := range eps {
- _, host, _ := parseEndpoint(eps[i])
- hm[host] = eps[i]
- }
- return hm
-}
-
-func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
- proto = "tcp"
- host = endpoint
- url, uerr := url.Parse(endpoint)
- if uerr != nil || !strings.Contains(endpoint, "://") {
- return proto, host, scheme
- }
- scheme = url.Scheme
-
- // strip scheme:// prefix since grpc dials by host
- host = url.Host
- switch url.Scheme {
- case "http", "https":
- case "unix", "unixs":
- proto = "unix"
- host = url.Host + url.Path
- default:
- proto, host = "", ""
- }
- return proto, host, scheme
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
index c70ce15..9e04378 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
@@ -22,13 +22,18 @@
// NewErr returns a picker that always returns err on "Pick".
func NewErr(err error) Picker {
- return &errPicker{err: err}
+ return &errPicker{p: Error, err: err}
}
type errPicker struct {
+ p Policy
err error
}
-func (p *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- return nil, nil, p.err
+func (ep *errPicker) String() string {
+ return ep.p.String()
+}
+
+func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ return nil, nil, ep.err
}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
index 7ea761b..bd1a5d2 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
@@ -15,10 +15,77 @@
package picker
import (
+ "fmt"
+
+ "go.uber.org/zap"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
)
// Picker defines balancer Picker methods.
type Picker interface {
balancer.Picker
+ String() string
+}
+
+// Config defines picker configuration.
+type Config struct {
+ // Policy specifies etcd clientv3's built in balancer policy.
+ Policy Policy
+
+ // Logger defines picker logging object.
+ Logger *zap.Logger
+
+ // SubConnToResolverAddress maps each gRPC sub-connection to an address.
+ // Basically, it is a list of addresses that the Picker can pick from.
+ SubConnToResolverAddress map[balancer.SubConn]resolver.Address
+}
+
+// Policy defines balancer picker policy.
+type Policy uint8
+
+const (
+ // Error is error picker policy.
+ Error Policy = iota
+
+ // RoundrobinBalanced balances loads over multiple endpoints
+ // and implements failover in roundrobin fashion.
+ RoundrobinBalanced
+
+ // Custom defines custom balancer picker.
+ // TODO: custom picker is not supported yet.
+ Custom
+)
+
+func (p Policy) String() string {
+ switch p {
+ case Error:
+ return "picker-error"
+
+ case RoundrobinBalanced:
+ return "picker-roundrobin-balanced"
+
+ case Custom:
+ panic("'custom' picker policy is not supported yet")
+
+ default:
+ panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
+ }
+}
+
+// New creates a new Picker.
+func New(cfg Config) Picker {
+ switch cfg.Policy {
+ case Error:
+ panic("'error' picker policy is not supported here; use 'picker.NewErr'")
+
+ case RoundrobinBalanced:
+ return newRoundrobinBalanced(cfg)
+
+ case Custom:
+ panic("'custom' picker policy is not supported yet")
+
+ default:
+ panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
+ }
}
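
`Policy` and the constructor now live here (the deleted `picker_policy.go` follows), and `New` takes a `Config` instead of positional arguments. A sketch of building a round-robin picker directly; `fakeSubConn` and the addresses are stand-ins, since real sub-connections are created by gRPC:

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3/balancer/picker"
	"go.uber.org/zap"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// fakeSubConn satisfies balancer.SubConn for illustration only.
type fakeSubConn struct{ name string }

func (f *fakeSubConn) UpdateAddresses([]resolver.Address) {}
func (f *fakeSubConn) Connect()                           {}

func main() {
	p := picker.New(picker.Config{
		Policy: picker.RoundrobinBalanced,
		Logger: zap.NewNop(),
		SubConnToResolverAddress: map[balancer.SubConn]resolver.Address{
			&fakeSubConn{"a"}: {Addr: "10.0.0.1:2379"}, // assumed addresses
			&fakeSubConn{"b"}: {Addr: "10.0.0.2:2379"},
		},
	})
	fmt.Println(p.String()) // "picker-roundrobin-balanced"

	// Round-robin over the ready sub-connections.
	for i := 0; i < 3; i++ {
		sc, done, err := p.Pick(context.Background(), balancer.PickOptions{})
		if err != nil {
			fmt.Println("pick error:", err)
			continue
		}
		done(balancer.DoneInfo{})
		fmt.Println("picked", sc.(*fakeSubConn).name)
	}
}
```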
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go
deleted file mode 100644
index 463ddc2..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import "fmt"
-
-// Policy defines balancer picker policy.
-type Policy uint8
-
-const (
- // TODO: custom picker is not supported yet.
- // custom defines custom balancer picker.
- custom Policy = iota
-
- // RoundrobinBalanced balance loads over multiple endpoints
- // and implements failover in roundrobin fashion.
- RoundrobinBalanced Policy = iota
-
- // TODO: only send loads to pinned address "RoundrobinFailover"
- // just like how 3.3 client works
- //
- // TODO: priotize leader
- // TODO: health-check
- // TODO: weighted roundrobin
- // TODO: power of two random choice
-)
-
-func (p Policy) String() string {
- switch p {
- case custom:
- panic("'custom' picker policy is not supported yet")
- case RoundrobinBalanced:
- return "etcd-client-roundrobin-balanced"
- default:
- panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
- }
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
index b043d57..1b8b285 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -24,32 +24,33 @@
"google.golang.org/grpc/resolver"
)
-// NewRoundrobinBalanced returns a new roundrobin balanced picker.
-func NewRoundrobinBalanced(
- lg *zap.Logger,
- scs []balancer.SubConn,
- addrToSc map[resolver.Address]balancer.SubConn,
- scToAddr map[balancer.SubConn]resolver.Address,
-) Picker {
+// newRoundrobinBalanced returns a new roundrobin balanced picker.
+func newRoundrobinBalanced(cfg Config) Picker {
+ scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
+ for sc := range cfg.SubConnToResolverAddress {
+ scs = append(scs, sc)
+ }
return &rrBalanced{
- lg: lg,
+ p: RoundrobinBalanced,
+ lg: cfg.Logger,
scs: scs,
- addrToSc: addrToSc,
- scToAddr: scToAddr,
+ scToAddr: cfg.SubConnToResolverAddress,
}
}
type rrBalanced struct {
+ p Policy
+
lg *zap.Logger
- mu sync.RWMutex
- next int
- scs []balancer.SubConn
-
- addrToSc map[resolver.Address]balancer.SubConn
+ mu sync.RWMutex
+ next int
+ scs []balancer.SubConn
scToAddr map[balancer.SubConn]resolver.Address
}
+func (rb *rrBalanced) String() string { return rb.p.String() }
+
// Pick is called for every client request.
func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
rb.mu.RLock()
@@ -68,6 +69,7 @@
rb.lg.Debug(
"picked",
+ zap.String("picker", rb.p.String()),
zap.String("address", picked),
zap.Int("subconn-index", cur),
zap.Int("subconn-size", n),
@@ -77,6 +79,7 @@
// TODO: error handling?
fss := []zapcore.Field{
zap.Error(info.Err),
+ zap.String("picker", rb.p.String()),
zap.String("address", picked),
zap.Bool("success", info.Err == nil),
zap.Bool("bytes-sent", info.BytesSent),
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
index a11faeb..48eb875 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
@@ -29,9 +29,9 @@
return fmt.Sprintf("%p", sc)
}
-func scsToStrings(scs map[resolver.Address]balancer.SubConn) (ss []string) {
+func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
ss = make([]string, 0, len(scs))
- for a, sc := range scs {
+ for sc, a := range scs {
ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
}
sort.Strings(ss)
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
index 445ecfe..d6000a8 100644
--- a/vendor/go.etcd.io/etcd/clientv3/client.go
+++ b/vendor/go.etcd.io/etcd/clientv3/client.go
@@ -16,7 +16,6 @@
import (
"context"
- "crypto/tls"
"errors"
"fmt"
"net"
@@ -30,12 +29,13 @@
"go.etcd.io/etcd/clientv3/balancer"
"go.etcd.io/etcd/clientv3/balancer/picker"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
+ "go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/logutil"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
+ grpccredentials "google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
@@ -51,12 +51,17 @@
func init() {
lg := zap.NewNop()
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+ lcfg := logutil.DefaultZapLoggerConfig
+ lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+
var err error
- lg, err = zap.NewProductionConfig().Build() // info level logging
+ lg, err = lcfg.Build() // info level logging
if err != nil {
panic(err)
}
}
+
+ // TODO: support custom balancer
balancer.RegisterBuilder(balancer.Config{
Policy: picker.RoundrobinBalanced,
Name: roundRobinBalancerName,
@@ -76,10 +81,9 @@
conn *grpc.ClientConn
cfg Config
- creds *credentials.TransportCredentials
- balancer balancer.Balancer
+ creds grpccredentials.TransportCredentials
resolverGroup *endpoint.ResolverGroup
- mu *sync.Mutex
+ mu *sync.RWMutex
ctx context.Context
cancel context.CancelFunc
@@ -87,9 +91,8 @@
// Username is a user name for authentication.
Username string
// Password is a password for authentication.
- Password string
- // tokenCred is an instance of WithPerRPCCredentials()'s argument
- tokenCred *authTokenCredential
+ Password string
+ authTokenBundle credentials.Bundle
callOpts []grpc.CallOption
@@ -126,8 +129,12 @@
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
- c.Watcher.Close()
- c.Lease.Close()
+ if c.Watcher != nil {
+ c.Watcher.Close()
+ }
+ if c.Lease != nil {
+ c.Lease.Close()
+ }
if c.resolverGroup != nil {
c.resolverGroup.Close()
}
@@ -143,11 +150,13 @@
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() (eps []string) {
+func (c *Client) Endpoints() []string {
// copy the slice; protect original endpoints from being changed
- eps = make([]string, len(c.cfg.Endpoints))
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ eps := make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
- return
+ return eps
}
// SetEndpoints updates client's endpoints.
@@ -192,24 +201,7 @@
}
}
-type authTokenCredential struct {
- token string
- tokenMu *sync.RWMutex
-}
-
-func (cred authTokenCredential) RequireTransportSecurity() bool {
- return false
-}
-
-func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
- cred.tokenMu.RLock()
- defer cred.tokenMu.RUnlock()
- return map[string]string{
- rpctypes.TokenFieldNameGRPC: cred.token,
- }, nil
-}
-
-func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
+func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
@@ -219,9 +211,7 @@
if creds != nil {
break
}
- tlsconfig := &tls.Config{}
- emptyCreds := credentials.NewTLS(tlsconfig)
- creds = &emptyCreds
+ creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
default:
creds = nil
}
@@ -229,7 +219,7 @@
}
// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
@@ -254,7 +244,7 @@
opts = append(opts, grpc.WithDialer(f))
if creds != nil {
- opts = append(opts, grpc.WithTransportCredentials(*creds))
+ opts = append(opts, grpc.WithTransportCredentials(creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
@@ -288,8 +278,8 @@
var err error // return last error in a case of fail
var auth *authenticator
- for i := 0; i < len(c.cfg.Endpoints); i++ {
- ep := c.cfg.Endpoints[i]
+ eps := c.Endpoints()
+ for _, ep := range eps {
// use dial options without dopts to avoid reusing the client balancer
var dOpts []grpc.DialOption
_, host, _ := endpoint.ParseEndpoint(ep)
@@ -317,10 +307,7 @@
continue
}
- c.tokenCred.tokenMu.Lock()
- c.tokenCred.token = resp.Token
- c.tokenCred.tokenMu.Unlock()
-
+ c.authTokenBundle.UpdateAuthToken(resp.Token)
return nil
}
@@ -337,16 +324,14 @@
}
// dial configures and dials any grpc balancer target.
-func (c *Client) dial(target string, creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts, err := c.dialSetupOpts(creds, dopts...)
if err != nil {
return nil, fmt.Errorf("failed to configure dialer: %v", err)
}
if c.Username != "" && c.Password != "" {
- c.tokenCred = &authTokenCredential{
- tokenMu: &sync.RWMutex{},
- }
+ c.authTokenBundle = credentials.NewBundle(credentials.Config{})
ctx, cancel := c.ctx, func() {}
if c.cfg.DialTimeout > 0 {
@@ -363,7 +348,7 @@
return nil, err
}
} else {
- opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
+ opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
}
cancel()
}
@@ -384,26 +369,25 @@
return conn, nil
}
-func (c *Client) directDialCreds(ep string) *credentials.TransportCredentials {
+func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials {
_, hostPort, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
if creds != nil {
- c := *creds
- clone := c.Clone()
+ clone := creds.Clone()
// Set the server name must to the endpoint hostname without port since grpc
// otherwise attempts to check if x509 cert is valid for the full endpoint
// including the scheme and port, which fails.
host, _ := endpoint.ParseHostPort(hostPort)
clone.OverrideServerName(host)
- creds = &clone
+ creds = clone
}
}
return creds
}
-func (c *Client) dialWithBalancerCreds(ep string) *credentials.TransportCredentials {
+func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
_, _, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
@@ -423,10 +407,9 @@
if cfg == nil {
cfg = &Config{}
}
- var creds *credentials.TransportCredentials
+ var creds grpccredentials.TransportCredentials
if cfg.TLS != nil {
- c := credentials.NewTLS(cfg.TLS)
- creds = &c
+ creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
}
// use a temporary skeleton client to bootstrap first connection
@@ -442,7 +425,7 @@
creds: creds,
ctx: ctx,
cancel: cancel,
- mu: new(sync.Mutex),
+ mu: new(sync.RWMutex),
callOpts: defaultCallOpts,
}
@@ -540,13 +523,17 @@
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
- errc := make(chan error, len(c.cfg.Endpoints))
+
+ eps := c.Endpoints()
+ errc := make(chan error, len(eps))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+ cancel()
+ ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
}
- wg.Add(len(c.cfg.Endpoints))
- for _, ep := range c.cfg.Endpoints {
+
+ wg.Add(len(eps))
+ for _, ep := range eps {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
@@ -558,8 +545,15 @@
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
- maj, _ = strconv.Atoi(vs[0])
- min, rerr = strconv.Atoi(vs[1])
+ var serr error
+ if maj, serr = strconv.Atoi(vs[0]); serr != nil {
+ errc <- serr
+ return
+ }
+ if min, serr = strconv.Atoi(vs[1]); serr != nil {
+ errc <- serr
+ return
+ }
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
@@ -568,7 +562,7 @@
}(ep)
}
// wait for success
- for i := 0; i < len(c.cfg.Endpoints); i++ {
+ for range eps {
if err = <-errc; err == nil {
break
}
@@ -608,10 +602,13 @@
if err == nil {
return false
}
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- return ev.Code() == codes.Unavailable
+ ev, ok := status.FromError(err)
+ if ok {
+ // Unavailable codes mean the system will be right back.
+ // (e.g., can't connect, lost leader)
+ return ev.Code() == codes.Unavailable
+ }
+ return false
}
func toErr(ctx context.Context, err error) error {
@@ -631,9 +628,6 @@
if ctx.Err() != nil {
err = ctx.Err()
}
- case codes.Unavailable:
- case codes.FailedPrecondition:
- err = grpc.ErrClientConnClosing
}
}
return err
@@ -653,16 +647,19 @@
if err == nil {
return false
}
- // >= gRPC v1.10.x
+
+ // >= gRPC v1.23.x
s, ok := status.FromError(err)
if ok {
// connection is canceled or server has already closed the connection
return s.Code() == codes.Canceled || s.Message() == "transport is closing"
}
+
// >= gRPC v1.10.x
if err == context.Canceled {
return true
}
+
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
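
On the client side, TLS handling now goes through the new `clientv3/credentials` bundle and `Endpoints()` is guarded by the `RWMutex`. A sketch of a client construction that exercises those paths; the endpoint is assumed, the `InsecureSkipVerify` TLS config is for illustration only, and `grpc.WithBlock()` is the dial option called out in the `config.go` hunk below:

```go
package main

import (
	"crypto/tls"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://127.0.0.1:2379"},    // assumed endpoint
		DialTimeout: 5 * time.Second,
		TLS:         &tls.Config{InsecureSkipVerify: true}, // demo only; load real certs in production
		DialOptions: []grpc.DialOption{grpc.WithBlock()},   // block until the connection is up
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Endpoints() now copies the slice under a read lock.
	log.Println("configured endpoints:", cli.Endpoints())
}
```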
diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go
index d497c05..ce97e5c 100644
--- a/vendor/go.etcd.io/etcd/clientv3/cluster.go
+++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go
@@ -24,11 +24,12 @@
)
type (
- Member pb.Member
- MemberListResponse pb.MemberListResponse
- MemberAddResponse pb.MemberAddResponse
- MemberRemoveResponse pb.MemberRemoveResponse
- MemberUpdateResponse pb.MemberUpdateResponse
+ Member pb.Member
+ MemberListResponse pb.MemberListResponse
+ MemberAddResponse pb.MemberAddResponse
+ MemberRemoveResponse pb.MemberRemoveResponse
+ MemberUpdateResponse pb.MemberUpdateResponse
+ MemberPromoteResponse pb.MemberPromoteResponse
)
type Cluster interface {
@@ -38,11 +39,17 @@
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+ // MemberAddAsLearner adds a new learner member into the cluster.
+ MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+
+ // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+ MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
}
type cluster struct {
@@ -67,12 +74,23 @@
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, false)
+}
+
+func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, true)
+}
+
+func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
// fail-fast before panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
- r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
+ r := &pb.MemberAddRequest{
+ PeerURLs: peerAddrs,
+ IsLearner: isLearner,
+ }
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
@@ -112,3 +130,12 @@
}
return nil, toErr(ctx, err)
}
+
+func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+ r := &pb.MemberPromoteRequest{ID: id}
+ resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*MemberPromoteResponse)(resp), nil
+}
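
The cluster API gains learner support: `MemberAddAsLearner` adds a non-voting member and `MemberPromote` turns it into a voting member. A sketch of the two-step flow, assuming a reachable endpoint and a hypothetical peer URL for the new node:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Add the new node as a non-voting learner first...
	addResp, err := cli.MemberAddAsLearner(ctx, []string{"http://10.0.0.4:2380"}) // hypothetical peer URL
	if err != nil {
		log.Fatal(err)
	}

	// ...then promote it to a voting member once its log has caught up.
	if _, err := cli.MemberPromote(ctx, addResp.Member.ID); err != nil {
		log.Fatal(err)
	}
}
```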
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
index 0135341..306470b 100644
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
@@ -16,6 +16,7 @@
import (
"context"
+ "errors"
"fmt"
"sync"
@@ -23,6 +24,9 @@
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var ErrLocked = errors.New("mutex: Locked by another session")
+
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
s *Session
@@ -37,9 +41,56 @@
return &Mutex{s, pfx + "/", "", -1, nil}
}
+// TryLock locks the mutex if not already locked by another session.
+// If the lock is held by another session, it returns immediately after attempting the necessary cleanup.
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+ // if no key on prefix / the minimum rev is key, already hold the lock
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+ // Cannot lock, so delete the key
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return ErrLocked
+}
+
// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+ // if no key on prefix / the minimum rev is key, already hold the lock
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+ // wait for deletion revisions prior to myKey
+ hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+ // release lock key if wait failed
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ } else {
+ m.hdr = hdr
+ }
+ return werr
+}
+
+func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
s := m.s
client := m.s.Client()
@@ -53,28 +104,13 @@
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
- return err
+ return nil, err
}
m.myRev = resp.Header.Revision
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
- // if no key on prefix / the minimum rev is key, already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
-
- // wait for deletion revisions prior to myKey
- hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
- // release lock key if wait failed
- if werr != nil {
- m.Unlock(client.Ctx())
- } else {
- m.hdr = hdr
- }
- return werr
+ return resp, nil
}
func (m *Mutex) Unlock(ctx context.Context) error {
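
For reference, a usage sketch of the new non-blocking TryLock and ErrLocked added above. This is not part of the vendored diff; the endpoint and lock prefix are placeholders.

```go
// Hypothetical sketch: attempt the lock once and give up immediately if
// another session already holds it, instead of blocking in Lock.
package main

import (
	"context"
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/")
	switch err := m.TryLock(context.TODO()); err {
	case nil:
		// Lock acquired; release it when done.
		defer m.Unlock(context.TODO())
		fmt.Println("acquired lock")
	case concurrency.ErrLocked:
		fmt.Println("lock held by another session; not waiting")
	default:
		log.Fatal(err)
	}
}
```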
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
index 598ec0e..97eb763 100644
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
@@ -47,7 +47,7 @@
if err != nil {
return nil, err
}
- id = v3.LeaseID(resp.ID)
+ id = resp.ID
}
ctx, cancel := context.WithCancel(ops.ctx)
diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go
index bd03768..11d447d 100644
--- a/vendor/go.etcd.io/etcd/clientv3/config.go
+++ b/vendor/go.etcd.io/etcd/clientv3/config.go
@@ -68,6 +68,8 @@
RejectOldCluster bool `json:"reject-old-cluster"`
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+ // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+ // Without this, Dial returns immediately and connecting the server happens in background.
DialOptions []grpc.DialOption
// Context is the default client context; it can be used to cancel grpc dial out and
@@ -81,4 +83,6 @@
// PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
PermitWithoutStream bool `json:"permit-without-stream"`
+
+ // TODO: support custom balancer picker
}
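
The new DialOptions comment above documents blocking dials. A configuration sketch, not part of the vendored diff, with a placeholder endpoint:

```go
// Hypothetical sketch: grpc.WithBlock() makes clientv3.New wait until the
// underlying connection is up; without it, dialing happens in the background.
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()},
	})
	if err != nil {
		// With a blocking dial, an unreachable endpoint surfaces here
		// once DialTimeout expires.
		log.Fatal(err)
	}
	defer cli.Close()
}
```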
diff --git a/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
new file mode 100644
index 0000000..e6fd75c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements gRPC credential interface with etcd specific logic.
+// e.g., client handshake with custom authority parameter
+package credentials
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "sync"
+
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ grpccredentials "google.golang.org/grpc/credentials"
+)
+
+// Config defines gRPC credential configuration.
+type Config struct {
+ TLSConfig *tls.Config
+}
+
+// Bundle defines gRPC credential interface.
+type Bundle interface {
+ grpccredentials.Bundle
+ UpdateAuthToken(token string)
+}
+
+// NewBundle constructs a new gRPC credential bundle.
+func NewBundle(cfg Config) Bundle {
+ return &bundle{
+ tc: newTransportCredential(cfg.TLSConfig),
+ rc: newPerRPCCredential(),
+ }
+}
+
+// bundle implements "grpccredentials.Bundle" interface.
+type bundle struct {
+ tc *transportCredential
+ rc *perRPCCredential
+}
+
+func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
+ return b.tc
+}
+
+func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+ return b.rc
+}
+
+func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
+ // no-op
+ return nil, nil
+}
+
+// transportCredential implements "grpccredentials.TransportCredentials" interface.
+type transportCredential struct {
+ gtc grpccredentials.TransportCredentials
+}
+
+func newTransportCredential(cfg *tls.Config) *transportCredential {
+ return &transportCredential{
+ gtc: grpccredentials.NewTLS(cfg),
+ }
+}
+
+func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+ // Only overwrite when authority is an IP address!
+ // Let's say, a server runs SRV records on "etcd.local" that resolves
+ // to "m1.etcd.local", and its SAN field also includes "m1.etcd.local".
+ // But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)?
+ // Then, the server should only authenticate using its DNS hostname "m1.etcd.local",
+ // instead of overwriting it with its IP address.
+ // And we do not overwrite "localhost" either. Only overwrite IP addresses!
+ if isIP(authority) {
+ target := rawConn.RemoteAddr().String()
+ if authority != target {
+ // When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget"
+ // update only happens once. This is problematic, because when TLS is enabled,
+ // retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from
+ // the initial dial call.
+ // If the server authenticates by IP addresses, we want to set a new endpoint as
+ // a new authority. Otherwise
+ // "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156"
+ // when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180"
+ // but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156"
+ authority = target
+ }
+ }
+ return tc.gtc.ClientHandshake(ctx, authority, rawConn)
+}
+
+// return true if given string is an IP.
+func isIP(ep string) bool {
+ return net.ParseIP(ep) != nil
+}
+
+func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+ return tc.gtc.ServerHandshake(rawConn)
+}
+
+func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
+ return tc.gtc.Info()
+}
+
+func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+ return &transportCredential{
+ gtc: tc.gtc.Clone(),
+ }
+}
+
+func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
+ return tc.gtc.OverrideServerName(serverNameOverride)
+}
+
+// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+type perRPCCredential struct {
+ authToken string
+ authTokenMu sync.RWMutex
+}
+
+func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+ rc.authTokenMu.RLock()
+ authToken := rc.authToken
+ rc.authTokenMu.RUnlock()
+ return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (b *bundle) UpdateAuthToken(token string) {
+ if b.rc == nil {
+ return
+ }
+ b.rc.UpdateAuthToken(token)
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+ rc.authTokenMu.Lock()
+ rc.authToken = token
+ rc.authTokenMu.Unlock()
+}
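
The new credentials package above bundles transport TLS with a refreshable per-RPC auth token; clientv3 wires this in internally. A sketch of how the Bundle maps onto gRPC dial options, not part of the vendored diff, with a placeholder target and empty TLS config:

```go
// Hypothetical sketch: construct a Bundle and attach its two credential
// halves to a gRPC connection; the token can be rotated without re-dialing.
package main

import (
	"crypto/tls"
	"log"

	"go.etcd.io/etcd/clientv3/credentials"
	"google.golang.org/grpc"
)

func main() {
	b := credentials.NewBundle(credentials.Config{TLSConfig: &tls.Config{}})

	// The per-RPC credential attaches this token to every request's metadata.
	b.UpdateAuthToken("my-auth-token")

	conn, err := grpc.Dial(
		"etcd.example.com:2379",
		grpc.WithTransportCredentials(b.TransportCredentials()),
		grpc.WithPerRPCCredentials(b.PerRPCCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```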
diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go
index 01a3f59..913cd28 100644
--- a/vendor/go.etcd.io/etcd/clientv3/doc.go
+++ b/vendor/go.etcd.io/etcd/clientv3/doc.go
@@ -90,7 +90,7 @@
// // with etcd clientv3 <= v3.3
// if err == context.Canceled {
// // grpc balancer calls 'Get' with an inflight client.Close
-// } else if err == grpc.ErrClientConnClosing {
-// } else if err == grpc.ErrClientConnClosing { // <= gRPC v1.7.x
// // grpc balancer calls 'Get' after client.Close.
// }
// // with etcd clientv3 >= v3.4
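
The doc.go comment above distinguishes error handling across client and gRPC versions. A sketch of how a clientv3 >= v3.4 caller can classify errors, not part of the vendored diff, with a placeholder endpoint and key:

```go
// Hypothetical sketch: separate context errors from gRPC status errors.
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc/status"
)

// classifyErr is a hypothetical helper, not an etcd API.
func classifyErr(err error) {
	switch err {
	case nil:
		return
	case context.Canceled:
		log.Print("request canceled by caller")
	case context.DeadlineExceeded:
		log.Print("request deadline exceeded")
	default:
		if s, ok := status.FromError(err); ok {
			log.Printf("etcd RPC failed: code=%s msg=%s", s.Code(), s.Message())
		} else {
			log.Printf("non-gRPC error: %v", err)
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	_, err = cli.Get(context.TODO(), "sample-key")
	classifyErr(err)
}
```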
diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go
index 085dd28..81ae31f 100644
--- a/vendor/go.etcd.io/etcd/clientv3/op.go
+++ b/vendor/go.etcd.io/etcd/clientv3/op.go
@@ -113,13 +113,13 @@
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable == true }
+func (op Op) IsSerializable() bool { return op.serializable }
// IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
// IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly == true }
+func (op Op) IsCountOnly() bool { return op.countOnly }
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go
index 4660ace..700714c 100644
--- a/vendor/go.etcd.io/etcd/clientv3/options.go
+++ b/vendor/go.etcd.io/etcd/clientv3/options.go
@@ -47,7 +47,7 @@
// client-side streaming retry limit, only applied to requests where server responds with
// a error code clearly indicating it was unable to process the request such as codes.Unavailable.
// If set to 0, retry is disabled.
- defaultStreamMaxRetries = uint(^uint(0)) // max uint
+ defaultStreamMaxRetries = ^uint(0) // max uint
// client-side retry backoff wait between requests.
defaultBackoffWaitBetween = 25 * time.Millisecond
diff --git a/vendor/go.etcd.io/etcd/clientv3/ready_wait.go b/vendor/go.etcd.io/etcd/clientv3/ready_wait.go
deleted file mode 100644
index c6ef585..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/ready_wait.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import "context"
-
-// TODO: remove this when "FailFast=false" is fixed.
-// See https://github.com/grpc/grpc-go/issues/1532.
-func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
- select {
- case <-ready:
- return nil
- case <-rpcCtx.Done():
- return rpcCtx.Err()
- case <-clientCtx.Done():
- return clientCtx.Err()
- }
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go
index 38ad00a..7e855de 100644
--- a/vendor/go.etcd.io/etcd/clientv3/retry.go
+++ b/vendor/go.etcd.io/etcd/clientv3/retry.go
@@ -43,10 +43,6 @@
}
}
-type rpcFunc func(ctx context.Context) error
-type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
-type retryStopErrFunc func(error) bool
-
// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
//
// immutable requests (e.g. Get) should be retried unless it's
@@ -183,6 +179,10 @@
return rcc.cc.MemberUpdate(ctx, in, opts...)
}
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+ return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
type retryMaintenanceClient struct {
mc pb.MaintenanceClient
}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
index e48a003..080490a 100644
--- a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
+++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
@@ -28,6 +28,7 @@
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
)
// unaryClientInterceptor returns a new retrying unary client interceptor.
@@ -79,7 +80,7 @@
zap.String("target", cc.Target()),
zap.Error(gterr),
)
- return lastErr // return the original error for simplicity
+ return gterr // lastErr must be invalid auth token
}
continue
}
@@ -109,7 +110,7 @@
return streamer(ctx, desc, cc, method, grpcOpts...)
}
if desc.ClientStreams {
- return nil, grpc.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+ return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
}
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
logger.Warn("retry stream intercept", zap.Error(err))
@@ -296,11 +297,11 @@
func contextErrToGrpcErr(err error) error {
switch err {
case context.DeadlineExceeded:
- return grpc.Errorf(codes.DeadlineExceeded, err.Error())
+ return status.Errorf(codes.DeadlineExceeded, err.Error())
case context.Canceled:
- return grpc.Errorf(codes.Canceled, err.Error())
+ return status.Errorf(codes.Canceled, err.Error())
default:
- return grpc.Errorf(codes.Unknown, err.Error())
+ return status.Errorf(codes.Unknown, err.Error())
}
}
@@ -328,13 +329,6 @@
}}
}
-// withAuthRetry sets enables authentication retries.
-func withAuthRetry(retryAuth bool) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.retryAuth = retryAuth
- }}
-}
-
// withMax sets the maximum number of retries on this call, or this interceptor.
func withMax(maxRetries uint) retryOption {
return retryOption{applyFunc: func(o *options) {
diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go
index d50acbc..87d222d 100644
--- a/vendor/go.etcd.io/etcd/clientv3/watch.go
+++ b/vendor/go.etcd.io/etcd/clientv3/watch.go
@@ -384,6 +384,7 @@
w.mu.Lock()
if w.streams == nil {
+ w.mu.Unlock()
return fmt.Errorf("no stream found for context")
}
wgs := w.streams[ctxKey]
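
The watch.go fix above releases w.mu before the early return. A standalone sketch of that pattern, not part of the vendored diff and using hypothetical names:

```go
// Hypothetical sketch: every return path taken while holding a mutex must
// release it, otherwise later callers deadlock on Lock.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	streams map[string]struct{}
}

func (r *registry) close(key string) error {
	r.mu.Lock()
	if r.streams == nil {
		r.mu.Unlock() // release before the early return, as in the fix
		return fmt.Errorf("no stream found for context")
	}
	delete(r.streams, key)
	r.mu.Unlock()
	return nil
}

func main() {
	r := &registry{}
	fmt.Println(r.close("ctx-key")) // streams is nil: error, but no deadlock
}
```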