VOL-4019: Initial commit with grpc nbi, sbi, etcd, kafka and hw management rpcs.

Change-Id: I78feaf7da284028fc61f42c5e0c5f56e72fe9e78
diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md
new file mode 100644
index 0000000..6c6fe7c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/README.md
@@ -0,0 +1,85 @@
+# etcd/clientv3
+
+[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
+[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3)
+
+`etcd/clientv3` is the official Go etcd client for v3.
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/clientv3
+```
+
+## Get started
+
+Create a client using `clientv3.New`:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+	DialTimeout: 5 * time.Second,
+})
+if err != nil {
+	// handle error!
+}
+defer cli.Close()
+```
+
+etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls, and `clientv3` uses
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client
+after using it; if the client is not closed, the connection leaks goroutines. To specify a client
+request timeout, pass `context.WithTimeout` to APIs:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+resp, err := cli.Put(ctx, "sample_key", "sample_value")
+cancel()
+if err != nil {
+	// handle error!
+}
+// use the response
+```
+
+For full compatibility, it is recommended to vendor builds against etcd's vendored packages, using a tool such as `golang/dep`, as described in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+
+## Error Handling
+
+The etcd client returns two types of errors:
+
+1. context errors: canceled or deadline exceeded.
+2. gRPC errors: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes).
+
+Here is example code to handle client errors:
+
+```go
+resp, err := cli.Put(ctx, "", "")
+if err != nil {
+	switch err {
+	case context.Canceled:
+		log.Fatalf("ctx is canceled by another routine: %v", err)
+	case context.DeadlineExceeded:
+		log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
+	case rpctypes.ErrEmptyKey:
+		log.Fatalf("client-side error: %v", err)
+	default:
+		log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
+	}
+}
+```
+
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go).
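+
+A minimal sketch (assuming `grpc_prometheus` is the import alias for
+[`go-grpc-prometheus`](https://github.com/grpc-ecosystem/go-grpc-prometheus)) that wires the
+client's gRPC connection with the Prometheus interceptors:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints: []string{"localhost:2379"},
+	DialOptions: []grpc.DialOption{
+		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+	},
+})
+if err != nil {
+	// handle error!
+}
+defer cli.Close()
+```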
+
+## Namespacing
+
+The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
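+
+A minimal sketch: wrapping the client's `KV`, `Watcher`, and `Lease` interfaces so that every
+key is transparently prefixed with `"my-prefix/"`:
+
+```go
+cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
+cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
+cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
+// cli.Put(ctx, "abc", "123") now writes the key "my-prefix/abc".
+```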
+
+## Request size limit
+
+Client request size limits are configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize`, in bytes. If not set, the client request send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`.
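+
+For example, a minimal sketch that raises the send limit to 4 MiB and caps the receive limit
+at 16 MiB:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:          []string{"localhost:2379"},
+	MaxCallSendMsgSize: 4 * 1024 * 1024,  // 4 MiB send limit, including gRPC overhead bytes
+	MaxCallRecvMsgSize: 16 * 1024 * 1024, // 16 MiB receive limit
+})
+```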
+
+## Examples
+
+More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3).
diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go
new file mode 100644
index 0000000..c954f1b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/auth.go
@@ -0,0 +1,242 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"go.etcd.io/etcd/auth/authpb"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"google.golang.org/grpc"
+)
+
+type (
+	AuthEnableResponse               pb.AuthEnableResponse
+	AuthDisableResponse              pb.AuthDisableResponse
+	AuthenticateResponse             pb.AuthenticateResponse
+	AuthUserAddResponse              pb.AuthUserAddResponse
+	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
+	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
+	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
+	AuthUserGetResponse              pb.AuthUserGetResponse
+	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
+	AuthRoleAddResponse              pb.AuthRoleAddResponse
+	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
+	AuthRoleGetResponse              pb.AuthRoleGetResponse
+	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
+	AuthUserListResponse             pb.AuthUserListResponse
+	AuthRoleListResponse             pb.AuthRoleListResponse
+
+	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
+)
+
+const (
+	PermRead      = authpb.READ
+	PermWrite     = authpb.WRITE
+	PermReadWrite = authpb.READWRITE
+)
+
+type UserAddOptions authpb.UserAddOptions
+
+type Auth interface {
+	// AuthEnable enables auth of an etcd cluster.
+	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+
+	// AuthDisable disables auth of an etcd cluster.
+	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+
+	// UserAdd adds a new user to an etcd cluster.
+	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+	// UserAddWithOptions adds a new user to an etcd cluster with some options.
+	UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+
+	// UserDelete deletes a user from an etcd cluster.
+	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes the password of a user.
+	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to a user.
+	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+	// UserGet gets detailed information about a user.
+	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+	// UserList gets a list of all users.
+	UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+	// UserRevokeRole revokes a role of a user.
+	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role to an etcd cluster.
+	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role.
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets detailed information about a role.
+	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role.
+	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role.
+	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
+
+type authClient struct {
+	remote   pb.AuthClient
+	callOpts []grpc.CallOption
+}
+
+func NewAuth(c *Client) Auth {
+	api := &authClient{remote: RetryAuthClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
+	return (*AuthEnableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
+	return (*AuthDisableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
+	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
+	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
+	return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
+	return (*AuthUserListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+	resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
+	return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+	resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+	return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+	perm := &authpb.Permission{
+		Key:      []byte(key),
+		RangeEnd: []byte(rangeEnd),
+		PermType: authpb.Permission_Type(permType),
+	}
+	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
+	return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
+	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
+	return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func StrToPermissionType(s string) (PermissionType, error) {
+	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
+	if ok {
+		return PermissionType(val), nil
+	}
+	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
+}
+
+type authenticator struct {
+	conn     *grpc.ClientConn // conn in-use
+	remote   pb.AuthClient
+	callOpts []grpc.CallOption
+}
+
+func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
+	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthenticateResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authenticator) close() {
+	auth.conn.Close()
+}
+
+func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
+	conn, err := grpc.DialContext(ctx, target, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	api := &authenticator{
+		conn:   conn,
+		remote: pb.NewAuthClient(conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api, nil
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
new file mode 100644
index 0000000..d02a7ee
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
@@ -0,0 +1,293 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package balancer implements client balancer.
+package balancer
+
+import (
+	"strconv"
+	"sync"
+	"time"
+
+	"go.etcd.io/etcd/clientv3/balancer/connectivity"
+	"go.etcd.io/etcd/clientv3/balancer/picker"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	grpcconnectivity "google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
+	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
+)
+
+// Config defines balancer configurations.
+type Config struct {
+	// Policy configures balancer policy.
+	Policy picker.Policy
+
+	// Picker implements gRPC picker.
+	// Leave empty if "Policy" field is not custom.
+	// TODO: currently custom policy is not supported.
+	// Picker picker.Picker
+
+	// Name defines an additional name for the balancer.
+	// Useful in balancer tests to avoid registration conflicts.
+	// If empty, it defaults to the policy name.
+	Name string
+
+	// Logger configures balancer logging.
+	// If nil, logs are discarded.
+	Logger *zap.Logger
+}
+
+// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
+// must be invoked at initialization time.
+func RegisterBuilder(cfg Config) {
+	bb := &builder{cfg}
+	balancer.Register(bb)
+
+	bb.cfg.Logger.Debug(
+		"registered balancer",
+		zap.String("policy", bb.cfg.Policy.String()),
+		zap.String("name", bb.cfg.Name),
+	)
+}
+
+type builder struct {
+	cfg Config
+}
+
+// Build is called initially when "grpc.Dial" creates the "ccBalancerWrapper"
+// for this client connection.
+// Then, resolved addresses will be handled via "HandleResolvedAddrs".
+func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	bb := &baseBalancer{
+		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
+		policy: b.cfg.Policy,
+		name:   b.cfg.Name,
+		lg:     b.cfg.Logger,
+
+		addrToSc: make(map[resolver.Address]balancer.SubConn),
+		scToAddr: make(map[balancer.SubConn]resolver.Address),
+		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),
+
+		currentConn:          nil,
+		connectivityRecorder: connectivity.New(b.cfg.Logger),
+
+		// the initial picker always returns "ErrNoSubConnAvailable"
+		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
+	}
+
+	// TODO: support multiple connections
+	bb.mu.Lock()
+	bb.currentConn = cc
+	bb.mu.Unlock()
+
+	bb.lg.Info(
+		"built balancer",
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.String("resolver-target", cc.Target()),
+	)
+	return bb
+}
+
+// Name implements "grpc/balancer.Builder" interface.
+func (b *builder) Name() string { return b.cfg.Name }
+
+// Balancer defines client balancer interface.
+type Balancer interface {
+	// Balancer is created for a specific client connection. The client initiates a gRPC
+	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and the resolved
+	// addresses are then passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
+	// For each resolved address, the balancer calls "balancer.ClientConn.NewSubConn".
+	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when the connectivity state
+	// changes, so failover logic belongs in that method.
+	balancer.Balancer
+
+	// Picker calls "Pick" for every client request.
+	picker.Picker
+}
+
+type baseBalancer struct {
+	id     string
+	policy picker.Policy
+	name   string
+	lg     *zap.Logger
+
+	mu sync.RWMutex
+
+	addrToSc map[resolver.Address]balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+	scToSt   map[balancer.SubConn]grpcconnectivity.State
+
+	currentConn          balancer.ClientConn
+	connectivityRecorder connectivity.Recorder
+
+	picker picker.Picker
+}
+
+// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
+// gRPC sends initial or updated resolved addresses from "Build".
+func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	if err != nil {
+		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
+		return
+	}
+	bb.lg.Info("resolved",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Strings("addresses", addrsToStrings(addrs)),
+	)
+
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	resolved := make(map[resolver.Address]struct{})
+	for _, addr := range addrs {
+		resolved[addr] = struct{}{}
+		if _, ok := bb.addrToSc[addr]; !ok {
+			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+			if err != nil {
+				bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
+				continue
+			}
+			bb.lg.Info("created subconn", zap.String("address", addr.Addr))
+			bb.addrToSc[addr] = sc
+			bb.scToAddr[sc] = addr
+			bb.scToSt[sc] = grpcconnectivity.Idle
+			sc.Connect()
+		}
+	}
+
+	for addr, sc := range bb.addrToSc {
+		if _, ok := resolved[addr]; !ok {
+			// was removed by resolver or failed to create subconn
+			bb.currentConn.RemoveSubConn(sc)
+			delete(bb.addrToSc, addr)
+
+			bb.lg.Info(
+				"removed subconn",
+				zap.String("picker", bb.picker.String()),
+				zap.String("balancer-id", bb.id),
+				zap.String("address", addr.Addr),
+				zap.String("subconn", scToString(sc)),
+			)
+
+			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
+			// The entry will be deleted in HandleSubConnStateChange.
+			// (DO NOT) delete(bb.scToAddr, sc)
+			// (DO NOT) delete(bb.scToSt, sc)
+		}
+	}
+}
+
+// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
+func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	old, ok := bb.scToSt[sc]
+	if !ok {
+		bb.lg.Warn(
+			"state change for an unknown subconn",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("subconn", scToString(sc)),
+			zap.Int("subconn-size", len(bb.scToAddr)),
+			zap.String("state", s.String()),
+		)
+		return
+	}
+
+	bb.lg.Info(
+		"state changed",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Bool("connected", s == grpcconnectivity.Ready),
+		zap.String("subconn", scToString(sc)),
+		zap.Int("subconn-size", len(bb.scToAddr)),
+		zap.String("address", bb.scToAddr[sc].Addr),
+		zap.String("old-state", old.String()),
+		zap.String("new-state", s.String()),
+	)
+
+	bb.scToSt[sc] = s
+	switch s {
+	case grpcconnectivity.Idle:
+		sc.Connect()
+	case grpcconnectivity.Shutdown:
+		// When an address is removed by the resolver, the balancer calls RemoveSubConn but
+		// keeps the sc's state in scToSt. Remove the state for this sc here.
+		delete(bb.scToAddr, sc)
+		delete(bb.scToSt, sc)
+	}
+
+	oldAggrState := bb.connectivityRecorder.GetCurrentState()
+	bb.connectivityRecorder.RecordTransition(old, s)
+
+	// Update balancer picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
+		(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
+		bb.updatePicker()
+	}
+
+	bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
+}
+
+func (bb *baseBalancer) updatePicker() {
+	if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
+		bb.picker = picker.NewErr(balancer.ErrTransientFailure)
+		bb.lg.Info(
+			"updated picker to transient error picker",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("policy", bb.policy.String()),
+		)
+		return
+	}
+
+	// only pass ready subconns to picker
+	scToAddr := make(map[balancer.SubConn]resolver.Address)
+	for addr, sc := range bb.addrToSc {
+		if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
+			scToAddr[sc] = addr
+		}
+	}
+
+	bb.picker = picker.New(picker.Config{
+		Policy:                   bb.policy,
+		Logger:                   bb.lg,
+		SubConnToResolverAddress: scToAddr,
+	})
+	bb.lg.Info(
+		"updated picker",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.Strings("subconn-ready", scsToStrings(scToAddr)),
+		zap.Int("subconn-size", len(scToAddr)),
+	)
+}
+
+// Close implements "grpc/balancer.Balancer" interface.
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (bb *baseBalancer) Close() {
+	// TODO
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go
new file mode 100644
index 0000000..4c4ad36
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package connectivity implements client connectivity operations.
+package connectivity
+
+import (
+	"sync"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/connectivity"
+)
+
+// Recorder records gRPC connectivity.
+type Recorder interface {
+	GetCurrentState() connectivity.State
+	RecordTransition(oldState, newState connectivity.State)
+}
+
+// New returns a new Recorder.
+func New(lg *zap.Logger) Recorder {
+	return &recorder{lg: lg}
+}
+
+// recorder takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+type recorder struct {
+	lg *zap.Logger
+
+	mu sync.RWMutex
+
+	cur connectivity.State
+
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+func (rc *recorder) GetCurrentState() (state connectivity.State) {
+	rc.mu.RLock()
+	defer rc.mu.RUnlock()
+	return rc.cur
+}
+
+// RecordTransition records a state change of a subConn and, based on that,
+// evaluates what the aggregated state should be.
+//
+//  - If at least one SubConn is in Ready, the aggregated state is Ready;
+//  - Else if at least one SubConn is in Connecting, the aggregated state is Connecting;
+//  - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+//
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
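+//
+// For example (illustrative), a subconn moving Idle -> Connecting -> Ready
+// drives the aggregated state to Connecting, then Ready:
+//
+//	rc := New(zap.NewNop())
+//	rc.RecordTransition(connectivity.Idle, connectivity.Connecting)
+//	rc.GetCurrentState() // Connecting
+//	rc.RecordTransition(connectivity.Connecting, connectivity.Ready)
+//	rc.GetCurrentState() // Ready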
+func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			rc.numReady += updateVal
+		case connectivity.Connecting:
+			rc.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			rc.numTransientFailure += updateVal
+		default:
+			rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
+		}
+	}
+
+	switch { // must be exclusive, no overlap
+	case rc.numReady > 0:
+		rc.cur = connectivity.Ready
+	case rc.numConnecting > 0:
+		rc.cur = connectivity.Connecting
+	default:
+		rc.cur = connectivity.TransientFailure
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go
new file mode 100644
index 0000000..35dabf5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package picker defines/implements client balancer picker policy.
+package picker
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
new file mode 100644
index 0000000..9e04378
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+
+	"google.golang.org/grpc/balancer"
+)
+
+// NewErr returns a picker that always returns err on "Pick".
+func NewErr(err error) Picker {
+	return &errPicker{p: Error, err: err}
+}
+
+type errPicker struct {
+	p   Policy
+	err error
+}
+
+func (ep *errPicker) String() string {
+	return ep.p.String()
+}
+
+func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	return nil, nil, ep.err
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
new file mode 100644
index 0000000..bd1a5d2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// Picker defines balancer Picker methods.
+type Picker interface {
+	balancer.Picker
+	String() string
+}
+
+// Config defines picker configuration.
+type Config struct {
+	// Policy specifies etcd clientv3's built-in balancer policy.
+	Policy Policy
+
+	// Logger defines picker logging object.
+	Logger *zap.Logger
+
+	// SubConnToResolverAddress maps each gRPC sub-connection to an address.
+	// Basically, it is a list of addresses that the Picker can pick from.
+	SubConnToResolverAddress map[balancer.SubConn]resolver.Address
+}
+
+// Policy defines balancer picker policy.
+type Policy uint8
+
+const (
+	// Error is error picker policy.
+	Error Policy = iota
+
+	// RoundrobinBalanced balances load over multiple endpoints
+	// and implements failover in a round-robin fashion.
+	RoundrobinBalanced
+
+	// Custom defines custom balancer picker.
+	// TODO: custom picker is not supported yet.
+	Custom
+)
+
+func (p Policy) String() string {
+	switch p {
+	case Error:
+		return "picker-error"
+
+	case RoundrobinBalanced:
+		return "picker-roundrobin-balanced"
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
+	}
+}
+
+// New creates a new Picker.
+func New(cfg Config) Picker {
+	switch cfg.Policy {
+	case Error:
+		panic("'error' picker policy is not supported here; use 'picker.NewErr'")
+
+	case RoundrobinBalanced:
+		return newRoundrobinBalanced(cfg)
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
new file mode 100644
index 0000000..1b8b285
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -0,0 +1,95 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+	"sync"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// newRoundrobinBalanced returns a new roundrobin balanced picker.
+func newRoundrobinBalanced(cfg Config) Picker {
+	scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
+	for sc := range cfg.SubConnToResolverAddress {
+		scs = append(scs, sc)
+	}
+	return &rrBalanced{
+		p:        RoundrobinBalanced,
+		lg:       cfg.Logger,
+		scs:      scs,
+		scToAddr: cfg.SubConnToResolverAddress,
+	}
+}
+
+type rrBalanced struct {
+	p Policy
+
+	lg *zap.Logger
+
+	mu       sync.RWMutex
+	next     int
+	scs      []balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+}
+
+func (rb *rrBalanced) String() string { return rb.p.String() }
+
+// Pick is called for every client request.
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	rb.mu.RLock()
+	n := len(rb.scs)
+	rb.mu.RUnlock()
+	if n == 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	rb.mu.Lock()
+	cur := rb.next
+	sc := rb.scs[cur]
+	picked := rb.scToAddr[sc].Addr
+	rb.next = (rb.next + 1) % len(rb.scs)
+	rb.mu.Unlock()
+
+	rb.lg.Debug(
+		"picked",
+		zap.String("picker", rb.p.String()),
+		zap.String("address", picked),
+		zap.Int("subconn-index", cur),
+		zap.Int("subconn-size", n),
+	)
+
+	doneFunc := func(info balancer.DoneInfo) {
+		// TODO: error handling?
+		fss := []zapcore.Field{
+			zap.Error(info.Err),
+			zap.String("picker", rb.p.String()),
+			zap.String("address", picked),
+			zap.Bool("success", info.Err == nil),
+			zap.Bool("bytes-sent", info.BytesSent),
+			zap.Bool("bytes-received", info.BytesReceived),
+		}
+		if info.Err == nil {
+			rb.lg.Debug("balancer done", fss...)
+		} else {
+			rb.lg.Warn("balancer failed", fss...)
+		}
+	}
+	return sc, doneFunc, nil
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
new file mode 100644
index 0000000..1f32039
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
@@ -0,0 +1,240 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
+package endpoint
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/resolver"
+)
+
+const scheme = "endpoint"
+
+var (
+	targetPrefix = fmt.Sprintf("%s://", scheme)
+
+	bldr *builder
+)
+
+func init() {
+	bldr = &builder{
+		resolverGroups: make(map[string]*ResolverGroup),
+	}
+	resolver.Register(bldr)
+}
+
+type builder struct {
+	mu             sync.RWMutex
+	resolverGroups map[string]*ResolverGroup
+}
+
+// NewResolverGroup creates a new ResolverGroup with the given id.
+func NewResolverGroup(id string) (*ResolverGroup, error) {
+	return bldr.newResolverGroup(id)
+}
+
+// ResolverGroup keeps the endpoints of all resolvers that share a common
+// endpoint://<id>/ target up-to-date.
+type ResolverGroup struct {
+	mu        sync.RWMutex
+	id        string
+	endpoints []string
+	resolvers []*Resolver
+}
+
+func (e *ResolverGroup) addResolver(r *Resolver) {
+	e.mu.Lock()
+	addrs := epsToAddrs(e.endpoints...)
+	e.resolvers = append(e.resolvers, r)
+	e.mu.Unlock()
+	r.cc.NewAddress(addrs)
+}
+
+func (e *ResolverGroup) removeResolver(r *Resolver) {
+	e.mu.Lock()
+	for i, er := range e.resolvers {
+		if er == r {
+			e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
+			break
+		}
+	}
+	e.mu.Unlock()
+}
+
+// SetEndpoints updates the endpoints for the ResolverGroup. All registered resolvers are
+// updated immediately with the new endpoints.
+func (e *ResolverGroup) SetEndpoints(endpoints []string) {
+	addrs := epsToAddrs(endpoints...)
+	e.mu.Lock()
+	e.endpoints = endpoints
+	for _, r := range e.resolvers {
+		r.cc.NewAddress(addrs)
+	}
+	e.mu.Unlock()
+}
+
+// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
+func (e *ResolverGroup) Target(endpoint string) string {
+	return Target(e.id, endpoint)
+}
+
+// Target constructs an endpoint resolver target.
+func Target(id, endpoint string) string {
+	return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
+}
+
+// IsTarget checks whether a given target string is an endpoint resolver target.
+func IsTarget(target string) bool {
+	return strings.HasPrefix(target, "endpoint://")
+}
+
+func (e *ResolverGroup) Close() {
+	bldr.close(e.id)
+}
+
+// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	if len(target.Authority) < 1 {
+		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
+	}
+	id := target.Authority
+	es, err := b.getResolverGroup(id)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build resolver: %v", err)
+	}
+	r := &Resolver{
+		endpointID: id,
+		cc:         cc,
+	}
+	es.addResolver(r)
+	return r, nil
+}
+
+func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	_, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if ok {
+		return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
+	}
+
+	es := &ResolverGroup{id: id}
+	b.mu.Lock()
+	b.resolverGroups[id] = es
+	b.mu.Unlock()
+	return es, nil
+}
+
+func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	es, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
+	}
+	return es, nil
+}
+
+func (b *builder) close(id string) {
+	b.mu.Lock()
+	delete(b.resolverGroups, id)
+	b.mu.Unlock()
+}
+
+func (b *builder) Scheme() string {
+	return scheme
+}
+
+// Resolver provides a resolver for a single etcd cluster, identified by name.
+type Resolver struct {
+	endpointID string
+	cc         resolver.ClientConn
+	sync.RWMutex
+}
+
+// TODO: use balancer.epsToAddrs
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		addrs = append(addrs, resolver.Address{Addr: ep})
+	}
+	return addrs
+}
+
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (r *Resolver) Close() {
+	es, err := bldr.getResolverGroup(r.endpointID)
+	if err != nil {
+		return
+	}
+	es.removeResolver(r)
+}
+
+// ParseEndpoint parses an endpoint of the form
+// (http|https)://<host>* or (unix|unixs)://<path>
+// and returns a protocol ('tcp' or 'unix'),
+// a host (or filepath if a unix socket),
+// and a scheme (http, https, unix, unixs).
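+//
+// For example (illustrative):
+//
+//	ParseEndpoint("https://etcd.example.com:2379") // "tcp", "etcd.example.com:2379", "https"
+//	ParseEndpoint("unix:///tmp/etcd.sock")         // "unix", "/tmp/etcd.sock", "unix"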
+func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
+	proto = "tcp"
+	host = endpoint
+	url, uerr := url.Parse(endpoint)
+	if uerr != nil || !strings.Contains(endpoint, "://") {
+		return proto, host, scheme
+	}
+	scheme = url.Scheme
+
+	// strip scheme:// prefix since grpc dials by host
+	host = url.Host
+	switch url.Scheme {
+	case "http", "https":
+	case "unix", "unixs":
+		proto = "unix"
+		host = url.Host + url.Path
+	default:
+		proto, host = "", ""
+	}
+	return proto, host, scheme
+}
+
+// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
+// If the target is malformed, an error is returned.
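+//
+// For example (illustrative), ParseTarget("endpoint://abc/localhost:2379")
+// returns id "abc" and endpoint "localhost:2379".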
+func ParseTarget(target string) (string, string, error) {
+	noPrefix := strings.TrimPrefix(target, targetPrefix)
+	if noPrefix == target {
+		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
+	}
+	parts := strings.SplitN(noPrefix, "/", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
+	}
+	return parts[0], parts[1], nil
+}
+
+// ParseHostPort splits a "<host>:<port>" string into the host and port parts.
+// The port part is optional.
+func ParseHostPort(hostPort string) (host string, port string) {
+	parts := strings.SplitN(hostPort, ":", 2)
+	host = parts[0]
+	if len(parts) > 1 {
+		port = parts[1]
+	}
+	return host, port
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
new file mode 100644
index 0000000..48eb875
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+	"fmt"
+	"net/url"
+	"sort"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+func scToString(sc balancer.SubConn) string {
+	return fmt.Sprintf("%p", sc)
+}
+
+func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
+	ss = make([]string, 0, len(scs))
+	for sc, a := range scs {
+		ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func addrsToStrings(addrs []resolver.Address) (ss []string) {
+	ss = make([]string, len(addrs))
+	for i := range addrs {
+		ss[i] = addrs[i].Addr
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
+			continue
+		}
+		addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
+	}
+	return addrs
+}
+
+var genN = new(uint32)
+
+func genName() string {
+	now := time.Now().UnixNano()
+	return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
new file mode 100644
index 0000000..d6000a8
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/client.go
@@ -0,0 +1,665 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/google/uuid"
+	"go.etcd.io/etcd/clientv3/balancer"
+	"go.etcd.io/etcd/clientv3/balancer/picker"
+	"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
+	"go.etcd.io/etcd/clientv3/credentials"
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	"go.etcd.io/etcd/pkg/logutil"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpccredentials "google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+	ErrOldCluster           = errors.New("etcdclient: old cluster version")
+
+	roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
+)
+
+func init() {
+	lg := zap.NewNop()
+	if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+		lcfg := logutil.DefaultZapLoggerConfig
+		lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+
+		var err error
+		lg, err = lcfg.Build() // debug level logging
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	// TODO: support custom balancer
+	balancer.RegisterBuilder(balancer.Config{
+		Policy: picker.RoundrobinBalanced,
+		Name:   roundRobinBalancerName,
+		Logger: lg,
+	})
+}
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+	Cluster
+	KV
+	Lease
+	Watcher
+	Auth
+	Maintenance
+
+	conn *grpc.ClientConn
+
+	cfg           Config
+	creds         grpccredentials.TransportCredentials
+	resolverGroup *endpoint.ResolverGroup
+	mu            *sync.RWMutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// Username is a user name for authentication.
+	Username string
+	// Password is a password for authentication.
+	Password        string
+	authTokenBundle credentials.Bundle
+
+	callOpts []grpc.CallOption
+
+	lg *zap.Logger
+}
+
+// New creates a new etcdv3 client from a given configuration.
+func New(cfg Config) (*Client, error) {
+	if len(cfg.Endpoints) == 0 {
+		return nil, ErrNoAvailableEndpoints
+	}
+
+	return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context) *Client {
+	cctx, cancel := context.WithCancel(ctx)
+	return &Client{ctx: cctx, cancel: cancel}
+}
+
+// NewFromURL creates a new etcdv3 client from a URL.
+func NewFromURL(url string) (*Client, error) {
+	return New(Config{Endpoints: []string{url}})
+}
+
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+	return New(Config{Endpoints: urls})
+}
+
+// Close shuts down the client's etcd connections.
+func (c *Client) Close() error {
+	c.cancel()
+	if c.Watcher != nil {
+		c.Watcher.Close()
+	}
+	if c.Lease != nil {
+		c.Lease.Close()
+	}
+	if c.resolverGroup != nil {
+		c.resolverGroup.Close()
+	}
+	if c.conn != nil {
+		return toErr(c.ctx, c.conn.Close())
+	}
+	return c.ctx.Err()
+}
+
+// Ctx is a context for "out of band" messages (e.g., for sending
+// a "clean up" message when another context is canceled). It is
+// canceled on client Close().
+func (c *Client) Ctx() context.Context { return c.ctx }
+
+// Endpoints lists the registered endpoints for the client.
+func (c *Client) Endpoints() []string {
+	// copy the slice; protect original endpoints from being changed
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	eps := make([]string, len(c.cfg.Endpoints))
+	copy(eps, c.cfg.Endpoints)
+	return eps
+}
+
+// SetEndpoints updates the client's endpoints.
+func (c *Client) SetEndpoints(eps ...string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.cfg.Endpoints = eps
+	c.resolverGroup.SetEndpoints(eps)
+}
+
+// Sync synchronizes the client's endpoints with the known endpoints from the etcd membership.
+func (c *Client) Sync(ctx context.Context) error {
+	mresp, err := c.MemberList(ctx)
+	if err != nil {
+		return err
+	}
+	var eps []string
+	for _, m := range mresp.Members {
+		eps = append(eps, m.ClientURLs...)
+	}
+	c.SetEndpoints(eps...)
+	return nil
+}
+
+func (c *Client) autoSync() {
+	if c.cfg.AutoSyncInterval == time.Duration(0) {
+		return
+	}
+
+	for {
+		select {
+		case <-c.ctx.Done():
+			return
+		case <-time.After(c.cfg.AutoSyncInterval):
+			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+			err := c.Sync(ctx)
+			cancel()
+			if err != nil && err != c.ctx.Err() {
+				lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
+			}
+		}
+	}
+}
+
+func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
+	creds = c.creds
+	switch scheme {
+	case "unix":
+	case "http":
+		creds = nil
+	case "https", "unixs":
+		if creds != nil {
+			break
+		}
+		creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
+	default:
+		creds = nil
+	}
+	return creds
+}
+
+// dialSetupOpts gives the dial opts prior to any authentication.
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+	if c.cfg.DialKeepAliveTime > 0 {
+		params := keepalive.ClientParameters{
+			Time:                c.cfg.DialKeepAliveTime,
+			Timeout:             c.cfg.DialKeepAliveTimeout,
+			PermitWithoutStream: c.cfg.PermitWithoutStream,
+		}
+		opts = append(opts, grpc.WithKeepaliveParams(params))
+	}
+	opts = append(opts, dopts...)
+
+	// Provide a net dialer that supports cancelation and timeout.
+	f := func(dialEp string, t time.Duration) (net.Conn, error) {
+		proto, host, _ := endpoint.ParseEndpoint(dialEp)
+		select {
+		case <-c.ctx.Done():
+			return nil, c.ctx.Err()
+		default:
+		}
+		dialer := &net.Dialer{Timeout: t}
+		return dialer.DialContext(c.ctx, proto, host)
+	}
+	opts = append(opts, grpc.WithDialer(f))
+
+	if creds != nil {
+		opts = append(opts, grpc.WithTransportCredentials(creds))
+	} else {
+		opts = append(opts, grpc.WithInsecure())
+	}
+
+	// Interceptor retry and backoff.
+	// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
+	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
+	// once it is available.
+	rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+	opts = append(opts,
+		// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+		// Streams that are safe to retry are enabled individually.
+		grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
+		grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
+	)
+
+	return opts, nil
+}
+
+// Dial connects to a single endpoint using the client's config.
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+	creds := c.directDialCreds(ep)
+	// Use the grpc passthrough resolver to directly dial a single endpoint.
+	// This resolver passes through the 'unix' and 'unixs' endpoint schemes used
+	// by etcd without modification, allowing us to directly dial endpoints
+	// while using the same dial functions that we use for load balancer dialing.
+	return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return the last error in case of failure
+	var auth *authenticator
+
+	eps := c.Endpoints()
+	for _, ep := range eps {
+		// use dial options without dopts to avoid reusing the client balancer
+		var dOpts []grpc.DialOption
+		_, host, _ := endpoint.ParseEndpoint(ep)
+		target := c.resolverGroup.Target(host)
+		creds := c.dialWithBalancerCreds(ep)
+		dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
+		if err != nil {
+			err = fmt.Errorf("failed to configure auth dialer: %v", err)
+			continue
+		}
+		dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
+		auth, err = newAuthenticator(ctx, target, dOpts, c)
+		if err != nil {
+			continue
+		}
+		defer auth.close()
+
+		var resp *AuthenticateResponse
+		resp, err = auth.authenticate(ctx, c.Username, c.Password)
+		if err != nil {
+			// return err without retrying other endpoints
+			if err == rpctypes.ErrAuthNotEnabled {
+				return err
+			}
+			continue
+		}
+
+		c.authTokenBundle.UpdateAuthToken(resp.Token)
+		return nil
+	}
+
+	return err
+}
+
+// dialWithBalancer dials the client's current load-balanced resolver group. The scheme of the
+// provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	_, host, _ := endpoint.ParseEndpoint(ep)
+	target := c.resolverGroup.Target(host)
+	creds := c.dialWithBalancerCreds(ep)
+	return c.dial(target, creds, dopts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	opts, err := c.dialSetupOpts(creds, dopts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to configure dialer: %v", err)
+	}
+
+	if c.Username != "" && c.Password != "" {
+		c.authTokenBundle = credentials.NewBundle(credentials.Config{})
+
+		ctx, cancel := c.ctx, func() {}
+		if c.cfg.DialTimeout > 0 {
+			ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+		}
+
+		err = c.getToken(ctx)
+		if err != nil {
+			if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
+				if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
+					err = context.DeadlineExceeded
+				}
+				cancel()
+				return nil, err
+			}
+		} else {
+			opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
+		}
+		cancel()
+	}
+
+	opts = append(opts, c.cfg.DialOptions...)
+
+	dctx := c.ctx
+	if c.cfg.DialTimeout > 0 {
+		var cancel context.CancelFunc
+		dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+		defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+	}
+
+	conn, err := grpc.DialContext(dctx, target, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials {
+	_, hostPort, scheme := endpoint.ParseEndpoint(ep)
+	creds := c.creds
+	if len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+		if creds != nil {
+			clone := creds.Clone()
+			// The server name must be set to the endpoint hostname without the port, since grpc
+			// otherwise attempts to check whether the x509 cert is valid for the full endpoint,
+			// including the scheme and port, which fails.
+			host, _ := endpoint.ParseHostPort(hostPort)
+			clone.OverrideServerName(host)
+			creds = clone
+		}
+	}
+	return creds
+}
+
+func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
+	_, _, scheme := endpoint.ParseEndpoint(ep)
+	creds := c.creds
+	if len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+	}
+	return creds
+}
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
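+//
+// For example (illustrative):
+//
+//	resp, err := cli.Get(clientv3.WithRequireLeader(ctx), "sample_key")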
+func WithRequireLeader(ctx context.Context) context.Context {
+	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
+func newClient(cfg *Config) (*Client, error) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	var creds grpccredentials.TransportCredentials
+	if cfg.TLS != nil {
+		creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
+	}
+
+	// use a temporary skeleton client to bootstrap first connection
+	baseCtx := context.TODO()
+	if cfg.Context != nil {
+		baseCtx = cfg.Context
+	}
+
+	ctx, cancel := context.WithCancel(baseCtx)
+	client := &Client{
+		conn:     nil,
+		cfg:      *cfg,
+		creds:    creds,
+		ctx:      ctx,
+		cancel:   cancel,
+		mu:       new(sync.RWMutex),
+		callOpts: defaultCallOpts,
+	}
+
+	lcfg := logutil.DefaultZapLoggerConfig
+	if cfg.LogConfig != nil {
+		lcfg = *cfg.LogConfig
+	}
+	var err error
+	client.lg, err = lcfg.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.Username != "" && cfg.Password != "" {
+		client.Username = cfg.Username
+		client.Password = cfg.Password
+	}
+	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+		}
+		callOpts := []grpc.CallOption{
+			defaultFailFast,
+			defaultMaxCallSendMsgSize,
+			defaultMaxCallRecvMsgSize,
+		}
+		if cfg.MaxCallSendMsgSize > 0 {
+			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+		}
+		if cfg.MaxCallRecvMsgSize > 0 {
+			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+		}
+		client.callOpts = callOpts
+	}
+
+	// Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an
+	// endpoint target to pass to dial so the client knows to use this resolver.
+	client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
+	if err != nil {
+		client.cancel()
+		return nil, err
+	}
+	client.resolverGroup.SetEndpoints(cfg.Endpoints)
+
+	if len(cfg.Endpoints) < 1 {
+		return nil, fmt.Errorf("at least one Endpoint is required in client config")
+	}
+	dialEndpoint := cfg.Endpoints[0]
+
+	// Use the provided endpoint target so that, for an https:// endpoint without any TLS
+	// config given, grpc will assume the certificate server name is the endpoint host.
+	conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
+	if err != nil {
+		client.cancel()
+		client.resolverGroup.Close()
+		return nil, err
+	}
+	// TODO: With the old grpc balancer interface, we waited until the dial timeout
+	// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
+	client.conn = conn
+
+	client.Cluster = NewCluster(client)
+	client.KV = NewKV(client)
+	client.Lease = NewLease(client)
+	client.Watcher = NewWatcher(client)
+	client.Auth = NewAuth(client)
+	client.Maintenance = NewMaintenance(client)
+
+	if cfg.RejectOldCluster {
+		if err := client.checkVersion(); err != nil {
+			client.Close()
+			return nil, err
+		}
+	}
+
+	go client.autoSync()
+	return client, nil
+}
+
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		// after each full round robin across the quorum, back off for the waitBetween duration
+		n := uint(len(c.Endpoints()))
+		quorum := (n/2 + 1)
+		if attempt%quorum == 0 {
+			c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+			return jitterUp(waitBetween, jitterFraction)
+		}
+		c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+		return 0
+	}
+}
+
+func (c *Client) checkVersion() (err error) {
+	var wg sync.WaitGroup
+
+	eps := c.Endpoints()
+	errc := make(chan error, len(eps))
+	ctx, cancel := context.WithCancel(c.ctx)
+	if c.cfg.DialTimeout > 0 {
+		cancel()
+		ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+	}
+
+	wg.Add(len(eps))
+	for _, ep := range eps {
+		// if cluster is current, any endpoint gives a recent version
+		go func(e string) {
+			defer wg.Done()
+			resp, rerr := c.Status(ctx, e)
+			if rerr != nil {
+				errc <- rerr
+				return
+			}
+			vs := strings.Split(resp.Version, ".")
+			maj, min := 0, 0
+			if len(vs) >= 2 {
+				var serr error
+				if maj, serr = strconv.Atoi(vs[0]); serr != nil {
+					errc <- serr
+					return
+				}
+				if min, serr = strconv.Atoi(vs[1]); serr != nil {
+					errc <- serr
+					return
+				}
+			}
+			if maj < 3 || (maj == 3 && min < 2) {
+				rerr = ErrOldCluster
+			}
+			errc <- rerr
+		}(ep)
+	}
+	// wait for success
+	for range eps {
+		if err = <-errc; err == nil {
+			break
+		}
+	}
+	cancel()
+	wg.Wait()
+	return err
+}
+
+// ActiveConnection returns the current in-use connection
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate no forward
+// progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return true
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	// Treat Internal codes as if something failed, leaving the
+	// system in an inconsistent state, but retrying could make progress.
+	// (e.g., failed in middle of send, corrupted frame)
+	// TODO: are permanent Internal errors possible from grpc?
+	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
+}
+
+// isUnavailableErr returns true if the given error is an unavailable error
+func isUnavailableErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return false
+	}
+	if err == nil {
+		return false
+	}
+	ev, ok := status.FromError(err)
+	if ok {
+		// Unavailable codes mean the system will be right back.
+		// (e.g., can't connect, lost leader)
+		return ev.Code() == codes.Unavailable
+	}
+	return false
+}
+
+func toErr(ctx context.Context, err error) error {
+	if err == nil {
+		return nil
+	}
+	err = rpctypes.Error(err)
+	if _, ok := err.(rpctypes.EtcdError); ok {
+		return err
+	}
+	if ev, ok := status.FromError(err); ok {
+		code := ev.Code()
+		switch code {
+		case codes.DeadlineExceeded:
+			fallthrough
+		case codes.Canceled:
+			if ctx.Err() != nil {
+				err = ctx.Err()
+			}
+		}
+	}
+	return err
+}
+
+func canceledByCaller(stopCtx context.Context, err error) bool {
+	if stopCtx.Err() == nil || err == nil {
+		return false
+	}
+
+	return err == context.Canceled || err == context.DeadlineExceeded
+}
+
+// IsConnCanceled returns true if the error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// >= gRPC v1.23.x
+	s, ok := status.FromError(err)
+	if ok {
+		// connection is canceled or server has already closed the connection
+		return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+	}
+
+	// >= gRPC v1.10.x
+	if err == context.Canceled {
+		return true
+	}
+
+	// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+	return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
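+
+// A hedged usage sketch for IsConnCanceled (cli and ctx are assumed to exist):
+//
+//	if _, err := cli.Get(ctx, "k"); clientv3.IsConnCanceled(err) {
+//		// the client was closed; stop retrying
+//	}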
diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go
new file mode 100644
index 0000000..ce97e5c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/pkg/types"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	Member                pb.Member
+	MemberListResponse    pb.MemberListResponse
+	MemberAddResponse     pb.MemberAddResponse
+	MemberRemoveResponse  pb.MemberRemoveResponse
+	MemberUpdateResponse  pb.MemberUpdateResponse
+	MemberPromoteResponse pb.MemberPromoteResponse
+)
+
+type Cluster interface {
+	// MemberList lists the current cluster membership.
+	MemberList(ctx context.Context) (*MemberListResponse, error)
+
+	// MemberAdd adds a new member into the cluster.
+	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+	// MemberAddAsLearner adds a new learner member into the cluster.
+	MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
+
+	// MemberUpdate updates the peer addresses of the member.
+	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+
+	// MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+	MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
+}
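+
+// A minimal usage sketch for the Cluster API (endpoint URLs are illustrative;
+// cli and ctx are assumed to exist):
+//
+//	resp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println("added member ID:", resp.Member.ID)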
+
+type cluster struct {
+	remote   pb.ClusterClient
+	callOpts []grpc.CallOption
+}
+
+func NewCluster(c *Client) Cluster {
+	api := &cluster{remote: RetryClusterClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
+	api := &cluster{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	return c.memberAdd(ctx, peerAddrs, false)
+}
+
+func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	return c.memberAdd(ctx, peerAddrs, true)
+}
+
+func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	r := &pb.MemberAddRequest{
+		PeerURLs:  peerAddrs,
+		IsLearner: isLearner,
+	}
+	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberAddResponse)(resp), nil
+}
+
+func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+	r := &pb.MemberRemoveRequest{ID: id}
+	resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberRemoveResponse)(resp), nil
+}
+
+func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	// it is safe to retry on update.
+	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
+	if err == nil {
+		return (*MemberUpdateResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
+	// it is safe to retry on list.
+	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
+	if err == nil {
+		return (*MemberListResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+	r := &pb.MemberPromoteRequest{ID: id}
+	resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberPromoteResponse)(resp), nil
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
new file mode 100644
index 0000000..5779713
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+	revision int64
+	physical bool
+}
+
+// CompactOption configures a compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpCompact applies the given CompactOptions to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+	ret := CompactOp{revision: rev}
+	ret.applyCompactOpts(opts)
+	return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+	return func(op *CompactOp) { op.physical = true }
+}
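+
+// A hedged usage sketch (cli and ctx are assumed to exist): compact everything
+// below revision 100 and wait until the entries are physically removed.
+//
+//	_, err := cli.Compact(ctx, 100, clientv3.WithCompactPhysical())
+//	if err != nil {
+//		// handle error
+//	}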
diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go
new file mode 100644
index 0000000..01ed68e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/compare.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+type CompareTarget int
+type CompareResult int
+
+const (
+	CompareVersion CompareTarget = iota
+	CompareCreated
+	CompareModified
+	CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v interface{}) Cmp {
+	var r pb.Compare_CompareResult
+
+	switch result {
+	case "=":
+		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
+	case ">":
+		r = pb.Compare_GREATER
+	case "<":
+		r = pb.Compare_LESS
+	default:
+		panic("Unknown result op")
+	}
+
+	cmp.Result = r
+	switch cmp.Target {
+	case pb.Compare_VALUE:
+		val, ok := v.(string)
+		if !ok {
+			panic("bad compare value")
+		}
+		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+	case pb.Compare_VERSION:
+		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+	case pb.Compare_CREATE:
+		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+	case pb.Compare_MOD:
+		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+	default:
+		panic("Unknown compare type")
+	}
+	return cmp
+}
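+
+// A minimal transaction sketch using Compare (cli and ctx are assumed to exist):
+// create the key only if it does not exist yet.
+//
+//	txnresp, err := cli.Txn(ctx).
+//		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
+//		Then(clientv3.OpPut("k", "v")).
+//		Commit()
+//	if err == nil && txnresp.Succeeded {
+//		// the key was created by this transaction
+//	}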
+
+func Value(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+		return tu.Value
+	}
+	return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val interface{}) int64 {
+	if v, ok := val.(int64); ok {
+		return v
+	}
+	if v, ok := val.(int); ok {
+		return int64(v)
+	}
+	panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
new file mode 100644
index 0000000..dcdbf51
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
new file mode 100644
index 0000000..2521db6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	v3 "go.etcd.io/etcd/clientv3"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+var (
+	ErrElectionNotLeader = errors.New("election: not leader")
+	ErrElectionNoLeader  = errors.New("election: no leader")
+)
+
+type Election struct {
+	session *Session
+
+	keyPrefix string
+
+	leaderKey     string
+	leaderRev     int64
+	leaderSession *Session
+	hdr           *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+	return &Election{session: s, keyPrefix: pfx + "/"}
+}
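+
+// A minimal campaign sketch (cli is an assumed *clientv3.Client; error handling elided):
+//
+//	s, _ := concurrency.NewSession(cli)
+//	e := concurrency.NewElection(s, "/my-election")
+//	if err := e.Campaign(context.TODO(), "candidate-1"); err != nil {
+//		// handle error
+//	}
+//	// this process is now the leader for "/my-election"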
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+	return &Election{
+		keyPrefix:     pfx,
+		session:       s,
+		leaderKey:     leaderKey,
+		leaderRev:     leaderRev,
+		leaderSession: s,
+	}
+}
+
+// Campaign puts a value as eligible for the election on the prefix key.
+// Multiple sessions can participate in the election for the same prefix,
+// but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', Campaign
+// blocks waiting for other keys to be deleted, unless the server
+// returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, Campaign blocks until it becomes the leader or the
+// context is cancelled or times out.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+	s := e.session
+	client := e.session.Client()
+
+	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+	txn = txn.Else(v3.OpGet(k))
+	resp, err := txn.Commit()
+	if err != nil {
+		return err
+	}
+	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+	if !resp.Succeeded {
+		kv := resp.Responses[0].GetResponseRange().Kvs[0]
+		e.leaderRev = kv.CreateRevision
+		if string(kv.Value) != val {
+			if err = e.Proclaim(ctx, val); err != nil {
+				e.Resign(ctx)
+				return err
+			}
+		}
+	}
+
+	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+	if err != nil {
+		// clean up in case of context cancel
+		select {
+		case <-ctx.Done():
+			e.Resign(client.Ctx())
+		default:
+			e.leaderSession = nil
+		}
+		return err
+	}
+	e.hdr = resp.Header
+
+	return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+	if e.leaderSession == nil {
+		return ErrElectionNotLeader
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	txn := client.Txn(ctx).If(cmp)
+	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+	tresp, terr := txn.Commit()
+	if terr != nil {
+		return terr
+	}
+	if !tresp.Succeeded {
+		e.leaderKey = ""
+		return ErrElectionNotLeader
+	}
+
+	e.hdr = tresp.Header
+	return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+	if e.leaderSession == nil {
+		return nil
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+	if err == nil {
+		e.hdr = resp.Header
+	}
+	e.leaderKey = ""
+	e.leaderSession = nil
+	return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+	client := e.session.Client()
+	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+	if err != nil {
+		return nil, err
+	} else if len(resp.Kvs) == 0 {
+		// no leader currently elected
+		return nil, ErrElectionNoLeader
+	}
+	return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+	retc := make(chan v3.GetResponse)
+	go e.observe(ctx, retc)
+	return retc
+}
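+
+// A hedged observer sketch (e and ctx are assumed to exist): print each new
+// leader value until the context is done or the watcher is disrupted.
+//
+//	for resp := range e.Observe(ctx) {
+//		fmt.Println("leader value:", string(resp.Kvs[0].Value))
+//	}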
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+	client := e.session.Client()
+
+	defer close(ch)
+	for {
+		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+		if err != nil {
+			return
+		}
+
+		var kv *mvccpb.KeyValue
+		var hdr *pb.ResponseHeader
+
+		if len(resp.Kvs) == 0 {
+			cctx, cancel := context.WithCancel(ctx)
+			// wait for first key put on prefix
+			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+			wch := client.Watch(cctx, e.keyPrefix, opts...)
+			for kv == nil {
+				wr, ok := <-wch
+				if !ok || wr.Err() != nil {
+					cancel()
+					return
+				}
+				// only accept puts; a delete will make observe() spin
+				for _, ev := range wr.Events {
+					if ev.Type == mvccpb.PUT {
+						hdr, kv = &wr.Header, ev.Kv
+						// may have multiple revs; hdr.rev = the last rev
+						// set to kv's rev in case batch has multiple Puts
+						hdr.Revision = kv.ModRevision
+						break
+					}
+				}
+			}
+			cancel()
+		} else {
+			hdr, kv = resp.Header, resp.Kvs[0]
+		}
+
+		select {
+		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+		case <-ctx.Done():
+			return
+		}
+
+		cctx, cancel := context.WithCancel(ctx)
+		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+		keyDeleted := false
+		for !keyDeleted {
+			wr, ok := <-wch
+			if !ok {
+				cancel()
+				return
+			}
+			for _, ev := range wr.Events {
+				if ev.Type == mvccpb.DELETE {
+					keyDeleted = true
+					break
+				}
+				resp.Header = &wr.Header
+				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+				select {
+				case ch <- *resp:
+				case <-cctx.Done():
+					cancel()
+					return
+				}
+			}
+		}
+		cancel()
+	}
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
new file mode 100644
index 0000000..e4cf775
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+
+	v3 "go.etcd.io/etcd/clientv3"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and created at a
+// revision no greater than maxCreateRev are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+	for {
+		resp, err := client.Get(ctx, pfx, getOpts...)
+		if err != nil {
+			return nil, err
+		}
+		if len(resp.Kvs) == 0 {
+			return resp.Header, nil
+		}
+		lastKey := string(resp.Kvs[0].Key)
+		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+			return nil, err
+		}
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
new file mode 100644
index 0000000..306470b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
@@ -0,0 +1,153 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	v3 "go.etcd.io/etcd/clientv3"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var ErrLocked = errors.New("mutex: Locked by another session")
+
+// Mutex implements a distributed mutex on top of etcd; use NewLocker to adapt it
+// to the sync.Locker interface.
+type Mutex struct {
+	s *Session
+
+	pfx   string
+	myKey string
+	myRev int64
+	hdr   *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+	return &Mutex{s, pfx + "/", "", -1, nil}
+}
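+
+// A minimal locking sketch (cli is an assumed *clientv3.Client; error handling elided):
+//
+//	s, _ := concurrency.NewSession(cli)
+//	m := concurrency.NewMutex(s, "/my-lock")
+//	if err := m.Lock(context.TODO()); err != nil {
+//		// handle error
+//	}
+//	// critical section guarded by the etcd lock
+//	_ = m.Unlock(context.TODO())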
+
+// TryLock locks the mutex if it is not already held by another session.
+// If the lock is held by another session, TryLock returns immediately after
+// attempting any necessary cleanup.
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if there is no key on the prefix, or the minimum revision belongs to our key, we already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// Cannot lock, so delete the key
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return ErrLocked
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+	resp, err := m.tryAcquire(ctx)
+	if err != nil {
+		return err
+	}
+	// if there is no key on the prefix, or the minimum revision belongs to our key, we already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+	client := m.s.Client()
+	// wait for deletion revisions prior to myKey
+	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	// release lock key if wait failed
+	if werr != nil {
+		m.Unlock(client.Ctx())
+	} else {
+		m.hdr = hdr
+	}
+	return werr
+}
+
+func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
+	s := m.s
+	client := m.s.Client()
+
+	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
+	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+	// put self in lock waiters via myKey; oldest waiter holds lock
+	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+	// reuse key in case this session already holds the lock
+	get := v3.OpGet(m.myKey)
+	// fetch current holder to complete uncontended path with only one RPC
+	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+	if err != nil {
+		return nil, err
+	}
+	m.myRev = resp.Header.Revision
+	if !resp.Succeeded {
+		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+	}
+	return resp, nil
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+	client := m.s.Client()
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+func (lm *lockerMutex) Unlock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+	return &lockerMutex{NewMutex(s, pfx)}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
new file mode 100644
index 0000000..97eb763
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"time"
+
+	v3 "go.etcd.io/etcd/clientv3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+	client *v3.Client
+	opts   *sessionOptions
+	id     v3.LeaseID
+
+	cancel context.CancelFunc
+	donec  <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
+	for _, opt := range opts {
+		opt(ops)
+	}
+
+	id := ops.leaseID
+	if id == v3.NoLease {
+		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
+		if err != nil {
+			return nil, err
+		}
+		id = resp.ID
+	}
+
+	ctx, cancel := context.WithCancel(ops.ctx)
+	keepAlive, err := client.KeepAlive(ctx, id)
+	if err != nil || keepAlive == nil {
+		cancel()
+		return nil, err
+	}
+
+	donec := make(chan struct{})
+	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
+
+	// keep the lease alive until client error or cancelled context
+	go func() {
+		defer close(donec)
+		for range keepAlive {
+			// eat messages until keep alive channel closes
+		}
+	}()
+
+	return s, nil
+}
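+
+// A hedged session sketch (cli is an assumed *clientv3.Client): a 30-second TTL
+// session whose Done channel signals that the lease is no longer being refreshed.
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer s.Close()
+//	go func() {
+//		<-s.Done()
+//		// lease expired or was orphaned; locks and leadership are no longer valid
+//	}()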
+
+// Client is the etcd client that is attached to the session.
+func (s *Session) Client() *v3.Client {
+	return s.client
+}
+
+// Lease is the lease ID for keys bound to the session.
+func (s *Session) Lease() v3.LeaseID { return s.id }
+
+// Done returns a channel that closes when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Done() <-chan struct{} { return s.donec }
+
+// Orphan ends the refresh for the session lease. This is useful
+// in case the state of the client connection is indeterminate (revoke
+// would fail) or when transferring lease ownership.
+func (s *Session) Orphan() {
+	s.cancel()
+	<-s.donec
+}
+
+// Close orphans the session and revokes the session lease.
+func (s *Session) Close() error {
+	s.Orphan()
+	// if revoke takes longer than the ttl, lease is expired anyway
+	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
+	_, err := s.client.Revoke(ctx, s.id)
+	cancel()
+	return err
+}
+
+type sessionOptions struct {
+	ttl     int
+	leaseID v3.LeaseID
+	ctx     context.Context
+}
+
+// SessionOption configures Session.
+type SessionOption func(*sessionOptions)
+
+// WithTTL configures the session's TTL in seconds.
+// If TTL is <= 0, the default 60 seconds TTL will be used.
+func WithTTL(ttl int) SessionOption {
+	return func(so *sessionOptions) {
+		if ttl > 0 {
+			so.ttl = ttl
+		}
+	}
+}
+
+// WithLease specifies the existing leaseID to be used for the session.
+// This is useful in a process-restart scenario, for example, to reclaim
+// leadership from an election prior to the restart.
+func WithLease(leaseID v3.LeaseID) SessionOption {
+	return func(so *sessionOptions) {
+		so.leaseID = leaseID
+	}
+}
+
+// WithContext assigns a context to the session instead of defaulting to
+// using the client context. This is useful for canceling NewSession and
+// Close operations immediately without having to close the client. If the
+// context is canceled before Close() completes, the session's lease will be
+// abandoned and left to expire instead of being revoked.
+func WithContext(ctx context.Context) SessionOption {
+	return func(so *sessionOptions) {
+		so.ctx = ctx
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
new file mode 100644
index 0000000..ee11510
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
@@ -0,0 +1,387 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"math"
+
+	v3 "go.etcd.io/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+	// Get returns the value for a key and inserts the key in the txn's read set.
+	// If Get fails, it aborts the transaction with an error, never returning.
+	Get(key ...string) string
+	// Put adds a value for a key to the write set.
+	Put(key, val string, opts ...v3.OpOption)
+	// Rev returns the revision of a key in the read set.
+	Rev(key string) int64
+	// Del deletes a key.
+	Del(key string)
+
+	// commit attempts to apply the txn's changes to the server.
+	commit() *v3.TxnResponse
+	reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+	// SerializableSnapshot provides serializable isolation and also checks
+	// for write conflicts.
+	SerializableSnapshot Isolation = iota
+	// Serializable reads within the same transaction attempt return data
+	// from the revision of the first read.
+	Serializable
+	// RepeatableReads reads within the same transaction attempt always
+	// return the same data.
+	RepeatableReads
+	// ReadCommitted reads keys from any committed revision.
+	ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+	iso      Isolation
+	ctx      context.Context
+	prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+	return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+	return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+	opts := &stmOptions{ctx: c.Ctx()}
+	for _, f := range so {
+		f(opts)
+	}
+	if len(opts.prefetch) != 0 {
+		f := apply
+		apply = func(s STM) error {
+			s.Get(opts.prefetch...)
+			return f(s)
+		}
+	}
+	return runSTM(mkSTM(c, opts), apply)
+}
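+
+// A minimal STM sketch (cli is an assumed *clientv3.Client): atomically move the
+// value at key "a" to key "b"; the apply function is retried on conflict.
+//
+//	_, err := concurrency.NewSTM(cli, func(s concurrency.STM) error {
+//		v := s.Get("a")
+//		s.Put("b", v)
+//		s.Del("a")
+//		return nil
+//	})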
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+	switch opts.iso {
+	case SerializableSnapshot:
+		s := &stmSerializable{
+			stm:      stm{client: c, ctx: opts.ctx},
+			prefetch: make(map[string]*v3.GetResponse),
+		}
+		s.conflicts = func() []v3.Cmp {
+			return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+		}
+		return s
+	case Serializable:
+		s := &stmSerializable{
+			stm:      stm{client: c, ctx: opts.ctx},
+			prefetch: make(map[string]*v3.GetResponse),
+		}
+		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+		return s
+	case RepeatableReads:
+		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+		return s
+	case ReadCommitted:
+		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+		s.conflicts = func() []v3.Cmp { return nil }
+		return s
+	default:
+		panic("unsupported stm")
+	}
+}
+
+type stmResponse struct {
+	resp *v3.TxnResponse
+	err  error
+}
+
+func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
+	outc := make(chan stmResponse, 1)
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				e, ok := r.(stmError)
+				if !ok {
+					// client apply panicked
+					panic(r)
+				}
+				outc <- stmResponse{nil, e.err}
+			}
+		}()
+		var out stmResponse
+		for {
+			s.reset()
+			if out.err = apply(s); out.err != nil {
+				break
+			}
+			if out.resp = s.commit(); out.resp != nil {
+				break
+			}
+		}
+		outc <- out
+	}()
+	r := <-outc
+	return r.resp, r.err
+}
+
+// stm implements repeatable-read software transactional memory over etcd
+type stm struct {
+	client *v3.Client
+	ctx    context.Context
+	// rset holds read key values and revisions
+	rset readSet
+	// wset holds overwritten keys and their values
+	wset writeSet
+	// getOpts are the opts used for gets
+	getOpts []v3.OpOption
+	// conflicts computes the current conflicts on the txn
+	conflicts func() []v3.Cmp
+}
+
+type stmPut struct {
+	val string
+	op  v3.Op
+}
+
+type readSet map[string]*v3.GetResponse
+
+func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
+	for i, resp := range txnresp.Responses {
+		rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
+	}
+}
+
+// first returns the store revision from the first fetch
+func (rs readSet) first() int64 {
+	ret := int64(math.MaxInt64 - 1)
+	for _, resp := range rs {
+		if rev := resp.Header.Revision; rev < ret {
+			ret = rev
+		}
+	}
+	return ret
+}
+
+// cmps guards the txn from updates to read set
+func (rs readSet) cmps() []v3.Cmp {
+	cmps := make([]v3.Cmp, 0, len(rs))
+	for k, rk := range rs {
+		cmps = append(cmps, isKeyCurrent(k, rk))
+	}
+	return cmps
+}
+
+type writeSet map[string]stmPut
+
+func (ws writeSet) get(keys ...string) *stmPut {
+	for _, key := range keys {
+		if wv, ok := ws[key]; ok {
+			return &wv
+		}
+	}
+	return nil
+}
+
+// cmps returns a cmp list testing no writes have happened past rev
+func (ws writeSet) cmps(rev int64) []v3.Cmp {
+	cmps := make([]v3.Cmp, 0, len(ws))
+	for key := range ws {
+		cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
+	}
+	return cmps
+}
+
+// puts is the list of ops for all pending writes
+func (ws writeSet) puts() []v3.Op {
+	puts := make([]v3.Op, 0, len(ws))
+	for _, v := range ws {
+		puts = append(puts, v.op)
+	}
+	return puts
+}
+
+func (s *stm) Get(keys ...string) string {
+	if wv := s.wset.get(keys...); wv != nil {
+		return wv.val
+	}
+	return respToValue(s.fetch(keys...))
+}
+
+func (s *stm) Put(key, val string, opts ...v3.OpOption) {
+	s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
+}
+
+func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
+
+func (s *stm) Rev(key string) int64 {
+	if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
+		return resp.Kvs[0].ModRevision
+	}
+	return 0
+}
+
+func (s *stm) commit() *v3.TxnResponse {
+	txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	if txnresp.Succeeded {
+		return txnresp
+	}
+	return nil
+}
+
+func (s *stm) fetch(keys ...string) *v3.GetResponse {
+	if len(keys) == 0 {
+		return nil
+	}
+	ops := make([]v3.Op, len(keys))
+	for i, key := range keys {
+		if resp, ok := s.rset[key]; ok {
+			return resp
+		}
+		ops[i] = v3.OpGet(key, s.getOpts...)
+	}
+	txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	s.rset.add(keys, txnresp)
+	return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
+}
+
+func (s *stm) reset() {
+	s.rset = make(map[string]*v3.GetResponse)
+	s.wset = make(map[string]stmPut)
+}
+
+type stmSerializable struct {
+	stm
+	prefetch map[string]*v3.GetResponse
+}
+
+func (s *stmSerializable) Get(keys ...string) string {
+	if wv := s.wset.get(keys...); wv != nil {
+		return wv.val
+	}
+	firstRead := len(s.rset) == 0
+	for _, key := range keys {
+		if resp, ok := s.prefetch[key]; ok {
+			delete(s.prefetch, key)
+			s.rset[key] = resp
+		}
+	}
+	resp := s.stm.fetch(keys...)
+	if firstRead {
+		// txn's base revision is defined by the first read
+		s.getOpts = []v3.OpOption{
+			v3.WithRev(resp.Header.Revision),
+			v3.WithSerializable(),
+		}
+	}
+	return respToValue(resp)
+}
+
+func (s *stmSerializable) Rev(key string) int64 {
+	s.Get(key)
+	return s.stm.Rev(key)
+}
+
+func (s *stmSerializable) gets() ([]string, []v3.Op) {
+	keys := make([]string, 0, len(s.rset))
+	ops := make([]v3.Op, 0, len(s.rset))
+	for k := range s.rset {
+		keys = append(keys, k)
+		ops = append(ops, v3.OpGet(k))
+	}
+	return keys, ops
+}
+
+func (s *stmSerializable) commit() *v3.TxnResponse {
+	keys, getops := s.gets()
+	txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
+	// use Else to prefetch keys in case of conflict to save a round trip
+	txnresp, err := txn.Else(getops...).Commit()
+	if err != nil {
+		panic(stmError{err})
+	}
+	if txnresp.Succeeded {
+		return txnresp
+	}
+	// load prefetch with Else data
+	s.rset.add(keys, txnresp)
+	s.prefetch = s.rset
+	s.getOpts = nil
+	return nil
+}
+
+func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
+	if len(r.Kvs) != 0 {
+		return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
+	}
+	return v3.Compare(v3.ModRevision(k), "=", 0)
+}
+
+func respToValue(resp *v3.GetResponse) string {
+	if resp == nil || len(resp.Kvs) == 0 {
+		return ""
+	}
+	return string(resp.Kvs[0].Value)
+}
+
+// NewSTMRepeatable is deprecated.
+func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
+}
+
+// NewSTMSerializable is deprecated.
+func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
+}
+
+// NewSTMReadCommitted is deprecated.
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go
new file mode 100644
index 0000000..11d447d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/config.go
@@ -0,0 +1,88 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"crypto/tls"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+)
+
+type Config struct {
+	// Endpoints is a list of URLs.
+	Endpoints []string `json:"endpoints"`
+
+	// AutoSyncInterval is the interval at which the client updates its endpoints
+	// with the cluster's latest members. 0 disables auto-sync, which is the default.
+	AutoSyncInterval time.Duration `json:"auto-sync-interval"`
+
+	// DialTimeout is the timeout for failing to establish a connection.
+	DialTimeout time.Duration `json:"dial-timeout"`
+
+	// DialKeepAliveTime is the time after which the client pings the server to see
+	// whether the transport is alive.
+	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+	// DialKeepAliveTimeout is the time that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
+	// MaxCallSendMsgSize is the client-side request send limit in bytes.
+	// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
+	// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
+	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+	MaxCallSendMsgSize int
+
+	// MaxCallRecvMsgSize is the client-side response receive limit.
+	// If 0, it defaults to "math.MaxInt32", because range response can
+	// easily exceed request send limits.
+	// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
+	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+	MaxCallRecvMsgSize int
+
+	// TLS holds the client secure credentials, if any.
+	TLS *tls.Config
+
+	// Username is a user name for authentication.
+	Username string `json:"username"`
+
+	// Password is a password for authentication.
+	Password string `json:"password"`
+
+	// RejectOldCluster when set will refuse to create a client against an outdated cluster.
+	RejectOldCluster bool `json:"reject-old-cluster"`
+
+	// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+	// For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+	// Without this, Dial returns immediately and connecting to the server happens in the background.
+	DialOptions []grpc.DialOption
+
+	// Context is the default client context; it can be used to cancel grpc dial out and
+	// other operations that do not have an explicit context.
+	Context context.Context
+
+	// LogConfig configures client-side logger.
+	// If nil, use the default logger.
+	// TODO: configure gRPC logger
+	LogConfig *zap.Config
+
+	// PermitWithoutStream, when set, allows the client to send keepalive pings to the
+	// server without any active streams (RPCs).
+	PermitWithoutStream bool `json:"permit-without-stream"`
+
+	// TODO: support custom balancer picker
+}
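+
+// A hedged configuration sketch (endpoint URLs are illustrative):
+//
+//	cfg := clientv3.Config{
+//		Endpoints:   []string{"https://etcd-1:2379", "https://etcd-2:2379"},
+//		DialTimeout: 5 * time.Second,
+//		// block New until the underlying connection is up
+//		DialOptions: []grpc.DialOption{grpc.WithBlock()},
+//	}
+//	cli, err := clientv3.New(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cli.Close()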
diff --git a/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
new file mode 100644
index 0000000..e6fd75c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements the gRPC credentials interface with etcd-specific
+// logic, e.g., a client handshake with a custom authority parameter.
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"sync"
+
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	grpccredentials "google.golang.org/grpc/credentials"
+)
+
+// Config defines gRPC credential configuration.
+type Config struct {
+	TLSConfig *tls.Config
+}
+
+// Bundle defines gRPC credential interface.
+type Bundle interface {
+	grpccredentials.Bundle
+	UpdateAuthToken(token string)
+}
+
+// NewBundle constructs a new gRPC credential bundle.
+func NewBundle(cfg Config) Bundle {
+	return &bundle{
+		tc: newTransportCredential(cfg.TLSConfig),
+		rc: newPerRPCCredential(),
+	}
+}
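+
+// A hedged usage sketch (tlsCfg is an assumed *tls.Config): build a bundle for a
+// client connection and rotate its per-RPC auth token later.
+//
+//	b := credentials.NewBundle(credentials.Config{TLSConfig: tlsCfg})
+//	creds := b.TransportCredentials()
+//	// ... dial with grpc.WithTransportCredentials(creds) ...
+//	b.UpdateAuthToken("new-token")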
+
+// bundle implements "grpccredentials.Bundle" interface.
+type bundle struct {
+	tc *transportCredential
+	rc *perRPCCredential
+}
+
+func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
+	return b.tc
+}
+
+func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+	return b.rc
+}
+
+func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
+	// no-op
+	return nil, nil
+}
+
+// transportCredential implements "grpccredentials.TransportCredentials" interface.
+type transportCredential struct {
+	gtc grpccredentials.TransportCredentials
+}
+
+func newTransportCredential(cfg *tls.Config) *transportCredential {
+	return &transportCredential{
+		gtc: grpccredentials.NewTLS(cfg),
+	}
+}
+
+func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	// Only overwrite when authority is an IP address!
+	// Let's say, a server runs SRV records on "etcd.local" that resolves
+	// to "m1.etcd.local", and its SAN field also includes "m1.etcd.local".
+	// But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)?
+	// Then, the server should only authenticate using its DNS hostname "m1.etcd.local",
+	// instead of overwriting it with its IP address.
+	// And we do not overwrite "localhost" either. Only overwrite IP addresses!
+	if isIP(authority) {
+		target := rawConn.RemoteAddr().String()
+		if authority != target {
+			// When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget"
+			// update only happens once. This is problematic, because when TLS is enabled,
+			// retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from
+			// the initial dial call.
+			// If the server authenticates by IP addresses, we want to set a new endpoint as
+			// a new authority. Otherwise
+			// "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156"
+			// when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180"
+			// but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156"
+			authority = target
+		}
+	}
+	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
+}
+
+// isIP returns true if the given string is an IP address.
+func isIP(ep string) bool {
+	return net.ParseIP(ep) != nil
+}
+
+func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	return tc.gtc.ServerHandshake(rawConn)
+}
+
+func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
+	return tc.gtc.Info()
+}
+
+func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+	return &transportCredential{
+		gtc: tc.gtc.Clone(),
+	}
+}
+
+func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
+	return tc.gtc.OverrideServerName(serverNameOverride)
+}
+
+// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+type perRPCCredential struct {
+	authToken   string
+	authTokenMu sync.RWMutex
+}
+
+func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	rc.authTokenMu.RLock()
+	authToken := rc.authToken
+	rc.authTokenMu.RUnlock()
+	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (b *bundle) UpdateAuthToken(token string) {
+	if b.rc == nil {
+		return
+	}
+	b.rc.UpdateAuthToken(token)
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+	rc.authTokenMu.Lock()
+	rc.authToken = token
+	rc.authTokenMu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go
new file mode 100644
index 0000000..913cd28
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/doc.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package clientv3 implements the official Go etcd client for v3.
+//
+// Create client using `clientv3.New`:
+//
+//	// expect dial time-out on ipv4 blackhole
+//	_, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"http://254.0.0.1:12345"},
+//		DialTimeout: 2 * time.Second,
+//	})
+//
+//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
+//	if err == context.DeadlineExceeded {
+//		// handle errors
+//	}
+//
+//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
+//	if err == grpc.ErrClientConnTimeout {
+//		// handle errors
+//	}
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+//		DialTimeout: 5 * time.Second,
+//	})
+//	if err != nil {
+//		// handle error!
+//	}
+//	defer cli.Close()
+//
+// Make sure to close the client after using it. If the client is not closed, the
+// connection will have leaky goroutines.
+//
+// To specify a client request timeout, wrap the context with context.WithTimeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
+//	cancel()
+//	if err != nil {
+//	    // handle error!
+//	}
+//	// use the response
+//
+// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
+// Clients are safe for concurrent use by multiple goroutines.
+//
+// etcd client returns 3 types of errors:
+//
+//  1. context error: canceled or deadline exceeded.
+//  2. gRPC status error: e.g. when clock drift causes the server-side context deadline to be exceeded before the client's.
+//  3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
+//
+// Here is the example code to handle client errors:
+//
+//	resp, err := kvc.Put(ctx, "", "")
+//	if err != nil {
+//		if err == context.Canceled {
+//			// ctx is canceled by another routine
+//		} else if err == context.DeadlineExceeded {
+//			// the deadline attached to ctx has been exceeded
+//		} else if err == rpctypes.ErrEmptyKey {
+//			// client-side error: key is not provided
+//		} else if ev, ok := status.FromError(err); ok {
+//			code := ev.Code()
+//			if code == codes.DeadlineExceeded {
+//				// server-side context might have timed-out first (due to clock skew)
+//				// while original client-side context is not timed-out yet
+//			}
+//		} else {
+//			// bad cluster endpoints, which are not etcd servers
+//		}
+//	}
+//
+//	go func() { cli.Close() }()
+//	_, err := kvc.Get(ctx, "a")
+//	if err != nil {
+//		// with etcd clientv3 <= v3.3
+//		if err == context.Canceled {
+//			// grpc balancer calls 'Get' with an inflight client.Close
+//		} else if err == grpc.ErrClientConnClosing { // <= gRPC v1.7.x
+//			// grpc balancer calls 'Get' after client.Close.
+//		}
+//		// with etcd clientv3 >= v3.4
+//		if clientv3.IsConnCanceled(err) {
+//			// gRPC client connection is closed
+//		}
+//	}
+//
+// The grpc load balancer is registered statically and is shared across etcd clients.
+// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
+// variable. E.g. "ETCD_CLIENT_DEBUG=1".
+//
+package clientv3
diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go
new file mode 100644
index 0000000..2b7864a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/kv.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	CompactResponse pb.CompactionResponse
+	PutResponse     pb.PutResponse
+	GetResponse     pb.RangeResponse
+	DeleteResponse  pb.DeleteRangeResponse
+	TxnResponse     pb.TxnResponse
+)
+
+type KV interface {
+	// Put puts a key-value pair into etcd.
+	// Note that key and value can be plain byte arrays; a string is
+	// an immutable representation of such a byte array.
+	// To get a string of bytes, do string([]byte{0x10, 0x20}).
+	Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+
+	// Get retrieves keys.
+	// By default, Get will return the value for "key", if any.
+	// When passed WithRange(end), Get will return the keys in the range [key, end).
+	// When passed WithFromKey(), Get returns keys greater than or equal to key.
+	// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
+	// if the required revision is compacted, the request will fail with ErrCompacted.
+	// When passed WithLimit(limit), the number of returned keys is bounded by limit.
+	// When passed WithSort(), the keys will be sorted.
+	Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+
+	// Delete deletes a key, or optionally a range [key, end) when WithRange(end) is given.
+	Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+
+	// Compact compacts etcd KV history before the given rev.
+	Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+
+	// Do applies a single Op on KV without a transaction.
+	// Do is useful when creating arbitrary operations to be issued at a
+	// later time; the user can range over the operations, calling Do to
+	// execute them. Get/Put/Delete, on the other hand, are best suited
+	// for when the operation should be issued at the time of declaration.
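+	// (See the usage sketch after this interface.)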
+	Do(ctx context.Context, op Op) (OpResponse, error)
+
+	// Txn creates a transaction.
+	Txn(ctx context.Context) Txn
+}
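+
+// A minimal sketch of preparing operations and issuing them later
+// through Do (assuming an existing client "cli"; keys and values are
+// illustrative):
+//
+//	ops := []clientv3.Op{
+//		clientv3.OpPut("sample_key", "sample_value"),
+//		clientv3.OpGet("sample_key"),
+//		clientv3.OpDelete("sample_key"),
+//	}
+//	for _, op := range ops {
+//		if _, err := cli.Do(context.TODO(), op); err != nil {
+//			// handle error
+//		}
+//	}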
+
+type OpResponse struct {
+	put *PutResponse
+	get *GetResponse
+	del *DeleteResponse
+	txn *TxnResponse
+}
+
+func (op OpResponse) Put() *PutResponse    { return op.put }
+func (op OpResponse) Get() *GetResponse    { return op.get }
+func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse    { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+	return OpResponse{put: resp}
+}
+func (resp *GetResponse) OpResponse() OpResponse {
+	return OpResponse{get: resp}
+}
+func (resp *DeleteResponse) OpResponse() OpResponse {
+	return OpResponse{del: resp}
+}
+func (resp *TxnResponse) OpResponse() OpResponse {
+	return OpResponse{txn: resp}
+}
+
+type kv struct {
+	remote   pb.KVClient
+	callOpts []grpc.CallOption
+}
+
+func NewKV(c *Client) KV {
+	api := &kv{remote: RetryKVClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
+	api := &kv{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
+	r, err := kv.Do(ctx, OpPut(key, val, opts...))
+	return r.put, toErr(ctx, err)
+}
+
+func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
+	r, err := kv.Do(ctx, OpGet(key, opts...))
+	return r.get, toErr(ctx, err)
+}
+
+func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
+	r, err := kv.Do(ctx, OpDelete(key, opts...))
+	return r.del, toErr(ctx, err)
+}
+
+func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
+	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*CompactResponse)(resp), err
+}
+
+func (kv *kv) Txn(ctx context.Context) Txn {
+	return &txn{
+		kv:       kv,
+		ctx:      ctx,
+		callOpts: kv.callOpts,
+	}
+}
+
+func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
+	var err error
+	switch op.t {
+	case tRange:
+		var resp *pb.RangeResponse
+		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+		if err == nil {
+			return OpResponse{get: (*GetResponse)(resp)}, nil
+		}
+	case tPut:
+		var resp *pb.PutResponse
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{put: (*PutResponse)(resp)}, nil
+		}
+	case tDeleteRange:
+		var resp *pb.DeleteRangeResponse
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{del: (*DeleteResponse)(resp)}, nil
+		}
+	case tTxn:
+		var resp *pb.TxnResponse
+		resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
+		if err == nil {
+			return OpResponse{txn: (*TxnResponse)(resp)}, nil
+		}
+	default:
+		panic("Unknown op")
+	}
+	return OpResponse{}, toErr(ctx, err)
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go
new file mode 100644
index 0000000..c2796fc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/lease.go
@@ -0,0 +1,596 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+type (
+	LeaseRevokeResponse pb.LeaseRevokeResponse
+	LeaseID             int64
+)
+
+// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
+type LeaseGrantResponse struct {
+	*pb.ResponseHeader
+	ID    LeaseID
+	TTL   int64
+	Error string
+}
+
+// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
+type LeaseKeepAliveResponse struct {
+	*pb.ResponseHeader
+	ID  LeaseID
+	TTL int64
+}
+
+// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
+type LeaseTimeToLiveResponse struct {
+	*pb.ResponseHeader
+	ID LeaseID `json:"id"`
+
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns a TTL of -1.
+	TTL int64 `json:"ttl"`
+
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `json:"granted-ttl"`
+
+	// Keys is the list of keys attached to this lease.
+	Keys [][]byte `json:"keys"`
+}
+
+// LeaseStatus represents a lease status.
+type LeaseStatus struct {
+	ID LeaseID `json:"id"`
+	// TODO: TTL int64
+}
+
+// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
+type LeaseLeasesResponse struct {
+	*pb.ResponseHeader
+	Leases []LeaseStatus `json:"leases"`
+}
+
+const (
+	// defaultTTL is the assumed lease TTL used for the first keepalive
+	// deadline before the actual TTL is known to the client.
+	defaultTTL = 5 * time.Second
+	// NoLease is a lease ID for the absence of a lease.
+	NoLease LeaseID = 0
+
+	// retryConnWait is how long to wait before retrying a request after an error
+	retryConnWait = 500 * time.Millisecond
+)
+
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
+// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+	Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+	s := "etcdclient: leases keep alive halted"
+	if e.Reason != nil {
+		s += ": " + e.Reason.Error()
+	}
+	return s
+}
+
+type Lease interface {
+	// Grant creates a new lease.
+	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
+
+	// Revoke revokes the given lease.
+	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
+
+	// TimeToLive retrieves the lease information of the given lease ID.
+	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+
+	// Leases retrieves all leases.
+	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+
+	// KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
+	// to the channel are not consumed promptly the channel may become full. When full, the lease
+	// client will continue sending keep alive requests to the etcd server, but will drop responses
+	// until there is capacity on the channel to send more responses.
+	//
+	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
+	// is canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
+	// containing the error reason.
+	//
+	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
+	// alive stream is interrupted in some way the client cannot handle itself,
+	// or if the given context "ctx" is canceled or timed out.
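+	// (See the usage sketch after this interface.)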
+	//
+	// TODO(v4.0): post errors to last keep alive message before closing
+	// (see https://github.com/etcd-io/etcd/pull/7866)
+	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
+
+	// KeepAliveOnce renews the lease once. The response corresponds to the
+	// first message from calling KeepAlive. If the response has a recoverable
+	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
+	//
+	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
+	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
+
+	// Close releases all resources Lease keeps for efficient communication
+	// with the etcd server.
+	Close() error
+}
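+
+// A minimal sketch of granting a lease and consuming its keep alive
+// responses (assuming an existing client "cli"; the 10-second TTL is
+// illustrative):
+//
+//	grantResp, err := cli.Grant(context.TODO(), 10)
+//	if err != nil {
+//		// handle error
+//	}
+//	ch, kaerr := cli.KeepAlive(context.TODO(), grantResp.ID)
+//	if kaerr != nil {
+//		// handle error (possibly ErrKeepAliveHalted)
+//	}
+//	for karesp := range ch {
+//		// consume promptly so the buffered channel does not fill up
+//		_ = karesp.TTL
+//	}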
+
+type lessor struct {
+	mu sync.Mutex // guards all fields
+
+	// donec is closed and loopErr is set when recvKeepAliveLoop stops
+	donec   chan struct{}
+	loopErr error
+
+	remote pb.LeaseClient
+
+	stream       pb.Lease_LeaseKeepAliveClient
+	streamCancel context.CancelFunc
+
+	stopCtx    context.Context
+	stopCancel context.CancelFunc
+
+	keepAlives map[LeaseID]*keepAlive
+
+	// firstKeepAliveTimeout is the timeout for the first keepalive request
+	// before the actual TTL is known to the lease client
+	firstKeepAliveTimeout time.Duration
+
+	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
+	firstKeepAliveOnce sync.Once
+
+	callOpts []grpc.CallOption
+
+	lg *zap.Logger
+}
+
+// keepAlive multiplexes a keepalive for a lease over multiple channels
+type keepAlive struct {
+	chs  []chan<- *LeaseKeepAliveResponse
+	ctxs []context.Context
+	// deadline is the time the keep alive channels close if no response
+	deadline time.Time
+	// nextKeepAlive is when to send the next keep alive message
+	nextKeepAlive time.Time
+	// donec is closed on lease revoke, expiration, or cancel.
+	donec chan struct{}
+}
+
+func NewLease(c *Client) Lease {
+	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
+	l := &lessor{
+		donec:                 make(chan struct{}),
+		keepAlives:            make(map[LeaseID]*keepAlive),
+		remote:                remote,
+		firstKeepAliveTimeout: keepAliveTimeout,
+	}
+	if l.firstKeepAliveTimeout == time.Second {
+		l.firstKeepAliveTimeout = defaultTTL
+	}
+	if c != nil {
+		l.callOpts = c.callOpts
+		// read the logger inside the nil check to avoid a nil-pointer
+		// dereference when no client is supplied
+		l.lg = c.lg
+	}
+	reqLeaderCtx := WithRequireLeader(context.Background())
+	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
+	return l
+}
+
+func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
+	r := &pb.LeaseGrantRequest{TTL: ttl}
+	resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseGrantResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			Error:          resp.Error,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+	r := &pb.LeaseRevokeRequest{ID: int64(id)}
+	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+	if err == nil {
+		return (*LeaseRevokeResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+	r := toLeaseTimeToLiveRequest(id, opts...)
+	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseTimeToLiveResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			GrantedTTL:     resp.GrantedTTL,
+			Keys:           resp.Keys,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+	if err == nil {
+		leases := make([]LeaseStatus, len(resp.Leases))
+		for i := range resp.Leases {
+			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+		}
+		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
+
+	l.mu.Lock()
+	// ensure that recvKeepAliveLoop is still running
+	select {
+	case <-l.donec:
+		err := l.loopErr
+		l.mu.Unlock()
+		close(ch)
+		return ch, ErrKeepAliveHalted{Reason: err}
+	default:
+	}
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		// create fresh keep alive
+		ka = &keepAlive{
+			chs:           []chan<- *LeaseKeepAliveResponse{ch},
+			ctxs:          []context.Context{ctx},
+			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
+			nextKeepAlive: time.Now(),
+			donec:         make(chan struct{}),
+		}
+		l.keepAlives[id] = ka
+	} else {
+		// add channel and context to existing keep alive
+		ka.ctxs = append(ka.ctxs, ctx)
+		ka.chs = append(ka.chs, ch)
+	}
+	l.mu.Unlock()
+
+	go l.keepAliveCtxCloser(ctx, id, ka.donec)
+	l.firstKeepAliveOnce.Do(func() {
+		go l.recvKeepAliveLoop()
+		go l.deadlineLoop()
+	})
+
+	return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	for {
+		resp, err := l.keepAliveOnce(ctx, id)
+		if err == nil {
+			if resp.TTL <= 0 {
+				err = rpctypes.ErrLeaseNotFound
+			}
+			return resp, err
+		}
+		if isHaltErr(ctx, err) {
+			return nil, toErr(ctx, err)
+		}
+	}
+}
+
+func (l *lessor) Close() error {
+	l.stopCancel()
+	// close for synchronous teardown if stream goroutines never launched
+	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+	<-l.donec
+	return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
+	select {
+	case <-donec:
+		return
+	case <-l.donec:
+		return
+	case <-ctx.Done():
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		return
+	}
+
+	// close channel and remove context if still associated with keep alive
+	for i, c := range ka.ctxs {
+		if c == ctx {
+			close(ka.chs[i])
+			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+			break
+		}
+	}
+	// remove the keep alive if there are no more listeners
+	if len(ka.chs) == 0 {
+		delete(l.keepAlives, id)
+	}
+}
+
+// closeRequireLeader scans keepAlives for contexts that require a leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, ka := range l.keepAlives {
+		reqIdxs := 0
+		// find all required leader channels, close, mark as nil
+		for i, ctx := range ka.ctxs {
+			md, ok := metadata.FromOutgoingContext(ctx)
+			if !ok {
+				continue
+			}
+			ks := md[rpctypes.MetadataRequireLeaderKey]
+			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+				continue
+			}
+			close(ka.chs[i])
+			ka.chs[i] = nil
+			reqIdxs++
+		}
+		if reqIdxs == 0 {
+			continue
+		}
+		// remove all channels that required a leader from keepalive
+		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+		newCtxs := make([]context.Context, len(newChs))
+		newIdx := 0
+		for i := range ka.chs {
+			if ka.chs[i] == nil {
+				continue
+			}
+			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+			newIdx++
+		}
+		ka.chs, ka.ctxs = newChs, newCtxs
+	}
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	resp, rerr := stream.Recv()
+	if rerr != nil {
+		return nil, toErr(ctx, rerr)
+	}
+
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+	return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+	defer func() {
+		l.mu.Lock()
+		close(l.donec)
+		l.loopErr = gerr
+		for _, ka := range l.keepAlives {
+			ka.close()
+		}
+		l.keepAlives = make(map[LeaseID]*keepAlive)
+		l.mu.Unlock()
+	}()
+
+	for {
+		stream, err := l.resetRecv()
+		if err != nil {
+			if canceledByCaller(l.stopCtx, err) {
+				return err
+			}
+		} else {
+			for {
+				resp, err := stream.Recv()
+				if err != nil {
+					if canceledByCaller(l.stopCtx, err) {
+						return err
+					}
+
+					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+						l.closeRequireLeader()
+					}
+					break
+				}
+
+				l.recvKeepAlive(resp)
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-l.stopCtx.Done():
+			return l.stopCtx.Err()
+		}
+	}
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.stream != nil && l.streamCancel != nil {
+		l.streamCancel()
+	}
+
+	l.streamCancel = cancel
+	l.stream = stream
+
+	go l.sendKeepAliveLoop(stream)
+	return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[karesp.ID]
+	if !ok {
+		return
+	}
+
+	if karesp.TTL <= 0 {
+		// lease expired; close all keep alive channels
+		delete(l.keepAlives, karesp.ID)
+		ka.close()
+		return
+	}
+
+	// send update to all channels
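+	// the next keep alive is scheduled at one third of the TTL so that
+	// several sends fit within the lease lifetime before it expires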
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
+	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+	for _, ch := range ka.chs {
+		select {
+		case ch <- karesp:
+		default:
+			if l.lg != nil {
+				l.lg.Warn("lease keepalive response queue is full; dropping response send",
+					zap.Int("queue-size", len(ch)),
+					zap.Int("queue-capacity", cap(ch)),
+				)
+			}
+		}
+		// still advance in order to rate-limit keep-alive sends
+		ka.nextKeepAlive = nextKeepAlive
+	}
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL
+func (l *lessor) deadlineLoop() {
+	for {
+		select {
+		case <-time.After(time.Second):
+		case <-l.donec:
+			return
+		}
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.deadline.Before(now) {
+				// waited too long for response; lease may be expired
+				ka.close()
+				delete(l.keepAlives, id)
+			}
+		}
+		l.mu.Unlock()
+	}
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+	for {
+		var tosend []LeaseID
+
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.nextKeepAlive.Before(now) {
+				tosend = append(tosend, id)
+			}
+		}
+		l.mu.Unlock()
+
+		for _, id := range tosend {
+			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+			if err := stream.Send(r); err != nil {
+				// TODO do something with this error?
+				return
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-stream.Context().Done():
+			return
+		case <-l.donec:
+			return
+		case <-l.stopCtx.Done():
+			return
+		}
+	}
+}
+
+func (ka *keepAlive) close() {
+	close(ka.donec)
+	for _, ch := range ka.chs {
+		close(ch)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go
new file mode 100644
index 0000000..f5ae010
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/logger.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"io/ioutil"
+	"sync"
+
+	"go.etcd.io/etcd/pkg/logutil"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	lgMu sync.RWMutex
+	lg   logutil.Logger
+)
+
+type settableLogger struct {
+	l  grpclog.LoggerV2
+	mu sync.RWMutex
+}
+
+func init() {
+	// disable client side logs by default
+	lg = &settableLogger{}
+	SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
+}
+
+// SetLogger sets client-side Logger.
+func SetLogger(l grpclog.LoggerV2) {
+	lgMu.Lock()
+	lg = logutil.NewLogger(l)
+	// override grpclog so that any changes happen with locking
+	grpclog.SetLoggerV2(lg)
+	lgMu.Unlock()
+}
+
+// GetLogger returns the current logutil.Logger.
+func GetLogger() logutil.Logger {
+	lgMu.RLock()
+	l := lg
+	lgMu.RUnlock()
+	return l
+}
+
+// NewLogger returns a new logutil.Logger wrapping the given grpclog.LoggerV2.
+func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
+	return &settableLogger{l: gl}
+}
+
+func (s *settableLogger) get() grpclog.LoggerV2 {
+	s.mu.RLock()
+	l := s.l
+	s.mu.RUnlock()
+	return l
+}
+
+// implement the grpclog.LoggerV2 interface
+
+func (s *settableLogger) Info(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Infoln(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) Warning(args ...interface{})              { s.get().Warning(args...) }
+func (s *settableLogger) Warningf(format string, args ...interface{}) {
+	s.get().Warningf(format, args...)
+}
+func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
+func (s *settableLogger) Error(args ...interface{})     { s.get().Error(args...) }
+func (s *settableLogger) Errorf(format string, args ...interface{}) {
+	s.get().Errorf(format, args...)
+}
+func (s *settableLogger) Errorln(args ...interface{})               { s.get().Errorln(args...) }
+func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
+func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
+func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
+func (s *settableLogger) Print(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Println(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) V(l int) bool                              { return s.get().V(l) }
+func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
+	s.mu.RLock()
+	l := s.l
+	s.mu.RUnlock()
+	if l.V(lvl) {
+		return s
+	}
+	return logutil.NewDiscardLogger()
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
new file mode 100644
index 0000000..744455a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
@@ -0,0 +1,230 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+type (
+	DefragmentResponse pb.DefragmentResponse
+	AlarmResponse      pb.AlarmResponse
+	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
+	HashKVResponse     pb.HashKVResponse
+	MoveLeaderResponse pb.MoveLeaderResponse
+)
+
+type Maintenance interface {
+	// AlarmList gets all active alarms.
+	AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+	// AlarmDisarm disarms a given alarm.
+	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
+	// Defragment is only needed after deleting a large number of keys, to reclaim
+	// the resources.
+	// Defragment is an expensive operation, so the user should avoid defragmenting
+	// multiple members at the same time.
+	// To defragment multiple members in the cluster, call Defragment multiple
+	// times with different endpoints.
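+	// (See the usage sketch after this interface.)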
+	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the endpoint.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+	// HashKV returns a hash of the KV state at the time of the RPC.
+	// If revision is zero, the hash is computed on all keys. If the revision
+	// is non-zero, the hash is computed on all keys at or below the given revision.
+	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
+	// If the context "ctx" is canceled or timed out, reading from the returned
+	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+	// MoveLeader requests the current leader to transfer its leadership to the transferee.
+	// The request must be made to the leader.
+	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+}
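+
+// A minimal sketch of defragmenting the cluster one member at a time
+// (assuming an existing client "cli"):
+//
+//	for _, ep := range cli.Endpoints() {
+//		if _, err := cli.Defragment(context.TODO(), ep); err != nil {
+//			// handle error; avoid defragmenting members concurrently
+//		}
+//	}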
+
+type maintenance struct {
+	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
+	remote   pb.MaintenanceClient
+	callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+			conn, err := c.Dial(endpoint)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
+			}
+			cancel := func() { conn.Close() }
+			return RetryMaintenanceClient(c, conn), cancel, nil
+		},
+		remote: RetryMaintenanceClient(c, c.conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(string) (pb.MaintenanceClient, func(), error) {
+			return remote, func() {}, nil
+		},
+		remote: remote,
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_GET,
+		MemberID: 0,                 // all
+		Alarm:    pb.AlarmType_NONE, // all
+	}
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_DEACTIVATE,
+		MemberID: am.MemberID,
+		Alarm:    am.Alarm,
+	}
+
+	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+		ar, err := m.AlarmList(ctx)
+		if err != nil {
+			return nil, toErr(ctx, err)
+		}
+		ret := AlarmResponse{}
+		for _, am := range ar.Alarms {
+			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+			if derr != nil {
+				return nil, toErr(ctx, derr)
+			}
+			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+		}
+		return &ret, nil
+	}
+
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		for {
+			resp, err := ss.Recv()
+			if err != nil {
+				pw.CloseWithError(err)
+				return
+			}
+			if resp == nil && err == nil {
+				break
+			}
+			if _, werr := pw.Write(resp.Blob); werr != nil {
+				pw.CloseWithError(werr)
+				return
+			}
+		}
+		pw.Close()
+	}()
+	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
+
+type snapshotReadCloser struct {
+	ctx context.Context
+	io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+	n, err = rc.ReadCloser.Read(p)
+	return n, toErr(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go
new file mode 100644
index 0000000..81ae31f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/op.go
@@ -0,0 +1,560 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+type opType int
+
+const (
+	// A default Op has opType 0, which is invalid.
+	tRange opType = iota + 1
+	tPut
+	tDeleteRange
+	tTxn
+)
+
+var noPrefixEnd = []byte{0}
+
+// Op represents an Operation that kv can execute.
+type Op struct {
+	t   opType
+	key []byte
+	end []byte
+
+	// for range
+	limit        int64
+	sort         *SortOption
+	serializable bool
+	keysOnly     bool
+	countOnly    bool
+	minModRev    int64
+	maxModRev    int64
+	minCreateRev int64
+	maxCreateRev int64
+
+	// for range, watch
+	rev int64
+
+	// for watch, put, delete
+	prevKV bool
+
+	// for watch
+	// fragmentation should be disabled by default
+	// if true, split watch events when total exceeds
+	// "--max-request-bytes" flag value + 512-byte
+	fragment bool
+
+	// for put
+	ignoreValue bool
+	ignoreLease bool
+
+	// progressNotify is for progress updates.
+	progressNotify bool
+	// createdNotify is for created event
+	createdNotify bool
+	// filters for watchers
+	filterPut    bool
+	filterDelete bool
+
+	// for put
+	val     []byte
+	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
+}
+
+// accessors / mutators
+
+// IsTxn returns true if the "Op" type is transaction.
+func (op Op) IsTxn() bool {
+	return op.t == tTxn
+}
+
+// Txn returns the comparison (if) operations, "then" operations, and "else" operations.
+func (op Op) Txn() ([]Cmp, []Op, []Op) {
+	return op.cmps, op.thenOps, op.elseOps
+}
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
+func (op *Op) WithValueBytes(v []byte) { op.val = v }
+
+func (op Op) toRangeRequest() *pb.RangeRequest {
+	if op.t != tRange {
+		panic("op.t != tRange")
+	}
+	r := &pb.RangeRequest{
+		Key:               op.key,
+		RangeEnd:          op.end,
+		Limit:             op.limit,
+		Revision:          op.rev,
+		Serializable:      op.serializable,
+		KeysOnly:          op.keysOnly,
+		CountOnly:         op.countOnly,
+		MinModRevision:    op.minModRev,
+		MaxModRevision:    op.maxModRev,
+		MinCreateRevision: op.minCreateRev,
+		MaxCreateRevision: op.maxCreateRev,
+	}
+	if op.sort != nil {
+		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
+		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
+	}
+	return r
+}
+
+func (op Op) toTxnRequest() *pb.TxnRequest {
+	thenOps := make([]*pb.RequestOp, len(op.thenOps))
+	for i, tOp := range op.thenOps {
+		thenOps[i] = tOp.toRequestOp()
+	}
+	elseOps := make([]*pb.RequestOp, len(op.elseOps))
+	for i, eOp := range op.elseOps {
+		elseOps[i] = eOp.toRequestOp()
+	}
+	cmps := make([]*pb.Compare, len(op.cmps))
+	for i := range op.cmps {
+		cmps[i] = (*pb.Compare)(&op.cmps[i])
+	}
+	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
+}
+
+func (op Op) toRequestOp() *pb.RequestOp {
+	switch op.t {
+	case tRange:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
+	case tPut:
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
+	case tDeleteRange:
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+	case tTxn:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
+	default:
+		panic("Unknown Op")
+	}
+}
+
+func (op Op) isWrite() bool {
+	if op.t == tTxn {
+		for _, tOp := range op.thenOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		for _, tOp := range op.elseOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		return false
+	}
+	return op.t != tRange
+}
+
+// OpGet returns "get" operation based on given key and operation options.
+func OpGet(key string, opts ...OpOption) Op {
+	// WithPrefix and WithFromKey are not supported together
+	if isWithPrefix(opts) && isWithFromKey(opts) {
+		panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+	}
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	return ret
+}
+
+// OpDelete returns "delete" operation based on given key and operation options.
+func OpDelete(key string, opts ...OpOption) Op {
+	// WithPrefix and WithFromKey are not supported together
+	if isWithPrefix(opts) && isWithFromKey(opts) {
+		panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+	}
+	ret := Op{t: tDeleteRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in delete")
+	case ret.limit != 0:
+		panic("unexpected limit in delete")
+	case ret.rev != 0:
+		panic("unexpected revision in delete")
+	case ret.sort != nil:
+		panic("unexpected sort in delete")
+	case ret.serializable:
+		panic("unexpected serializable in delete")
+	case ret.countOnly:
+		panic("unexpected countOnly in delete")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in delete")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in delete")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in delete")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in delete")
+	}
+	return ret
+}
+
+// OpPut returns "put" operation based on given key-value and operation options.
+func OpPut(key, val string, opts ...OpOption) Op {
+	ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.end != nil:
+		panic("unexpected range in put")
+	case ret.limit != 0:
+		panic("unexpected limit in put")
+	case ret.rev != 0:
+		panic("unexpected revision in put")
+	case ret.sort != nil:
+		panic("unexpected sort in put")
+	case ret.serializable:
+		panic("unexpected serializable in put")
+	case ret.countOnly:
+		panic("unexpected countOnly in put")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in put")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in put")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in put")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in put")
+	}
+	return ret
+}
+
+// OpTxn returns "txn" operation based on given transaction conditions.
+func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
+	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
+}
+
+func opWatch(key string, opts ...OpOption) Op {
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in watch")
+	case ret.limit != 0:
+		panic("unexpected limit in watch")
+	case ret.sort != nil:
+		panic("unexpected sort in watch")
+	case ret.serializable:
+		panic("unexpected serializable in watch")
+	case ret.countOnly:
+		panic("unexpected countOnly in watch")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in watch")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in watch")
+	}
+	return ret
+}
+
+func (op *Op) applyOpts(opts []OpOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpOption configures Operations like Get, Put, Delete.
+type OpOption func(*Op)
+
+// WithLease attaches a lease ID to a key in 'Put' request.
+func WithLease(leaseID LeaseID) OpOption {
+	return func(op *Op) { op.leaseID = leaseID }
+}
+
+// WithLimit limits the number of results to return from 'Get' request.
+// If WithLimit is given a 0 limit, it is treated as no limit.
+func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
+
+// WithRev specifies the store revision for 'Get' request.
+// Or the start revision of 'Watch' request.
+func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
+
+// WithSort specifies the ordering in 'Get' request. It requires
+// 'WithRange' and/or 'WithPrefix' to be specified too.
+// 'target' specifies the field to sort by: key, version, create revision, modify revision, or value.
+// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
+func WithSort(target SortTarget, order SortOrder) OpOption {
+	return func(op *Op) {
+		if target == SortByKey && order == SortAscend {
+			// If order != SortNone, server fetches the entire key-space,
+			// and then applies the sort and limit, if provided.
+			// Since by default the server returns results sorted by keys
+			// in lexicographically ascending order, the client should ignore
+			// SortOrder if the target is SortByKey.
+			order = SortNone
+		}
+		op.sort = &SortOption{target, order}
+	}
+}
+
+// GetPrefixRangeEnd gets the range end of the prefix.
+// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
+func GetPrefixRangeEnd(prefix string) string {
+	return string(getPrefix([]byte(prefix)))
+}
+
+func getPrefix(key []byte) []byte {
+	end := make([]byte, len(key))
+	copy(end, key)
+	for i := len(end) - 1; i >= 0; i-- {
+		if end[i] < 0xff {
+			end[i] = end[i] + 1
+			end = end[:i+1]
+			return end
+		}
+	}
+	// next prefix does not exist (e.g., 0xffff);
+	// default to WithFromKey policy
+	return noPrefixEnd
+}
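+
+// For example, getPrefix([]byte("foo")) returns []byte("fop"), so a
+// prefix request on "foo" ranges over ["foo", "fop") and matches "foo",
+// "foo1", "foobar", and so on.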
+
+// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
+// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
+// can return 'foo1', 'foo2', and so on.
+func WithPrefix() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key, op.end = []byte{0}, []byte{0}
+			return
+		}
+		op.end = getPrefix(op.key)
+	}
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' return
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than the start key.
+func WithRange(endKey string) OpOption {
+	return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
+// to be greater than or equal to the given key.
+func WithFromKey() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key = []byte{0}
+		}
+		op.end = []byte("\x00")
+	}
+}
+
+// WithSerializable makes the 'Get' request serializable. By default,
+// it's linearizable. Serializable requests are served by the local member
+// without consensus, trading possibly stale reads for lower latency.
+func WithSerializable() OpOption {
+	return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys; the
+// corresponding values are omitted.
+func WithKeysOnly() OpOption {
+	return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+	return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the get's prefix given a sort order
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
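+
+// For example, to fetch only the oldest key under a prefix (a sketch,
+// assuming an existing client "cli"):
+//
+//	resp, err := cli.Get(context.TODO(), "foo", clientv3.WithFirstCreate()...)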
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+	return func(op *Op) {
+		op.progressNotify = true
+	}
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithFragment enables splitting a raw watch response into fragments.
+// Fragmentation is disabled by default. If fragmentation is enabled,
+// the etcd watch server splits a watch response before sending it to clients
+// when the total size of watch events exceeds the server-side request limit.
+// The default server-side request limit is 1.5 MiB, which can be configured
+// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
+// See "etcdserver/api/v3rpc/watch.go" for more details.
+func WithFragment() OpOption {
+	return func(op *Op) { op.fragment = true }
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
+
+// isWithPrefix returns true if WithPrefix is being called in the op
+func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
+
+// isWithFromKey returns true if WithFromKey is being called in the op
+func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go
new file mode 100644
index 0000000..700714c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/options.go
@@ -0,0 +1,65 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	// client-side handling of retrying request failures where data was not written to the wire or
+	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
+	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
+	// transient failures.
+	defaultFailFast = grpc.FailFast(false)
+
+	// client-side request send limit, gRPC default is math.MaxInt32
+	// Make sure that "client-side send limit < server-side default send/recv limit"
+	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+	// client-side response receive limit, gRPC default is 4MB
+	// Make sure that "client-side receive limit >= server-side default send/recv limit"
+	// because a range response can easily exceed request send limits
+	// Defaults to math.MaxInt32; writes exceeding the server-side send limit fail anyway
+	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+
+	// client-side non-streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultUnaryMaxRetries uint = 100
+
+	// client-side streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultStreamMaxRetries = ^uint(0) // max uint
+
+	// client-side retry backoff wait between requests.
+	defaultBackoffWaitBetween = 25 * time.Millisecond
+
+	// client-side retry backoff default jitter fraction.
+	defaultBackoffJitterFraction = 0.10
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
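+
+// The send/receive limits above can be overridden per client. A minimal
+// sketch (the endpoint is illustrative):
+//
+//	cli, err := New(Config{
+//		Endpoints:          []string{"localhost:2379"},
+//		MaxCallSendMsgSize: 4 * 1024 * 1024, // raise the 2 MiB send default
+//		MaxCallRecvMsgSize: math.MaxInt32,   // keep the receive default explicit
+//	})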
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go
new file mode 100644
index 0000000..7e855de
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/retry.go
@@ -0,0 +1,298 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type retryPolicy uint8
+
+const (
+	repeatable retryPolicy = iota
+	nonRepeatable
+)
+
+func (rp retryPolicy) String() string {
+	switch rp {
+	case repeatable:
+		return "repeatable"
+	case nonRepeatable:
+		return "nonRepeatable"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
+// immutable requests (e.g. Get) should be retried unless it's
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// Returning "false" means retry should stop, since client cannot
+// handle itself even with retries.
+func isSafeRetryImmutableRPC(err error) bool {
+	eErr := rpctypes.Error(err)
+	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+		// interrupted by non-transient server-side or gRPC-side error
+		// client cannot handle itself (e.g. rpctypes.ErrCompacted)
+		return false
+	}
+	// only retry if unavailable
+	ev, ok := status.FromError(err)
+	if !ok {
+		// all errors from RPCs are typed "grpc/status.(*statusError)"
+		// (ref. https://github.com/grpc/grpc-go/pull/1782)
+		//
+		// if the error type is not "grpc/status.(*statusError)",
+		// it could be from "Dial"
+		// TODO: do not retry for now
+		// ref. https://github.com/grpc/grpc-go/issues/1581
+		return false
+	}
+	return ev.Code() == codes.Unavailable
+}
+
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
+// mutable requests (e.g. Put, Delete, Txn) should only be retried
+// when the status code is codes.Unavailable and the initial connection
+// has not yet been established (no endpoint is up).
+//
+// Returning "false" means the retry must stop; retrying anyway would
+// violate write-at-most-once semantics.
+func isSafeRetryMutableRPC(err error) bool {
+	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+		// not safe for mutable RPCs
+		// e.g. interrupted by non-transient error that client cannot handle itself,
+		// or transient error while the connection has already been established
+		return false
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc == "there is no address available" || desc == "there is no connection available"
+}
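+
+// A classification sketch for the two predicates above (the error message is
+// illustrative; only its status code and description matter here):
+//
+//	err := status.Error(codes.Unavailable, "transport is closing")
+//	_ = isSafeRetryImmutableRPC(err) // true: reads may be retried on Unavailable
+//	_ = isSafeRetryMutableRPC(err)   // false: the description does not prove no write reached a server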
+
+type retryKVClient struct {
+	kc pb.KVClient
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+	return &retryKVClient{
+		kc: pb.NewKVClient(c.conn),
+	}
+}
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	return rkv.kc.Put(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	return rkv.kc.DeleteRange(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	return rkv.kc.Txn(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	return rkv.kc.Compact(ctx, in, opts...)
+}
+
+type retryLeaseClient struct {
+	lc pb.LeaseClient
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+	return &retryLeaseClient{
+		lc: pb.NewLeaseClient(c.conn),
+	}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+}
+
+type retryClusterClient struct {
+	cc pb.ClusterClient
+}
+
+// RetryClusterClient implements a ClusterClient.
+func RetryClusterClient(c *Client) pb.ClusterClient {
+	return &retryClusterClient{
+		cc: pb.NewClusterClient(c.conn),
+	}
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+	return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	return rcc.cc.MemberAdd(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+	return rcc.cc.MemberRemove(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	return rcc.cc.MemberUpdate(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+	return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
+type retryMaintenanceClient struct {
+	mc pb.MaintenanceClient
+}
+
+// RetryMaintenanceClient implements a Maintenance.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	return &retryMaintenanceClient{
+		mc: pb.NewMaintenanceClient(conn),
+	}
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	return rmc.mc.Defragment(ctx, in, opts...)
+}
+
+type retryAuthClient struct {
+	ac pb.AuthClient
+}
+
+// RetryAuthClient implements a AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient {
+	return &retryAuthClient{
+		ac: pb.NewAuthClient(c.conn),
+	}
+}
+
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
+	return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
+	return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
+	return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
+	return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+	return rac.ac.AuthEnable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
+	return rac.ac.AuthDisable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
+	return rac.ac.UserAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
+	return rac.ac.UserDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
+	return rac.ac.UserChangePassword(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
+	return rac.ac.UserGrantRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
+	return rac.ac.UserRevokeRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+	return rac.ac.RoleAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+	return rac.ac.RoleDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+	return rac.ac.RoleGrantPermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+	return rac.ac.RoleRevokePermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	return rac.ac.Authenticate(ctx, in, opts...)
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
new file mode 100644
index 0000000..080490a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
@@ -0,0 +1,389 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
+// fine grained error checking required by write-at-most-once retry semantics of etcd.
+
+package clientv3
+
+import (
+	"context"
+	"io"
+	"sync"
+	"time"
+
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// unaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. withMax) on creation of the interceptor or on call (through grpc.CallOptions).
+func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short circuit for simplicity, and avoiding allocations.
+		if callOpts.max == 0 {
+			return invoker(ctx, method, req, reply, cc, grpcOpts...)
+		}
+		var lastErr error
+		for attempt := uint(0); attempt < callOpts.max; attempt++ {
+			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
+				return err
+			}
+			logger.Debug(
+				"retrying of unary invoker",
+				zap.String("target", cc.Target()),
+				zap.Uint("attempt", attempt),
+			)
+			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
+			if lastErr == nil {
+				return nil
+			}
+			logger.Warn(
+				"retrying of unary invoker failed",
+				zap.String("target", cc.Target()),
+				zap.Uint("attempt", attempt),
+				zap.Error(lastErr),
+			)
+			if isContextError(lastErr) {
+				if ctx.Err() != nil {
+					// it's the context deadline or cancellation.
+					return lastErr
+				}
+				// it's the callCtx deadline or cancellation, in which case try again.
+				continue
+			}
+			if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
+				gterr := c.getToken(ctx)
+				if gterr != nil {
+					logger.Warn(
+						"retrying of unary invoker failed to fetch new auth token",
+						zap.String("target", cc.Target()),
+						zap.Error(gterr),
+					)
+					return gterr // lastErr must be invalid auth token
+				}
+				continue
+			}
+			if !isSafeRetry(c.lg, lastErr, callOpts) {
+				return lastErr
+			}
+		}
+		return lastErr
+	}
+}
+
+// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. withMax) on creation of the interceptor or on call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short circuit for simplicity, and avoiding allocations.
+		if callOpts.max == 0 {
+			return streamer(ctx, desc, cc, method, grpcOpts...)
+		}
+		if desc.ClientStreams {
+			return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+		}
+		newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
+		if err != nil {
+			logger.Warn("retry stream intercept failed to create new stream", zap.Error(err))
+			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
+			return nil, err
+		}
+		retryingStreamer := &serverStreamingRetryingStream{
+			client:       c,
+			ClientStream: newStreamer,
+			callOpts:     callOpts,
+			ctx:          ctx,
+			streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+				return streamer(ctx, desc, cc, method, grpcOpts...)
+			},
+		}
+		return retryingStreamer, nil
+	}
+}
+
+// serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+	grpc.ClientStream
+	client        *Client
+	bufferedSends []interface{} // messages buffered to resend when reestablishing the stream
+	receivedGood  bool          // indicates whether any prior receives were successful
+	wasClosedSend bool          // indicates that CloseSend was called
+	ctx           context.Context
+	callOpts      *options
+	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
+	mu            sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+	s.mu.Lock()
+	s.ClientStream = clientStream
+	s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+	s.mu.Lock()
+	s.bufferedSends = append(s.bufferedSends, m)
+	s.mu.Unlock()
+	return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+	s.mu.Lock()
+	s.wasClosedSend = true
+	s.mu.Unlock()
+	return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+	return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+	return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+	if !attemptRetry {
+		return lastErr // success or hard failure
+	}
+	// We start off from attempt 1, because the zeroth attempt was already made on the normal SendMsg().
+	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+			return err
+		}
+		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+		if err != nil {
+			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
+			return err
+		}
+		s.setStream(newStream)
+		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+		if !attemptRetry {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+	s.mu.RLock()
+	wasGood := s.receivedGood
+	s.mu.RUnlock()
+	err := s.getStream().RecvMsg(m)
+	if err == nil || err == io.EOF {
+		s.mu.Lock()
+		s.receivedGood = true
+		s.mu.Unlock()
+		return false, err
+	} else if wasGood {
+		// previous RecvMsg in the stream succeeded, no retry logic should interfere
+		return false, err
+	}
+	if isContextError(err) {
+		if s.ctx.Err() != nil {
+			return false, err
+		}
+		// it's the callCtx deadline or cancellation, in which case try again.
+		return true, err
+	}
+	if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+		gterr := s.client.getToken(s.ctx)
+		if gterr != nil {
+			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
+			return false, err // return the original error for simplicity
+		}
+		return true, err
+
+	}
+	return isSafeRetry(s.client.lg, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+	s.mu.RLock()
+	bufferedSends := s.bufferedSends
+	s.mu.RUnlock()
+	newStream, err := s.streamerCall(callCtx)
+	if err != nil {
+		return nil, err
+	}
+	for _, msg := range bufferedSends {
+		if err := newStream.SendMsg(msg); err != nil {
+			return nil, err
+		}
+	}
+	if err := newStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+	waitTime := time.Duration(0)
+	if attempt > 0 {
+		waitTime = callOpts.backoffFunc(attempt)
+	}
+	if waitTime > 0 {
+		timer := time.NewTimer(waitTime)
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return contextErrToGrpcErr(ctx.Err())
+		case <-timer.C:
+		}
+	}
+	return nil
+}
+
+// isSafeRetry returns "true", if request is safe for retry with the given error.
+func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
+	if isContextError(err) {
+		return false
+	}
+	switch callOpts.retryPolicy {
+	case repeatable:
+		return isSafeRetryImmutableRPC(err)
+	case nonRepeatable:
+		return isSafeRetryMutableRPC(err)
+	default:
+		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+		return false
+	}
+}
+
+func isContextError(err error) bool {
+	return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
+}
+
+func contextErrToGrpcErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Errorf(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Errorf(codes.Canceled, err.Error())
+	default:
+		return status.Errorf(codes.Unknown, err.Error())
+	}
+}
+
+var (
+	defaultOptions = &options{
+		retryPolicy: nonRepeatable,
+		max:         0, // disable
+		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+		retryAuth:   true,
+	}
+)
+
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return the duration the client
+// should wait. If the returned duration is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRetryPolicy sets the retry policy of this call.
+func withRetryPolicy(rp retryPolicy) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.retryPolicy = rp
+	}}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.max = maxRetries
+	}}
+}
+
+// withBackoff sets the `backoffFunc` used to control the time between retries.
+func withBackoff(bf backoffFunc) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.backoffFunc = bf
+	}}
+}
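+
+// A plausible wiring sketch (assuming a *Client value "c"), not necessarily
+// how client.go assembles it: the dial options attach the retrying
+// interceptors using the defaults from options.go.
+//
+//	bf := withBackoff(backoffLinearWithJitter(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+//	unaryOpt := grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), bf))
+//	streamOpt := grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(defaultStreamMaxRetries), bf))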
+
+type options struct {
+	retryPolicy retryPolicy
+	max         uint
+	backoffFunc backoffFunc
+	retryAuth   bool
+}
+
+// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
+type retryOption struct {
+	grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
+	applyFunc            func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
+	if len(retryOptions) == 0 {
+		return opt
+	}
+	optCopy := &options{}
+	*optCopy = *opt
+	for _, f := range retryOptions {
+		f.applyFunc(optCopy)
+	}
+	return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
+	for _, opt := range callOptions {
+		if co, ok := opt.(retryOption); ok {
+			retryOptions = append(retryOptions, co)
+		} else {
+			grpcOptions = append(grpcOptions, opt)
+		}
+	}
+	return grpcOptions, retryOptions
+}
+
+// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		return jitterUp(waitBetween, jitterFraction)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go
new file mode 100644
index 0000000..2bb9d9a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/sort.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type SortTarget int
+type SortOrder int
+
+const (
+	SortNone SortOrder = iota
+	SortAscend
+	SortDescend
+)
+
+const (
+	SortByKey SortTarget = iota
+	SortByVersion
+	SortByCreateRevision
+	SortByModRevision
+	SortByValue
+)
+
+type SortOption struct {
+	Target SortTarget
+	Order  SortOrder
+}
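+
+// A usage sketch combining these with a ranged Get (assuming a connected
+// *Client "cli"; the prefix is illustrative):
+//
+//	resp, err := cli.Get(context.TODO(), "job/", WithPrefix(),
+//		WithSort(SortByCreateRevision, SortDescend))
+//	// resp.Kvs is ordered from the most recently created key to the oldest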
diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go
new file mode 100644
index 0000000..c19715d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/txn.go
@@ -0,0 +1,151 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	 Txn(context.TODO()).If(
+//	  Compare(Value(k1), ">", v1),
+//	  Compare(Version(k1), "=", 2)
+//	 ).Then(
+//	  OpPut(k2,v2), OpPut(k3,v3)
+//	 ).Else(
+//	  OpPut(k4,v4), OpPut(k5,v5)
+//	 ).Commit()
+//
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed. Otherwise, the
+	// operations passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed, if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed, if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+
+	callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	var resp *pb.TxnResponse
+	var err error
+	resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+	if err != nil {
+		return nil, toErr(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+}
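+
+// A minimal commit sketch (assuming a connected *Client "cli"; the key and
+// value are illustrative). The If/Then/Else chain maps onto the Compare,
+// Success, and Failure fields of the pb.TxnRequest built by Commit:
+//
+//	resp, err := cli.Txn(context.TODO()).
+//		If(Compare(Version("k"), "=", 0)).
+//		Then(OpPut("k", "v")).
+//		Commit()
+//	if err == nil && resp.Succeeded {
+//		// "k" was absent and has now been created
+//	}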
diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go
new file mode 100644
index 0000000..b998c41
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/utils.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math/rand"
+	"reflect"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a duration within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
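+
+// Worked example: with the retry defaults (25ms wait, 0.10 jitter), each call
+// below returns a uniformly random duration in [22.5ms, 27.5ms]:
+//
+//	d := jitterUp(25*time.Millisecond, 0.10)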
+
+// Check if the provided function is being called in the op options.
+func isOpFuncCalled(op string, opts []OpOption) bool {
+	for _, opt := range opts {
+		v := reflect.ValueOf(opt)
+		if v.Kind() == reflect.Func {
+			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
+				if strings.Contains(opFunc.Name(), op) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go
new file mode 100644
index 0000000..87d222d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/watch.go
@@ -0,0 +1,987 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	EventTypeDelete = mvccpb.DELETE
+	EventTypePut    = mvccpb.PUT
+
+	closeSendErrTimeout = 250 * time.Millisecond
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+	// Watch watches on a key or prefix. The watched events will be returned
+	// through the returned channel. If revisions waiting to be sent over the
+	// watch are compacted, then the watch will be canceled by the server, the
+	// client will post a compacted error watch response, and the channel will close.
+	// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
+	// and "WatchResponse" from this closed channel has zero events and nil "Err()".
+	// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
+	// to release the associated resources.
+	//
+	// If the context is "context.Background/TODO", returned "WatchChan" will
+	// not be closed and block until event is triggered, except when server
+	// returns a non-recoverable error (e.g. ErrCompacted).
+	// For example, when context passed with "WithRequireLeader" and the
+	// connected server has no leader (e.g. due to network partition),
+	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
+	// and then "WatchChan" is closed with non-nil "Err()".
+	// In order to prevent a watch stream from being stuck in a partitioned node,
+	// make sure to wrap context with "WithRequireLeader".
+	//
+	// Otherwise, as long as the context has not been canceled or timed out,
+	// the watch will retry on other recoverable errors forever until reconnected.
+	//
+	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
+	// Currently, client contexts are overwritten with "valCtx" that never closes.
+	// TODO(v3.4): configure watch retry policy, limit maximum retry number
+	// (see https://github.com/etcd-io/etcd/issues/8980)
+	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+	// RequestProgress requests a progress notify response be sent in all watch channels.
+	RequestProgress(ctx context.Context) error
+
+	// Close closes the watcher and cancels all watch requests.
+	Close() error
+}
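+
+// A minimal consumption sketch (assuming a connected *Client "cli"; the
+// prefix is illustrative). WithRequireLeader guards against a watch stream
+// getting stuck on a partitioned member, as the Watch documentation advises:
+//
+//	ctx, cancel := context.WithCancel(WithRequireLeader(context.Background()))
+//	defer cancel()
+//	for wresp := range cli.Watch(ctx, "foo/", WithPrefix()) {
+//		if err := wresp.Err(); err != nil {
+//			break // e.g. ErrCompacted or ErrNoLeader; the channel will close
+//		}
+//		for _, ev := range wresp.Events {
+//			_ = ev // handle PUT/DELETE events
+//		}
+//	}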
+
+type WatchResponse struct {
+	Header pb.ResponseHeader
+	Events []*Event
+
+	// CompactRevision is the minimum revision the watcher may receive.
+	CompactRevision int64
+
+	// Canceled is used to indicate watch failure.
+	// If the watch failed and the stream was about to close, the channel sends
+	// a final response with Canceled set to true and a non-nil Err() before closing.
+	Canceled bool
+
+	// Created is used to indicate the creation of the watcher.
+	Created bool
+
+	closeErr error
+
+	// cancelReason is the reason for canceling the watch
+	cancelReason string
+}
+
+// IsCreate returns true if the event indicates that the key was newly created.
+func (e *Event) IsCreate() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event indicates that a new value was put on an existing key.
+func (e *Event) IsModify() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+	switch {
+	case wr.closeErr != nil:
+		return v3rpc.Error(wr.closeErr)
+	case wr.CompactRevision != 0:
+		return v3rpc.ErrCompacted
+	case wr.Canceled:
+		if len(wr.cancelReason) != 0 {
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+		}
+		return v3rpc.ErrFutureRev
+	}
+	return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
+
+// watcher implements the Watcher interface
+type watcher struct {
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// mu protects the grpc streams map
+	mu sync.RWMutex
+
+	// streams holds all the active grpc streams keyed by ctx value.
+	streams map[string]*watchGrpcStream
+}
+
+// watchGrpcStream tracks all watch resources attached to a single grpc stream.
+type watchGrpcStream struct {
+	owner    *watcher
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// ctx controls internal remote.Watch requests
+	ctx context.Context
+	// ctxKey is the key used when looking up this stream's context
+	ctxKey string
+	cancel context.CancelFunc
+
+	// substreams holds all active watchers on this grpc stream
+	substreams map[int64]*watcherStream
+	// resuming holds all resuming watchers on this grpc stream
+	resuming []*watcherStream
+
+	// reqc sends a watch request from Watch() to the main goroutine
+	reqc chan watchStreamRequest
+	// respc receives data from the watch client
+	respc chan *pb.WatchResponse
+	// donec closes to broadcast shutdown
+	donec chan struct{}
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
+	errc chan error
+	// closingc gets the watcherStream of closing watchers
+	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
+
+	// resumec closes to signal that all substreams should begin resuming
+	resumec chan struct{}
+	// closeErr is the error that closed the watch stream
+	closeErr error
+}
+
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+	toPB() *pb.WatchRequest
+}
+
+// watchRequest is issued by the subscriber to start a new watcher
+type watchRequest struct {
+	ctx context.Context
+	key string
+	end string
+	rev int64
+
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
+	progressNotify bool
+	// fragmentation should be disabled by default
+	// if true, split watch events when total exceeds
+	// "--max-request-bytes" flag value + 512-byte
+	fragment bool
+
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
+	// get the previous key-value pair before the event happens
+	prevKV bool
+	// retc receives a chan WatchResponse once the watcher is established
+	retc chan chan WatchResponse
+}
+
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct {
+}
+
+// watcherStream represents a registered watcher
+type watcherStream struct {
+	// initReq is the request that initiated this watcher stream
+	initReq watchRequest
+
+	// outc publishes watch responses to subscriber
+	outc chan WatchResponse
+	// recvc buffers watch responses before publishing
+	recvc chan *WatchResponse
+	// donec closes when the watcherStream goroutine stops.
+	donec chan struct{}
+	// closing is set to true when stream should be scheduled to shutdown.
+	closing bool
+	// id is the registered watch id on the grpc stream
+	id int64
+
+	// buf holds all events received from etcd but not yet consumed by the client
+	buf []*WatchResponse
+}
+
+func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+	w := &watcher{
+		remote:  wc,
+		streams: make(map[string]*watchGrpcStream),
+	}
+	if c != nil {
+		w.callOpts = c.callOpts
+	}
+	return w
+}
+
+// never closes
+var valCtxCh = make(chan struct{})
+var zeroTime = time.Unix(0, 0)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
+func (vc *valCtx) Err() error                  { return nil }
+
+func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
+	ctx, cancel := context.WithCancel(&valCtx{inctx})
+	wgs := &watchGrpcStream{
+		owner:      w,
+		remote:     w.remote,
+		callOpts:   w.callOpts,
+		ctx:        ctx,
+		ctxKey:     streamKeyFromCtx(inctx),
+		cancel:     cancel,
+		substreams: make(map[int64]*watcherStream),
+		respc:      make(chan *pb.WatchResponse),
+		reqc:       make(chan watchStreamRequest),
+		donec:      make(chan struct{}),
+		errc:       make(chan error, 1),
+		closingc:   make(chan *watcherStream),
+		resumec:    make(chan struct{}),
+	}
+	go wgs.run()
+	return wgs
+}
+
+// Watch posts a watch request to run() and waits for a new watcher channel
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+	ow := opWatch(key, opts...)
+
+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
+	wr := &watchRequest{
+		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
+		key:            string(ow.key),
+		end:            string(ow.end),
+		rev:            ow.rev,
+		progressNotify: ow.progressNotify,
+		fragment:       ow.fragment,
+		filters:        filters,
+		prevKV:         ow.prevKV,
+		retc:           make(chan chan WatchResponse, 1),
+	}
+
+	ok := false
+	ctxKey := streamKeyFromCtx(ctx)
+
+	// find or allocate appropriate grpc watch stream
+	w.mu.Lock()
+	if w.streams == nil {
+		// closed
+		w.mu.Unlock()
+		ch := make(chan WatchResponse)
+		close(ch)
+		return ch
+	}
+	wgs := w.streams[ctxKey]
+	if wgs == nil {
+		wgs = w.newWatcherGrpcStream(ctx)
+		w.streams[ctxKey] = wgs
+	}
+	donec := wgs.donec
+	reqc := wgs.reqc
+	w.mu.Unlock()
+
+	// couldn't create channel; return closed channel
+	closeCh := make(chan WatchResponse, 1)
+
+	// submit request
+	select {
+	case reqc <- wr:
+		ok = true
+	case <-wr.ctx.Done():
+	case <-donec:
+		if wgs.closeErr != nil {
+			closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+			break
+		}
+		// retry; may have dropped stream from no ctxs
+		return w.Watch(ctx, key, opts...)
+	}
+
+	// receive channel
+	if ok {
+		select {
+		case ret := <-wr.retc:
+			return ret
+		case <-ctx.Done():
+		case <-donec:
+			if wgs.closeErr != nil {
+				closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+				break
+			}
+			// retry; may have dropped stream from no ctxs
+			return w.Watch(ctx, key, opts...)
+		}
+	}
+
+	close(closeCh)
+	return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+	w.mu.Lock()
+	streams := w.streams
+	w.streams = nil
+	w.mu.Unlock()
+	for _, wgs := range streams {
+		if werr := wgs.close(); werr != nil {
+			err = werr
+		}
+	}
+	// Consider context.Canceled as a successful close
+	if err == context.Canceled {
+		err = nil
+	}
+	return err
+}
+
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+	ctxKey := streamKeyFromCtx(ctx)
+
+	w.mu.Lock()
+	if w.streams == nil {
+		w.mu.Unlock()
+		return fmt.Errorf("no stream found for context")
+	}
+	wgs := w.streams[ctxKey]
+	if wgs == nil {
+		wgs = w.newWatcherGrpcStream(ctx)
+		w.streams[ctxKey] = wgs
+	}
+	donec := wgs.donec
+	reqc := wgs.reqc
+	w.mu.Unlock()
+
+	pr := &progressRequest{}
+
+	select {
+	case reqc <- pr:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-donec:
+		if wgs.closeErr != nil {
+			return wgs.closeErr
+		}
+		// retry; may have dropped stream from no ctxs
+		return w.RequestProgress(ctx)
+	}
+}
+
+func (w *watchGrpcStream) close() (err error) {
+	w.cancel()
+	<-w.donec
+	select {
+	case err = <-w.errc:
+	default:
+	}
+	return toErr(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGrpcStream) {
+	w.mu.Lock()
+	close(wgs.donec)
+	wgs.cancel()
+	if w.streams != nil {
+		delete(w.streams, wgs.ctxKey)
+	}
+	w.mu.Unlock()
+}
+
+func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+	// check watch ID for backward compatibility (<= v3.3)
+	if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+		w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
+		// failed; no channel
+		close(ws.recvc)
+		return
+	}
+	ws.id = resp.WatchId
+	w.substreams[ws.id] = ws
+}
+
+func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+	select {
+	case ws.outc <- *resp:
+	case <-ws.initReq.ctx.Done():
+	case <-time.After(closeSendErrTimeout):
+	}
+	close(ws.outc)
+}
+
+func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
+	// send channel response in case stream was never established
+	select {
+	case ws.initReq.retc <- ws.outc:
+	default:
+	}
+	// close subscriber's channel
+	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+		go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
+	} else if ws.outc != nil {
+		close(ws.outc)
+	}
+	if ws.id != -1 {
+		delete(w.substreams, ws.id)
+		return
+	}
+	for i := range w.resuming {
+		if w.resuming[i] == ws {
+			w.resuming[i] = nil
+			return
+		}
+	}
+}
+
+// run is the root of the goroutines for managing a watcher client
+func (w *watchGrpcStream) run() {
+	var wc pb.Watch_WatchClient
+	var closeErr error
+
+	// substreams marked to close but goroutine still running; needed for
+	// avoiding double-closing recvc on grpc stream teardown
+	closing := make(map[*watcherStream]struct{})
+
+	defer func() {
+		w.closeErr = closeErr
+		// shutdown substreams and resuming substreams
+		for _, ws := range w.substreams {
+			if _, ok := closing[ws]; !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		for _, ws := range w.resuming {
+			if _, ok := closing[ws]; ws != nil && !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		w.joinSubstreams()
+		for range closing {
+			w.closeSubstream(<-w.closingc)
+		}
+		w.wg.Wait()
+		w.owner.closeStream(w)
+	}()
+
+	// start a stream with the etcd grpc server
+	if wc, closeErr = w.newWatchClient(); closeErr != nil {
+		return
+	}
+
+	cancelSet := make(map[int64]struct{})
+
+	var cur *pb.WatchResponse
+	for {
+		select {
+		// Watch() requested
+		case req := <-w.reqc:
+			switch wreq := req.(type) {
+			case *watchRequest:
+				outc := make(chan WatchResponse, 1)
+				// TODO: pass custom watch ID?
+				ws := &watcherStream{
+					initReq: *wreq,
+					id:      -1,
+					outc:    outc,
+					// unbuffered so resumes won't cause repeat events
+					recvc: make(chan *WatchResponse),
+				}
+
+				ws.donec = make(chan struct{})
+				w.wg.Add(1)
+				go w.serveSubstream(ws, w.resumec)
+
+				// queue up for watcher creation/resume
+				w.resuming = append(w.resuming, ws)
+				if len(w.resuming) == 1 {
+					// head of resume queue, can register a new watcher
+					wc.Send(ws.initReq.toPB())
+				}
+			case *progressRequest:
+				wc.Send(wreq.toPB())
+			}
+
+		// new events from the watch client
+		case pbresp := <-w.respc:
+			if cur == nil || pbresp.Created || pbresp.Canceled {
+				cur = pbresp
+			} else if cur != nil && cur.WatchId == pbresp.WatchId {
+				// merge new events
+				cur.Events = append(cur.Events, pbresp.Events...)
+				// update "Fragment" field; last response with "Fragment" == false
+				cur.Fragment = pbresp.Fragment
+			}
+
+			switch {
+			case pbresp.Created:
+				// response to head of queue creation
+				if ws := w.resuming[0]; ws != nil {
+					w.addSubstream(pbresp, ws)
+					w.dispatchEvent(pbresp)
+					w.resuming[0] = nil
+				}
+
+				if ws := w.nextResume(); ws != nil {
+					wc.Send(ws.initReq.toPB())
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case pbresp.Canceled && pbresp.CompactRevision == 0:
+				delete(cancelSet, pbresp.WatchId)
+				if ws, ok := w.substreams[pbresp.WatchId]; ok {
+					// signal to stream goroutine to update closingc
+					close(ws.recvc)
+					closing[ws] = struct{}{}
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case cur.Fragment:
+				// watch response events are still fragmented
+				// continue to fetch next fragmented event arrival
+				continue
+
+			default:
+				// dispatch to appropriate watch stream
+				ok := w.dispatchEvent(cur)
+
+				// reset for next iteration
+				cur = nil
+
+				if ok {
+					break
+				}
+
+				// watch response on unexpected watch id; cancel id
+				if _, ok := cancelSet[pbresp.WatchId]; ok {
+					break
+				}
+
+				cancelSet[pbresp.WatchId] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: pbresp.WatchId,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				wc.Send(req)
+			}
+
+		// watch client failed on Recv; spawn another if possible
+		case err := <-w.errc:
+			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+				closeErr = err
+				return
+			}
+			if wc, closeErr = w.newWatchClient(); closeErr != nil {
+				return
+			}
+			if ws := w.nextResume(); ws != nil {
+				wc.Send(ws.initReq.toPB())
+			}
+			cancelSet = make(map[int64]struct{})
+
+		case <-w.ctx.Done():
+			return
+
+		case ws := <-w.closingc:
+			w.closeSubstream(ws)
+			delete(closing, ws)
+			// no more watchers on this stream, shutdown
+			if len(w.substreams)+len(w.resuming) == 0 {
+				return
+			}
+		}
+	}
+}
+
+// nextResume chooses the next resuming to register with the grpc stream. Abandoned
+// streams are marked as nil in the queue since the head must wait for its inflight registration.
+func (w *watchGrpcStream) nextResume() *watcherStream {
+	for len(w.resuming) != 0 {
+		if w.resuming[0] != nil {
+			return w.resuming[0]
+		}
+		w.resuming = w.resuming[1:]
+	}
+	return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+	events := make([]*Event, len(pbresp.Events))
+	for i, ev := range pbresp.Events {
+		events[i] = (*Event)(ev)
+	}
+	// TODO: return watch ID?
+	wr := &WatchResponse{
+		Header:          *pbresp.Header,
+		Events:          events,
+		CompactRevision: pbresp.CompactRevision,
+		Created:         pbresp.Created,
+		Canceled:        pbresp.Canceled,
+		cancelReason:    pbresp.CancelReason,
+	}
+
+	// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
+	// indicate they should be broadcast.
+	if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+		return w.broadcastResponse(wr)
+	}
+
+	return w.unicastResponse(wr, pbresp.WatchId)
+
+}
+
+// broadcastResponse sends a watch response to all watch substreams.
+func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
+	for _, ws := range w.substreams {
+		select {
+		case ws.recvc <- wr:
+		case <-ws.donec:
+		}
+	}
+	return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
+	ws, ok := w.substreams[watchId]
+	if !ok {
+		return false
+	}
+	select {
+	case ws.recvc <- wr:
+	case <-ws.donec:
+		return false
+	}
+	return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
+	for {
+		resp, err := wc.Recv()
+		if err != nil {
+			select {
+			case w.errc <- err:
+			case <-w.donec:
+			}
+			return
+		}
+		select {
+		case w.respc <- resp:
+		case <-w.donec:
+			return
+		}
+	}
+}
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+	if ws.closing {
+		panic("created substream goroutine but substream is closing")
+	}
+
+	// nextRev is the minimum expected next revision
+	nextRev := ws.initReq.rev
+	resuming := false
+	defer func() {
+		if !resuming {
+			ws.closing = true
+		}
+		close(ws.donec)
+		if !resuming {
+			w.closingc <- ws
+		}
+		w.wg.Done()
+	}()
+
+	emptyWr := &WatchResponse{}
+	for {
+		curWr := emptyWr
+		outc := ws.outc
+
+		if len(ws.buf) > 0 {
+			curWr = ws.buf[0]
+		} else {
+			outc = nil
+		}
+		select {
+		case outc <- *curWr:
+			if ws.buf[0].Err() != nil {
+				return
+			}
+			ws.buf[0] = nil
+			ws.buf = ws.buf[1:]
+		case wr, ok := <-ws.recvc:
+			if !ok {
+				// shutdown from closeSubstream
+				return
+			}
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// prevent the next write from taking the slot in the buffered
+					// channel and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+					// once the watch channel is returned, a current revision
+					// watch must resume at the store revision. This is necessary
+					// for the following case to work as expected:
+					//	wch := m1.Watch("a")
+					//	m2.Put("a", "b")
+					//	<-wch
+					// If the revision were bound only on the first observed event,
+					// and wch disconnected before the Put was issued but reconnected
+					// after it was committed, it would miss the Put.
+					if ws.initReq.rev == 0 {
+						nextRev = wr.Header.Revision
+					}
+				}
+			} else {
+				// current progress of watch; <= store revision
+				nextRev = wr.Header.Revision
+			}
+
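+			// advance the resume point just past the newest delivered event
+			// (see the worked example after this function)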
+			if len(wr.Events) > 0 {
+				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+			}
+			ws.initReq.rev = nextRev
+
+			// the created event was already sent above;
+			// the watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
+		case <-w.ctx.Done():
+			return
+		case <-ws.initReq.ctx.Done():
+			return
+		case <-resumec:
+			resuming = true
+			return
+		}
+	}
+	// note: responses arriving on an unknown watch id are cancelled lazily by run()
+}
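+
+// resumeRevisionSketch is a hypothetical standalone mirror (not part of the
+// client) of the revision bookkeeping in serveSubstream above: the header
+// revision is the floor, and any delivered event advances the resume point
+// past itself. For example, events with ModRevisions 5, 6, 7 yield a resume
+// revision of 8, so a reconnected watch replays nothing already delivered.
+func resumeRevisionSketch(headerRev int64, modRevisions []int64) int64 {
+	nextRev := headerRev
+	if n := len(modRevisions); n > 0 {
+		nextRev = modRevisions[n-1] + 1
+	}
+	return nextRev
+}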
+
+func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
+	// mark all substreams as resuming
+	close(w.resumec)
+	w.resumec = make(chan struct{})
+	w.joinSubstreams()
+	for _, ws := range w.substreams {
+		ws.id = -1
+		w.resuming = append(w.resuming, ws)
+	}
+	// strip out nils, if any
+	var resuming []*watcherStream
+	for _, ws := range w.resuming {
+		if ws != nil {
+			resuming = append(resuming, ws)
+		}
+	}
+	w.resuming = resuming
+	w.substreams = make(map[int64]*watcherStream)
+
+	// connect to grpc stream while accepting watcher cancellation
+	stopc := make(chan struct{})
+	donec := w.waitCancelSubstreams(stopc)
+	wc, err := w.openWatchClient()
+	close(stopc)
+	<-donec
+
+	// serve all non-closing streams, even if there's a client error,
+	// so that the teardown path can shut down the streams as expected.
+	for _, ws := range w.resuming {
+		if ws.closing {
+			continue
+		}
+		ws.donec = make(chan struct{})
+		w.wg.Add(1)
+		go w.serveSubstream(ws, w.resumec)
+	}
+
+	if err != nil {
+		return nil, v3rpc.Error(err)
+	}
+
+	// receive data from new grpc stream
+	go w.serveWatchClient(wc)
+	return wc, nil
+}
+
+func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+	var wg sync.WaitGroup
+	wg.Add(len(w.resuming))
+	donec := make(chan struct{})
+	for i := range w.resuming {
+		go func(ws *watcherStream) {
+			defer wg.Done()
+			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
+				return
+			}
+			select {
+			case <-ws.initReq.ctx.Done():
+				// closed ws will be removed from resuming
+				ws.closing = true
+				close(ws.outc)
+				ws.outc = nil
+				w.wg.Add(1)
+				go func() {
+					defer w.wg.Done()
+					w.closingc <- ws
+				}()
+			case <-stopc:
+			}
+		}(w.resuming[i])
+	}
+	go func() {
+		defer close(donec)
+		wg.Wait()
+	}()
+	return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGrpcStream) joinSubstreams() {
+	for _, ws := range w.substreams {
+		<-ws.donec
+	}
+	for _, ws := range w.resuming {
+		if ws != nil {
+			<-ws.donec
+		}
+	}
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+// openWatchClient retries opening a watch client until success or halt; it
+// retries manually because the RPC may return "ws == nil && err == nil".
+// TODO: remove FailFast=false
+func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+	backoff := time.Millisecond
+	for {
+		select {
+		case <-w.ctx.Done():
+			if err == nil {
+				return nil, w.ctx.Err()
+			}
+			return nil, err
+		default:
+		}
+		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+			break
+		}
+		if isHaltErr(w.ctx, err) {
+			return nil, v3rpc.Error(err)
+		}
+		if isUnavailableErr(w.ctx, err) {
+			// retry, but backoff
+			if backoff < maxBackoff {
+				// 25% backoff factor
+				backoff = backoff + backoff/4
+				if backoff > maxBackoff {
+					backoff = maxBackoff
+				}
+			}
+			time.Sleep(backoff)
+		}
+	}
+	return ws, nil
+}
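+
+// nextBackoffSketch is a hypothetical helper (illustration only, not used by
+// the client) isolating the retry delay rule applied above: grow the delay by
+// 25% per attempt and clamp it at maxBackoff. Starting from 1ms the sequence
+// is 1ms, 1.25ms, 1.5625ms, ..., capped at 100ms.
+func nextBackoffSketch(cur time.Duration) time.Duration {
+	cur += cur / 4
+	if cur > maxBackoff {
+		cur = maxBackoff
+	}
+	return cur
+}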
+
+// toPB converts an internal watch request structure to its protobuf
+// WatchRequest counterpart (an example message follows this function).
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchCreateRequest{
+		StartRevision:  wr.rev,
+		Key:            []byte(wr.key),
+		RangeEnd:       []byte(wr.end),
+		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
+		PrevKv:         wr.prevKV,
+		Fragment:       wr.fragment,
+	}
+	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
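+
+// For illustration (assumed values, not upstream code): a watch over the key
+// range ["foo/", "foo0") starting at revision 10 marshals to roughly
+//
+//	&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
+//		CreateRequest: &pb.WatchCreateRequest{
+//			Key:           []byte("foo/"),
+//			RangeEnd:      []byte("foo0"),
+//			StartRevision: 10,
+//		},
+//	}}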
+
+// toPB converts an internal progress request structure to its protobuf
+// WatchRequest counterpart.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchProgressRequest{}
+	cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
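+// streamKeyFromCtx derives the key used to multiplex watchers onto gRPC
+// streams: contexts with identical outgoing metadata share one stream
+// (usage sketch after this function).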
+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", md)
+	}
+	return ""
+}
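+
+// Usage sketch (hypothetical values): two contexts carrying identical outgoing
+// gRPC metadata produce the same key and therefore share one watch stream; a
+// context without metadata maps to the empty key.
+//
+//	ctx := metadata.AppendToOutgoingContext(context.Background(), "token", "abc")
+//	key := streamKeyFromCtx(ctx) // e.g. "map[token:[abc]]"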