[SEBA-660] Add IGMP support in BBSim

Change-Id: I9f5c7d8ad39ac82850b04e2c997996d6c47b32d2
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
index 024408e..a11e8cb 100644
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ b/vendor/google.golang.org/grpc/.travis.yml
@@ -2,18 +2,20 @@
 
 matrix:
   include:
-  - go: 1.12.x
+  - go: 1.13.x
     env: VET=1 GO111MODULE=on
-  - go: 1.12.x
+  - go: 1.13.x
     env: RACE=1 GO111MODULE=on
-  - go: 1.12.x
+  - go: 1.13.x
     env: RUN386=1
-  - go: 1.12.x
+  - go: 1.13.x
     env: GRPC_GO_RETRY=on
+  - go: 1.13.x
+    env: TESTEXTRAS=1
+  - go: 1.12.x
+    env: GO111MODULE=on
   - go: 1.11.x
     env: GO111MODULE=on
-  - go: 1.10.x
-  - go: 1.9.x
   - go: 1.9.x
     env: GAE=1
 
@@ -23,17 +25,18 @@
   - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
   - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
   - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi
+  - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
 
 install:
   - try3() { eval "$*" || eval "$*" || eval "$*"; }
   - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
-  - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi
-  - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi
+  - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
+  - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
 
 script:
   - set -e
-  - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi
-  - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi
-  - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi
+  - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi
+  - if [[ -n "${VET}" ]]; then ./vet.sh; fi
+  - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
+  - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
   - make test
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000..9d4213e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 6e69b28..4f1567e 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -1,6 +1,8 @@
 # How to contribute
 
-We definitely welcome your patches and contributions to gRPC!
+We definitely welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
+and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
 
 If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
 
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 0000000..d6ff267
--- /dev/null
+++ b/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
new file mode 100644
index 0000000..093c82b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -0,0 +1,27 @@
+This page lists all active maintainers of this repository. If you were a
+maintainer and would like to add your name to the Emeritus list, please send us a
+PR.
+
+See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
+for governance guidelines and how to become a maintainer.
+See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
+for general contribution guidelines.
+
+## Maintainers (in alphabetical order)
+- [canguler](https://github.com/canguler), Google LLC
+- [cesarghali](https://github.com/cesarghali), Google LLC
+- [dfawley](https://github.com/dfawley), Google LLC
+- [easwars](https://github.com/easwars), Google LLC
+- [jadekler](https://github.com/jadekler), Google LLC
+- [menghanl](https://github.com/menghanl), Google LLC
+- [srini100](https://github.com/srini100), Google LLC
+
+## Emeritus Maintainers (in alphabetical order)
+- [adelez](https://github.com/adelez), Google LLC
+- [iamqizhao](https://github.com/iamqizhao), Google LLC
+- [jtattermusch](https://github.com/jtattermusch), Google LLC
+- [lyuxuan](https://github.com/lyuxuan), Google LLC
+- [makmukhi](https://github.com/makmukhi), Google LLC
+- [matt-kwong](https://github.com/matt-kwong), Google LLC
+- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
+- [yongni](https://github.com/yongni), Google LLC
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index db982aa..410f7d5 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -19,6 +19,9 @@
 test: testdeps
 	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
+testsubmodule: testdeps
+	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+
 testappengine: testappenginedeps
 	goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 0000000..68ffc62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// All APIs in this package are EXPERIMENTAL.
+package attributes
+
+import "fmt"
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs.  Keys must be hashable, and users should define their own
+// types for keys.
+type Attributes struct {
+	m map[interface{}]interface{}
+}
+
+// New returns a new Attributes containing all key/value pairs in kvs.  If the
+// same key appears multiple times, the last value overwrites all previous
+// values for that key.  Panics if len(kvs) is not even.
+func New(kvs ...interface{}) *Attributes {
+	if len(kvs)%2 != 0 {
+		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+	}
+	a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
+	for i := 0; i < len(kvs)/2; i++ {
+		a.m[kvs[i*2]] = kvs[i*2+1]
+	}
+	return a
+}
+
+// WithValues returns a new Attributes containing all key/value pairs in a and
+// kvs.  Panics if len(kvs) is not even.  If the same key appears multiple
+// times, the last value overwrites all previous values for that key.  To
+// remove an existing key, use a nil value.
+func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
+	if len(kvs)%2 != 0 {
+		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+	}
+	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	for i := 0; i < len(kvs)/2; i++ {
+		n.m[kvs[i*2]] = kvs[i*2+1]
+	}
+	return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.
+func (a *Attributes) Value(key interface{}) interface{} {
+	return a.m[key]
+}
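
For reference, a minimal usage sketch of the attributes API vendored above; this is illustrative only and not part of the patch. The key type addrInfoKey is a hypothetical example (keys are expected to be user-defined, comparable types).

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// addrInfoKey is a hypothetical key type; users define their own key types.
type addrInfoKey struct{}

func main() {
	// Build an immutable attribute set from key/value pairs.
	a := attributes.New(addrInfoKey{}, "primary")

	// WithValues returns a new Attributes; the original is left unchanged.
	b := a.WithValues(addrInfoKey{}, "backup")

	fmt.Println(a.Value(addrInfoKey{})) // primary
	fmt.Println(b.Value(addrInfoKey{})) // backup
}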
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index 97c6e25..ff7c3ee 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -23,16 +23,36 @@
 
 import (
 	"time"
+
+	"google.golang.org/grpc/backoff"
 )
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
 var DefaultBackoffConfig = BackoffConfig{
 	MaxDelay: 120 * time.Second,
 }
 
 // BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
 type BackoffConfig struct {
 	// MaxDelay is the upper bound of backoff delay.
 	MaxDelay time.Duration
 }
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This API is EXPERIMENTAL.
+type ConnectParams struct {
+	// Backoff specifies the configuration options for connection backoff.
+	Backoff backoff.Config
+	// MinConnectTimeout is the minimum amount of time we are willing to give a
+	// connection to complete.
+	MinConnectTimeout time.Duration
+}
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
new file mode 100644
index 0000000..0787d0b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
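
For reference, a sketch of how the new backoff.Config and ConnectParams types vendored above would typically be wired into a dial; illustrative only and not part of the patch. It assumes grpc.WithConnectParams is available in this grpc-go version, and the dial helper below is hypothetical.

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

// dial is a hypothetical helper showing ConnectParams with a customized
// backoff configuration.
func dial(target string) (*grpc.ClientConn, error) {
	// Start from DefaultConfig and override only MaxDelay, keeping the
	// documented defaults for BaseDelay, Multiplier and Jitter.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
}

Starting from backoff.DefaultConfig and overriding a single field keeps the remaining parameters aligned with the connection-backoff doc referenced in the package comment.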
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index a78e702..a8eb0f4 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -43,7 +43,7 @@
 
 // BalancerConfig specifies the configurations for Balancer.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type BalancerConfig struct {
 	// DialCreds is the transport credential the Balancer implementation can
 	// use to dial to a remote load balancer server. The Balancer implementations
@@ -57,7 +57,7 @@
 
 // BalancerGetOptions configures a Get call.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type BalancerGetOptions struct {
 	// BlockingWait specifies whether Get should block when there is no
 	// connected address.
@@ -66,7 +66,7 @@
 
 // Balancer chooses network addresses for RPCs.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type Balancer interface {
 	// Start does the initialization work to bootstrap a Balancer. For example,
 	// this function may start the name resolution and watch the updates. It will
@@ -120,7 +120,7 @@
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
 //
-// Deprecated: please use package balancer/roundrobin.
+// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
 func RoundRobin(r naming.Resolver) Balancer {
 	return &roundRobin{r: r}
 }
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index c266f4e..9258858 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -117,6 +117,15 @@
 	HealthCheckEnabled bool
 }
 
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker V2Picker
+}
+
 // ClientConn represents a gRPC ClientConn.
 //
 // This interface is to be implemented by gRPC. Users should not need a
@@ -137,10 +146,19 @@
 	//
 	// gRPC will update the connectivity state of the ClientConn, and will call pick
 	// on the new picker to pick new SubConn.
+	//
+	// Deprecated: use UpdateState instead
 	UpdateBalancerState(s connectivity.State, p Picker)
 
+	// UpdateState notifies gRPC that the balancer's internal state has
+	// changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call pick
+	// on the new picker to pick new SubConns.
+	UpdateState(State)
+
 	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
-	ResolveNow(resolver.ResolveNowOption)
+	ResolveNow(resolver.ResolveNowOptions)
 
 	// Target returns the dial target for this ClientConn.
 	//
@@ -185,11 +203,14 @@
 	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
 }
 
-// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
 	// FullMethodName is the method name that NewClientStream() is called
 	// with. The canonical format is /service/Method.
 	FullMethodName string
+	// Ctx is the RPC's context, and may contain relevant RPC-level information
+	// like the outgoing header metadata.
+	Ctx context.Context
 }
 
 // DoneInfo contains additional information for done.
@@ -215,7 +236,7 @@
 	ErrNoSubConnAvailable = errors.New("no SubConn is available")
 	// ErrTransientFailure indicates all SubConns are in TransientFailure.
 	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
-	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+	ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
 )
 
 // Picker is used by gRPC to pick a SubConn to send an RPC.
@@ -223,6 +244,8 @@
 // internal state has changed.
 //
 // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+//
+// Deprecated: use V2Picker instead
 type Picker interface {
 	// Pick returns the SubConn to be used to send the RPC.
 	// The returned SubConn must be one returned by NewSubConn().
@@ -243,18 +266,76 @@
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
-	// - If the error is ErrTransientFailure:
+	// - If the error is ErrTransientFailure or implements IsTransientFailure()
+	//   bool, returning true:
 	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
 	//     is called to pick again;
 	//   - Otherwise, RPC will fail with unavailable error.
 	// - Else (error is other non-nil error):
-	//   - The RPC will fail with unavailable error.
+	//   - The RPC will fail with the error's status code, or Unknown if it is
+	//     not a status error.
 	//
 	// The returned done() function will be called once the rpc has finished,
 	// with the final status of that RPC.  If the SubConn returned is not a
 	// valid SubConn type, done may not be called.  done may be nil if balancer
 	// doesn't care about the RPC status.
-	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+	Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+	// SubConn is the connection to use for this pick, if its state is Ready.
+	// If the state is not Ready, gRPC will block the RPC until a new Picker is
+	// provided by the balancer (using ClientConn.UpdateState).  The SubConn
+	// must be one returned by ClientConn.NewSubConn.
+	SubConn SubConn
+
+	// Done is called when the RPC is completed.  If the SubConn is not ready,
+	// this will be called with a nil parameter.  If the SubConn is not a valid
+	// type, Done may not be called.  May be nil if the balancer does not wish
+	// to be notified when the RPC completes.
+	Done func(DoneInfo)
+}
+
+type transientFailureError struct {
+	error
+}
+
+func (e *transientFailureError) IsTransientFailure() bool { return true }
+
+// TransientFailureError wraps err in an error implementing
+// IsTransientFailure() bool, returning true.
+func TransientFailureError(err error) error {
+	return &transientFailureError{error: err}
+}
+
+// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type V2Picker interface {
+	// Pick returns the connection to use for this RPC and related information.
+	//
+	// Pick should not block.  If the balancer needs to do I/O or any blocking
+	// or time-consuming work to service this call, it should return
+	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+	// the Picker is updated (using ClientConn.UpdateState).
+	//
+	// If an error is returned:
+	//
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+	//   Picker is provided by the balancer (using ClientConn.UpdateState).
+	//
+	// - If the error implements IsTransientFailure() bool, returning true,
+	//   wait for ready RPCs will wait, but non-wait for ready RPCs will be
+	//   terminated with this error's Error() string and status code
+	//   Unavailable.
+	//
+	// - Any other errors terminate all RPCs with the code and message
+	//   provided.  If the error is not a status error, it will be converted by
+	//   gRPC to a status error with code Unknown.
+	Pick(info PickInfo) (PickResult, error)
 }
 
 // Balancer takes input from gRPC, manages SubConns, and collects and aggregates
@@ -292,8 +373,11 @@
 
 // SubConnState describes the state of a SubConn.
 type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
 	ConnectivityState connectivity.State
-	// TODO: add last connection error
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed.  Otherwise, it is nil.
+	ConnectionError error
 }
 
 // ClientConnState describes the state of a ClientConn relevant to the
@@ -305,14 +389,23 @@
 	BalancerConfig serviceconfig.LoadBalancingConfig
 }
 
+// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
+// problem with the provided name resolver data.
+var ErrBadResolverState = errors.New("bad resolver state")
+
 // V2Balancer is defined for documentation purposes.  If a Balancer also
 // implements V2Balancer, its UpdateClientConnState method will be called
 // instead of HandleResolvedAddrs and its UpdateSubConnState will be called
 // instead of HandleSubConnStateChange.
 type V2Balancer interface {
 	// UpdateClientConnState is called by gRPC when the state of the ClientConn
-	// changes.
-	UpdateClientConnState(ClientConnState)
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
 	// UpdateSubConnState is called by gRPC when the state of a SubConn
 	// changes.
 	UpdateSubConnState(SubConn, SubConnState)
@@ -326,9 +419,8 @@
 //
 // It's not thread safe.
 type ConnectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
+	numReady      uint64 // Number of addrConns in ready state.
+	numConnecting uint64 // Number of addrConns in connecting state.
 }
 
 // RecordTransition records state change happening in subConn and based on that
@@ -348,8 +440,6 @@
 			cse.numReady += updateVal
 		case connectivity.Connecting:
 			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
 		}
 	}
 
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index e587d8d..d952f09 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -20,6 +20,7 @@
 
 import (
 	"context"
+	"errors"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
@@ -28,34 +29,44 @@
 )
 
 type baseBuilder struct {
-	name          string
-	pickerBuilder PickerBuilder
-	config        Config
+	name            string
+	pickerBuilder   PickerBuilder
+	v2PickerBuilder V2PickerBuilder
+	config          Config
 }
 
 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &baseBalancer{
-		cc:            cc,
-		pickerBuilder: bb.pickerBuilder,
+	bal := &baseBalancer{
+		cc:              cc,
+		pickerBuilder:   bb.pickerBuilder,
+		v2PickerBuilder: bb.v2PickerBuilder,
 
 		subConns: make(map[resolver.Address]balancer.SubConn),
 		scStates: make(map[balancer.SubConn]connectivity.State),
 		csEvltr:  &balancer.ConnectivityStateEvaluator{},
-		// Initialize picker to a picker that always return
-		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
-		// may call UpdateBalancerState with this picker.
-		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
-		config: bb.config,
+		config:   bb.config,
 	}
+	// Initialize picker to a picker that always returns
+	// ErrNoSubConnAvailable, because when state of a SubConn changes, we
+	// may call UpdateState with this picker.
+	if bb.pickerBuilder != nil {
+		bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	} else {
+		bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+	}
+	return bal
 }
 
 func (bb *baseBuilder) Name() string {
 	return bb.name
 }
 
+var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
+
 type baseBalancer struct {
-	cc            balancer.ClientConn
-	pickerBuilder PickerBuilder
+	cc              balancer.ClientConn
+	pickerBuilder   PickerBuilder
+	v2PickerBuilder V2PickerBuilder
 
 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
@@ -63,6 +74,7 @@
 	subConns map[resolver.Address]balancer.SubConn
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
+	v2Picker balancer.V2Picker
 	config   Config
 }
 
@@ -70,10 +82,23 @@
 	panic("not implemented")
 }
 
-func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
+func (b *baseBalancer) ResolverError(err error) {
+	switch b.state {
+	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
+		if b.picker != nil {
+			b.picker = NewErrPicker(err)
+		} else {
+			b.v2Picker = NewErrPickerV2(err)
+		}
+	}
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
 	// TODO: handle s.ResolverState.ServiceConfig?
-	grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+	if grpclog.V(2) {
+		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+	}
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
 	for _, a := range s.ResolverState.Addresses {
@@ -99,26 +124,51 @@
 			// The entry will be deleted in HandleSubConnStateChange.
 		}
 	}
+	return nil
 }
 
 // regeneratePicker takes a snapshot of the balancer, and generates a picker
 // from it. The picker is
 //  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
 //  - built by the pickerBuilder with all READY SubConns otherwise.
-func (b *baseBalancer) regeneratePicker() {
+func (b *baseBalancer) regeneratePicker(err error) {
 	if b.state == connectivity.TransientFailure {
-		b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		if b.pickerBuilder != nil {
+			b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		} else {
+			if err != nil {
+				b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err))
+			} else {
+				// This means the last subchannel transition was not to
+				// TransientFailure (otherwise err must be set), but the
+				// aggregate state of the balancer is TransientFailure, meaning
+				// there are no other addresses.
+				b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses")))
+			}
+		}
 		return
 	}
-	readySCs := make(map[resolver.Address]balancer.SubConn)
+	if b.pickerBuilder != nil {
+		readySCs := make(map[resolver.Address]balancer.SubConn)
 
-	// Filter out all ready SCs from full subConn map.
-	for addr, sc := range b.subConns {
-		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-			readySCs[addr] = sc
+		// Filter out all ready SCs from full subConn map.
+		for addr, sc := range b.subConns {
+			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs[addr] = sc
+			}
 		}
+		b.picker = b.pickerBuilder.Build(readySCs)
+	} else {
+		readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+		// Filter out all ready SCs from full subConn map.
+		for addr, sc := range b.subConns {
+			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs[sc] = SubConnInfo{Address: addr}
+			}
+		}
+		b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 	}
-	b.picker = b.pickerBuilder.Build(readySCs)
 }
 
 func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
@@ -127,10 +177,14 @@
 
 func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	s := state.ConnectivityState
-	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	if grpclog.V(2) {
+		grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	}
 	oldS, ok := b.scStates[sc]
 	if !ok {
-		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		if grpclog.V(2) {
+			grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
 		return
 	}
 	b.scStates[sc] = s
@@ -153,10 +207,14 @@
 	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
 	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
 		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
-		b.regeneratePicker()
+		b.regeneratePicker(state.ConnectionError)
 	}
 
-	b.cc.UpdateBalancerState(b.state, b.picker)
+	if b.picker != nil {
+		b.cc.UpdateBalancerState(b.state, b.picker)
+	} else {
+		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
+	}
 }
 
 // Close is a nop because base balancer doesn't have internal state to clean up,
@@ -173,6 +231,19 @@
 	err error // Pick() always returns this err.
 }
 
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	return nil, nil, p.err
 }
+
+// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
+func NewErrPickerV2(err error) balancer.V2Picker {
+	return &errPickerV2{err: err}
+}
+
+type errPickerV2 struct {
+	err error // Pick() always returns this err.
+}
+
+func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
+}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 34b1f29..4192918 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -42,6 +42,26 @@
 	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
 }
 
+// V2PickerBuilder creates balancer.V2Picker.
+type V2PickerBuilder interface {
+	// Build returns a picker that will be used by gRPC to pick a SubConn.
+	Build(info PickerBuildInfo) balancer.V2Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+	// ReadySCs is a map from all ready SubConns to the Addresses used to
+	// create them.
+	ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+	Address resolver.Address // the address used to create this SubConn
+}
+
 // NewBalancerBuilder returns a balancer builder. The balancers
 // built by this builder will use the picker builder to build pickers.
 func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
@@ -62,3 +82,12 @@
 		config:        config,
 	}
 }
+
+// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
+func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
+	return &baseBuilder{
+		name:            name,
+		v2PickerBuilder: pb,
+		config:          config,
+	}
+}
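
For reference, a sketch of a custom balancer built with the new base.NewBalancerBuilderV2 / V2PickerBuilder API vendored above; illustrative only and not part of the patch. The first_ready_example name and the firstReadyBuilder/firstReadyPicker types are hypothetical.

package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is a hypothetical balancer name used for registration.
const Name = "first_ready_example"

func init() {
	// Register a balancer whose picker always uses the first ready SubConn.
	balancer.Register(base.NewBalancerBuilderV2(Name, &firstReadyBuilder{}, base.Config{}))
}

type firstReadyBuilder struct{}

func (*firstReadyBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	if len(info.ReadySCs) == 0 {
		// No ready connections: make wait-for-ready RPCs block until the
		// next picker update.
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	var sc balancer.SubConn
	for ready := range info.ReadySCs {
		sc = ready
		break
	}
	return &firstReadyPicker{sc: sc}
}

type firstReadyPicker struct {
	sc balancer.SubConn
}

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}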
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 29f7a4d..d4d6455 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,14 +22,12 @@
 package roundrobin
 
 import (
-	"context"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/grpcrand"
-	"google.golang.org/grpc/resolver"
 )
 
 // Name is the name of round_robin balancer.
@@ -37,7 +35,7 @@
 
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+	return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }
 
 func init() {
@@ -46,13 +44,13 @@
 
 type rrPickerBuilder struct{}
 
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
-	if len(readySCs) == 0 {
-		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
+	grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
 	}
 	var scs []balancer.SubConn
-	for _, sc := range readySCs {
+	for sc := range info.ReadySCs {
 		scs = append(scs, sc)
 	}
 	return &rrPicker{
@@ -74,10 +72,10 @@
 	next int
 }
 
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 	p.mu.Lock()
 	sc := p.subConns[p.next]
 	p.next = (p.next + 1) % len(p.subConns)
 	p.mu.Unlock()
-	return sc, nil, nil
+	return balancer.PickResult{SubConn: sc}, nil
 }
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index 7bc6621..824f28e 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -25,6 +25,8 @@
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/buffer"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -32,64 +34,17 @@
 type scStateUpdate struct {
 	sc    balancer.SubConn
 	state connectivity.State
-}
-
-// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
-// TODO make a general purpose buffer that uses interface{}.
-type scStateUpdateBuffer struct {
-	c       chan *scStateUpdate
-	mu      sync.Mutex
-	backlog []*scStateUpdate
-}
-
-func newSCStateUpdateBuffer() *scStateUpdateBuffer {
-	return &scStateUpdateBuffer{
-		c: make(chan *scStateUpdate, 1),
-	}
-}
-
-func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) == 0 {
-		select {
-		case b.c <- t:
-			return
-		default:
-		}
-	}
-	b.backlog = append(b.backlog, t)
-}
-
-func (b *scStateUpdateBuffer) load() {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) > 0 {
-		select {
-		case b.c <- b.backlog[0]:
-			b.backlog[0] = nil
-			b.backlog = b.backlog[1:]
-		default:
-		}
-	}
-}
-
-// get returns the channel that the scStateUpdate will be sent to.
-//
-// Upon receiving, the caller should call load to send another
-// scStateChangeTuple onto the channel if there is any.
-func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
-	return b.c
+	err   error
 }
 
 // ccBalancerWrapper is a wrapper on top of cc for balancers.
 // It implements balancer.ClientConn interface.
 type ccBalancerWrapper struct {
-	cc               *ClientConn
-	balancer         balancer.Balancer
-	stateChangeQueue *scStateUpdateBuffer
-	ccUpdateCh       chan *balancer.ClientConnState
-	done             chan struct{}
+	cc         *ClientConn
+	balancerMu sync.Mutex // synchronizes calls to the balancer
+	balancer   balancer.Balancer
+	scBuffer   *buffer.Unbounded
+	done       *grpcsync.Event
 
 	mu       sync.Mutex
 	subConns map[*acBalancerWrapper]struct{}
@@ -97,11 +52,10 @@
 
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
 	ccb := &ccBalancerWrapper{
-		cc:               cc,
-		stateChangeQueue: newSCStateUpdateBuffer(),
-		ccUpdateCh:       make(chan *balancer.ClientConnState, 1),
-		done:             make(chan struct{}),
-		subConns:         make(map[*acBalancerWrapper]struct{}),
+		cc:       cc,
+		scBuffer: buffer.NewUnbounded(),
+		done:     grpcsync.NewEvent(),
+		subConns: make(map[*acBalancerWrapper]struct{}),
 	}
 	go ccb.watcher()
 	ccb.balancer = b.Build(ccb, bopts)
@@ -113,36 +67,23 @@
 func (ccb *ccBalancerWrapper) watcher() {
 	for {
 		select {
-		case t := <-ccb.stateChangeQueue.get():
-			ccb.stateChangeQueue.load()
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
+		case t := <-ccb.scBuffer.Get():
+			ccb.scBuffer.Load()
+			if ccb.done.HasFired() {
+				break
 			}
+			ccb.balancerMu.Lock()
+			su := t.(*scStateUpdate)
 			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
+				ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
 			} else {
-				ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+				ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
 			}
-		case s := <-ccb.ccUpdateCh:
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
-			}
-			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateClientConnState(*s)
-			} else {
-				ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
-			}
-		case <-ccb.done:
+			ccb.balancerMu.Unlock()
+		case <-ccb.done.Done():
 		}
 
-		select {
-		case <-ccb.done:
+		if ccb.done.HasFired() {
 			ccb.balancer.Close()
 			ccb.mu.Lock()
 			scs := ccb.subConns
@@ -151,19 +92,17 @@
 			for acbw := range scs {
 				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 			}
-			ccb.UpdateBalancerState(connectivity.Connecting, nil)
+			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
 			return
-		default:
 		}
-		ccb.cc.firstResolveEvent.Fire()
 	}
 }
 
 func (ccb *ccBalancerWrapper) close() {
-	close(ccb.done)
+	ccb.done.Fire()
 }
 
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
 	// When updating addresses for a SubConn, if the address in use is not in
 	// the new addresses, the old ac will be tearDown() and a new ac will be
 	// created. tearDown() generates a state change with Shutdown state, we
@@ -174,30 +113,29 @@
 	if sc == nil {
 		return
 	}
-	ccb.stateChangeQueue.put(&scStateUpdate{
+	ccb.scBuffer.Put(&scStateUpdate{
 		sc:    sc,
 		state: s,
+		err:   err,
 	})
 }
 
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
-	if ccb.cc.curBalancerName != grpclbName {
-		// Filter any grpclb addresses since we don't have the grpclb balancer.
-		s := ccs.ResolverState
-		for i := 0; i < len(s.Addresses); {
-			if s.Addresses[i].Type == resolver.GRPCLB {
-				copy(s.Addresses[i:], s.Addresses[i+1:])
-				s.Addresses = s.Addresses[:len(s.Addresses)-1]
-				continue
-			}
-			i++
-		}
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.balancerMu.Lock()
+	defer ccb.balancerMu.Unlock()
+	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+		return ub.UpdateClientConnState(*ccs)
 	}
-	select {
-	case <-ccb.ccUpdateCh:
-	default:
+	ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
+	return nil
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+		ccb.balancerMu.Lock()
+		ub.ResolverError(err)
+		ccb.balancerMu.Unlock()
 	}
-	ccb.ccUpdateCh <- ccs
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -250,7 +188,22 @@
 	ccb.cc.csMgr.updateState(s)
 }
 
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
+	// Update picker before updating state.  Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC.  If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
+	ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
 	ccb.cc.resolveNow(o)
 }
 
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index 66e9a44..db04b08 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"context"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -49,7 +48,7 @@
 		csEvltr:    &balancer.ConnectivityStateEvaluator{},
 		state:      connectivity.Idle,
 	}
-	cc.UpdateBalancerState(connectivity.Idle, bw)
+	cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
 	go bw.lbWatcher()
 	return bw
 }
@@ -243,7 +242,7 @@
 	if bw.state != sa {
 		bw.state = sa
 	}
-	bw.cc.UpdateBalancerState(bw.state, bw)
+	bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
 	if s == connectivity.Shutdown {
 		// Remove state for this sc.
 		delete(bw.connSt, sc)
@@ -275,17 +274,17 @@
 
 // The picker is the balancerWrapper itself.
 // It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
+func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
 	failfast := true // Default failfast is true.
-	if ss, ok := rpcInfoFromContext(ctx); ok {
+	if ss, ok := rpcInfoFromContext(info.Ctx); ok {
 		failfast = ss.failfast
 	}
-	a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+	a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
 	if err != nil {
-		return nil, nil, err
+		return balancer.PickResult{}, toRPCErr(err)
 	}
 	if p != nil {
-		done = func(balancer.DoneInfo) { p() }
+		result.Done = func(balancer.DoneInfo) { p() }
 		defer func() {
 			if err != nil {
 				p()
@@ -297,38 +296,39 @@
 	defer bw.mu.Unlock()
 	if bw.pickfirst {
 		// Get the first sc in conns.
-		for _, sc := range bw.conns {
-			return sc, done, nil
+		for _, result.SubConn = range bw.conns {
+			return result, nil
 		}
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
-	sc, ok1 := bw.conns[resolver.Address{
+	var ok1 bool
+	result.SubConn, ok1 = bw.conns[resolver.Address{
 		Addr:       a.Addr,
 		Type:       resolver.Backend,
 		ServerName: "",
 		Metadata:   a.Metadata,
 	}]
-	s, ok2 := bw.connSt[sc]
+	s, ok2 := bw.connSt[result.SubConn]
 	if !ok1 || !ok2 {
 		// This can only happen due to a race where Get() returned an address
 		// that was subsequently removed by Notify.  In this case we should
 		// retry always.
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
 	switch s.s {
 	case connectivity.Ready, connectivity.Idle:
-		return sc, done, nil
+		return result, nil
 	case connectivity.Shutdown, connectivity.TransientFailure:
 		// If the returned sc has been shut down or is in transient failure,
 		// return error, and this RPC will fail or wait for another picker (if
 		// non-failfast).
-		return nil, nil, balancer.ErrTransientFailure
+		return balancer.PickResult{}, balancer.ErrTransientFailure
 	default:
 		// For other states (connecting or unknown), the v1 balancer would
 		// traditionally wait until ready and then issue the RPC.  Returning
 		// ErrNoSubConnAvailable will be a slight improvement in that it will
 		// allow the balancer to choose another address in case others are
 		// connected.
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
 }
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 84e31a2..f58740b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -31,22 +31,23 @@
 	"time"
 
 	"google.golang.org/grpc/balancer"
-	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
-	_ "google.golang.org/grpc/resolver/dns"         // To register dns resolver.
-	_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
 	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/status"
+
+	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
+	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver.
+	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
 )
 
 const (
@@ -187,11 +188,11 @@
 	}
 
 	if cc.dopts.defaultServiceConfigRawJSON != nil {
-		sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
-		if err != nil {
-			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
 		}
-		cc.dopts.defaultServiceConfig = sc
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
 	}
 	cc.mkp = cc.dopts.copts.KeepaliveParams
 
@@ -236,29 +237,28 @@
 		}
 	}
 	if cc.dopts.bs == nil {
-		cc.dopts.bs = backoff.Exponential{
-			MaxDelay: DefaultBackoffConfig.MaxDelay,
+		cc.dopts.bs = backoff.DefaultExponential
+	}
+
+	// Determine the resolver to use.
+	cc.parsedTarget = parseTarget(cc.target)
+	grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
+	resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
+	if resolverBuilder == nil {
+		// If resolver builder is still nil, the parsed target's scheme is
+		// not registered. Fallback to default resolver and set Endpoint to
+		// the original target.
+		grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+		cc.parsedTarget = resolver.Target{
+			Scheme:   resolver.GetDefaultScheme(),
+			Endpoint: target,
+		}
+		resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
+		if resolverBuilder == nil {
+			return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
 		}
 	}
-	if cc.dopts.resolverBuilder == nil {
-		// Only try to parse target when resolver builder is not already set.
-		cc.parsedTarget = parseTarget(cc.target)
-		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
-		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
-		if cc.dopts.resolverBuilder == nil {
-			// If resolver builder is still nil, the parsed target's scheme is
-			// not registered. Fallback to default resolver and set Endpoint to
-			// the original target.
-			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
-			cc.parsedTarget = resolver.Target{
-				Scheme:   resolver.GetDefaultScheme(),
-				Endpoint: target,
-			}
-			cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
-		}
-	} else {
-		cc.parsedTarget = resolver.Target{Endpoint: target}
-	}
+
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
 		cc.authority = creds.Info().ServerName
@@ -298,14 +298,14 @@
 	}
 
 	// Build the resolver.
-	rWrapper, err := newCCResolverWrapper(cc)
+	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
-
 	cc.mu.Lock()
 	cc.resolverWrapper = rWrapper
 	cc.mu.Unlock()
+
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
 		for {
@@ -444,7 +444,32 @@
 	return csm.notifyChan
 }
 
-// ClientConn represents a client connection to an RPC server.
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs.  It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+	// Invoke performs a unary RPC and returns after the response is received
+	// into reply.
+	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+	// NewStream begins a streaming RPC.
+	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the endpoint
+// based on configuration, load, etc. It is also free to determine which actual
+// endpoints to use and may change it every RPC, permitting client-side load
+// balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
 type ClientConn struct {
 	ctx    context.Context
 	cancel context.CancelFunc
@@ -533,58 +558,104 @@
 	}
 }
 
-func (cc *ClientConn) updateResolverState(s resolver.State) error {
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+	cfg := parseServiceConfig("{}")
+	if cfg.Err != nil {
+		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+	}
+	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+	if cc.sc != nil {
+		cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+		return
+	}
+	if cc.dopts.defaultServiceConfig != nil {
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+	} else {
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+	}
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+	defer cc.firstResolveEvent.Fire()
 	cc.mu.Lock()
-	defer cc.mu.Unlock()
 	// Check if the ClientConn is already closed. Some fields (e.g.
 	// balancerWrapper) are set to nil when closing the ClientConn, and could
 	// cause nil pointer panic if we don't have this check.
 	if cc.conns == nil {
+		cc.mu.Unlock()
 		return nil
 	}
 
-	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
-		if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
-			cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
+	if err != nil {
+		// May need to apply the initial service config in case the resolver
+		// doesn't support service configs, or doesn't provide a service config
+		// with the new addresses.
+		cc.maybeApplyDefaultServiceConfig(nil)
+
+		if cc.balancerWrapper != nil {
+			cc.balancerWrapper.resolverError(err)
 		}
-	} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
-		cc.applyServiceConfig(sc)
+
+		// No addresses are valid with err set; return early.
+		cc.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	var ret error
+	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+		// TODO: do we need to apply a failing LB policy if there is no
+		// default, per the error handling design?
+	} else {
+		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+			cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+		} else {
+			ret = balancer.ErrBadResolverState
+			if cc.balancerWrapper == nil {
+				var err error
+				if s.ServiceConfig.Err != nil {
+					err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
+				} else {
+					err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
+				}
+				cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+				cc.csMgr.updateState(connectivity.TransientFailure)
+				cc.mu.Unlock()
+				return ret
+			}
+		}
 	}
 
 	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var newBalancerName string
-		if cc.sc != nil && cc.sc.lbConfig != nil {
-			newBalancerName = cc.sc.lbConfig.name
-			balCfg = cc.sc.lbConfig.cfg
-		} else {
-			var isGRPCLB bool
-			for _, a := range s.Addresses {
-				if a.Type == resolver.GRPCLB {
-					isGRPCLB = true
-					break
-				}
-			}
-			if isGRPCLB {
-				newBalancerName = grpclbName
-			} else if cc.sc != nil && cc.sc.LB != nil {
-				newBalancerName = *cc.sc.LB
-			} else {
-				newBalancerName = PickFirstBalancerName
-			}
-		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+	if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+		balCfg = cc.sc.lbConfig.cfg
 	}
 
-	cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
-	return nil
+	cbn := cc.curBalancerName
+	bw := cc.balancerWrapper
+	cc.mu.Unlock()
+	if cbn != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		for i := 0; i < len(s.Addresses); {
+			if s.Addresses[i].Type == resolver.GRPCLB {
+				copy(s.Addresses[i:], s.Addresses[i+1:])
+				s.Addresses = s.Addresses[:len(s.Addresses)-1]
+				continue
+			}
+			i++
+		}
+	}
+	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+	if ret == nil {
+		ret = uccsErr // prefer ErrBadResolver state since any other error is
+		// currently meaningless to the caller.
+	}
+	return ret
 }
 
 // switchBalancer starts the switching from current balancer to the balancer
@@ -632,7 +703,7 @@
 	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
 }
 
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
 	cc.mu.Lock()
 	if cc.conns == nil {
 		cc.mu.Unlock()
@@ -640,7 +711,7 @@
 	}
 	// TODO(bar switching) send updates to all balancer wrappers when balancer
 	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s)
+	cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
 	cc.mu.Unlock()
 }
 
@@ -737,7 +808,7 @@
 	}
 	// Update connectivity state within the lock to prevent subsequent or
 	// concurrent calls from resetting the transport more than once.
-	ac.updateConnectivityState(connectivity.Connecting)
+	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
 	// Start a goroutine connecting to the server asynchronously.
@@ -823,7 +894,8 @@
 }
 
 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+		Ctx:            ctx,
 		FullMethodName: method,
 	})
 	if err != nil {
@@ -832,10 +904,10 @@
 	return t, done, nil
 }
 
-func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
 	if sc == nil {
 		// should never reach here.
-		return fmt.Errorf("got nil pointer for service config")
+		return
 	}
 	cc.sc = sc
 
@@ -851,10 +923,38 @@
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
 
-	return nil
+	if cc.dopts.balancerBuilder == nil {
+		// Only look at balancer types and switch balancer if balancer dial
+		// option is not set.
+		var newBalancerName string
+		if cc.sc != nil && cc.sc.lbConfig != nil {
+			newBalancerName = cc.sc.lbConfig.name
+		} else {
+			var isGRPCLB bool
+			for _, a := range addrs {
+				if a.Type == resolver.GRPCLB {
+					isGRPCLB = true
+					break
+				}
+			}
+			if isGRPCLB {
+				newBalancerName = grpclbName
+			} else if cc.sc != nil && cc.sc.LB != nil {
+				newBalancerName = *cc.sc.LB
+			} else {
+				newBalancerName = PickFirstBalancerName
+			}
+		}
+		cc.switchBalancer(newBalancerName)
+	} else if cc.balancerWrapper == nil {
+		// Balancer dial option was set, and this is the first time handling
+		// resolved addresses. Build a balancer with dopts.balancerBuilder.
+		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
+		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+	}
 }
 
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
 	cc.mu.RLock()
 	r := cc.resolverWrapper
 	cc.mu.RUnlock()
@@ -876,8 +976,9 @@
 // This API is EXPERIMENTAL.
 func (cc *ClientConn) ResetConnectBackoff() {
 	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	for ac := range cc.conns {
+	conns := cc.conns
+	cc.mu.Unlock()
+	for ac := range conns {
 		ac.resetConnectBackoff()
 	}
 }
@@ -963,7 +1064,7 @@
 }
 
 // Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
 	if ac.state == s {
 		return
 	}
@@ -976,7 +1077,7 @@
 			Severity: channelz.CtINFO,
 		})
 	}
-	ac.cc.handleSubConnStateChange(ac.acbw, s)
+	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -996,7 +1097,7 @@
 func (ac *addrConn) resetTransport() {
 	for i := 0; ; i++ {
 		if i > 0 {
-			ac.cc.resolveNow(resolver.ResolveNowOption{})
+			ac.cc.resolveNow(resolver.ResolveNowOptions{})
 		}
 
 		ac.mu.Lock()
@@ -1025,7 +1126,7 @@
 		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
 		connectDeadline := time.Now().Add(dialDuration)
 
-		ac.updateConnectivityState(connectivity.Connecting)
+		ac.updateConnectivityState(connectivity.Connecting, nil)
 		ac.transport = nil
 		ac.mu.Unlock()
 
@@ -1038,7 +1139,7 @@
 				ac.mu.Unlock()
 				return
 			}
-			ac.updateConnectivityState(connectivity.TransientFailure)
+			ac.updateConnectivityState(connectivity.TransientFailure, err)
 
 			// Backoff.
 			b := ac.resetBackoff
@@ -1061,8 +1162,8 @@
 
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
-			newTr.Close()
 			ac.mu.Unlock()
+			newTr.Close()
 			return
 		}
 		ac.curAddr = addr
@@ -1077,20 +1178,16 @@
 		// we restart from the top of the addr list.
 		<-reconnect.Done()
 		hcancel()
-
-		// Need to reconnect after a READY, the addrConn enters
-		// TRANSIENT_FAILURE.
+		// restart connecting - the top of the loop will set state to
+		// CONNECTING.  This is against the current connectivity semantics doc,
+		// however it allows for graceful behavior for RPCs not yet dispatched
+		// - unfortunate timing would otherwise lead to the RPC failing even
+		// though the TRANSIENT_FAILURE state (called for by the doc) would be
+		// instantaneous.
 		//
-		// This will set addrConn to TRANSIENT_FAILURE for a very short period
-		// of time, and turns CONNECTING. It seems reasonable to skip this, but
-		// READY-CONNECTING is not a valid transition.
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			return
-		}
-		ac.updateConnectivityState(connectivity.TransientFailure)
-		ac.mu.Unlock()
+		// Ideally we should transition to Idle here and block until there is
+		// RPC activity that leads to the balancer requesting a reconnect of
+		// the associated SubConn.
 	}
 }
 
@@ -1098,6 +1195,7 @@
 // first successful one. It returns the transport, the address and a Event in
 // the successful case. The Event fires when the returned transport disconnects.
 func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+	var firstConnErr error
 	for _, addr := range addrs {
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
@@ -1126,11 +1224,14 @@
 		if err == nil {
 			return newTr, addr, reconnect, nil
 		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
 		ac.cc.blockingpicker.updateConnectionError(err)
 	}
 
 	// Couldn't connect to any address.
-	return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+	return nil, resolver.Address{}, nil, firstConnErr
 }
 
 // createTransport creates a connection to addr. It returns the transport and a
@@ -1141,20 +1242,47 @@
 	onCloseCalled := make(chan struct{})
 	reconnect := grpcsync.NewEvent()
 
+	authority := ac.cc.authority
+	// addr.ServerName takes precedence over ClientConn authority, if present.
+	if addr.ServerName != "" {
+		authority = addr.ServerName
+	}
+
 	target := transport.TargetInfo{
 		Addr:      addr.Addr,
 		Metadata:  addr.Metadata,
-		Authority: ac.cc.authority,
+		Authority: authority,
 	}
 
+	once := sync.Once{}
 	onGoAway := func(r transport.GoAwayReason) {
 		ac.mu.Lock()
 		ac.adjustParams(r)
+		once.Do(func() {
+			if ac.state == connectivity.Ready {
+				// Prevent this SubConn from being used for new RPCs by setting its
+				// state to Connecting.
+				//
+				// TODO: this should be Idle when grpc-go properly supports it.
+				ac.updateConnectivityState(connectivity.Connecting, nil)
+			}
+		})
 		ac.mu.Unlock()
 		reconnect.Fire()
 	}
 
 	onClose := func() {
+		ac.mu.Lock()
+		once.Do(func() {
+			if ac.state == connectivity.Ready {
+				// Prevent this SubConn from being used for new RPCs by setting its
+				// state to Connecting.
+				//
+				// TODO: this should be Idle when grpc-go properly supports it.
+				ac.updateConnectivityState(connectivity.Connecting, nil)
+			}
+		})
+		ac.mu.Unlock()
 		close(onCloseCalled)
 		reconnect.Fire()
 	}
@@ -1176,20 +1304,18 @@
 		return nil, nil, err
 	}
 
-	if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
-		select {
-		case <-time.After(connectDeadline.Sub(time.Now())):
-			// We didn't get the preface in time.
-			newTr.Close()
-			grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
-			return nil, nil, errors.New("timed out waiting for server handshake")
-		case <-prefaceReceived:
-			// We got the preface - huzzah! things are good.
-		case <-onCloseCalled:
-			// The transport has already closed - noop.
-			return nil, nil, errors.New("connection closed")
-			// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
-		}
+	select {
+	case <-time.After(time.Until(connectDeadline)):
+		// We didn't get the preface in time.
+		newTr.Close()
+		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+		return nil, nil, errors.New("timed out waiting for server handshake")
+	case <-prefaceReceived:
+		// We got the preface - huzzah! things are good.
+	case <-onCloseCalled:
+		// The transport has already closed - noop.
+		return nil, nil, errors.New("connection closed")
+		// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
 	}
 	return newTr, reconnect, nil
 }
@@ -1210,7 +1336,7 @@
 	var healthcheckManagingState bool
 	defer func() {
 		if !healthcheckManagingState {
-			ac.updateConnectivityState(connectivity.Ready)
+			ac.updateConnectivityState(connectivity.Ready, nil)
 		}
 	}()
 
@@ -1246,13 +1372,13 @@
 		ac.mu.Unlock()
 		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
 	}
-	setConnectivityState := func(s connectivity.State) {
+	setConnectivityState := func(s connectivity.State, lastErr error) {
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
 		if ac.transport != currentTr {
 			return
 		}
-		ac.updateConnectivityState(s)
+		ac.updateConnectivityState(s, lastErr)
 	}
 	// Start the health checking stream.
 	go func() {
@@ -1317,8 +1443,8 @@
 	curTr := ac.transport
 	ac.transport = nil
 	// We have to set the state to Shutdown before anything else to prevent races
-	// between setting the state and logic that waits on context cancelation / etc.
-	ac.updateConnectivityState(connectivity.Shutdown)
+	// between setting the state and logic that waits on context cancellation / etc.
+	ac.updateConnectivityState(connectivity.Shutdown, nil)
 	ac.cancel()
 	ac.curAddr = resolver.Address{}
 	if err == errConnDrain && curTr != nil {
@@ -1341,7 +1467,7 @@
 			},
 		})
 		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity beng deleted, and thus prevent it from being deleted right away.
+		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(ac.channelzID)
 	}
 	ac.mu.Unlock()
@@ -1431,3 +1557,12 @@
 // Deprecated: This error is never returned by grpc and should not be
 // referenced by users.
 var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+	for _, rb := range cc.dopts.resolvers {
+		if cc.parsedTarget.Scheme == rb.Scheme() {
+			return rb
+		}
+	}
+	return resolver.Get(cc.parsedTarget.Scheme)
+}
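The grpclb filtering loop in updateResolverState above removes addresses in place with a copy-and-truncate pass over the slice. A minimal standalone sketch of the same pattern, using a hypothetical addr type in place of resolver.Address:

package main

import "fmt"

type addrType int

const (
	backend addrType = iota
	grpclbAddr
)

type addr struct {
	host string
	typ  addrType
}

// filterGRPCLB drops grpclb-typed addresses, mirroring the loop above: shift
// the tail left with copy, shrink the slice, and only advance i when the
// current element is kept.
func filterGRPCLB(addrs []addr) []addr {
	for i := 0; i < len(addrs); {
		if addrs[i].typ == grpclbAddr {
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			continue
		}
		i++
	}
	return addrs
}

func main() {
	fmt.Println(filterGRPCLB([]addr{{"a", backend}, {"b", grpclbAddr}, {"c", backend}}))
	// Output: [{a 0} {c 0}]
}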
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 8ea3d4a..845ce5d 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -24,16 +24,12 @@
 
 import (
 	"context"
-	"crypto/tls"
-	"crypto/x509"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"net"
-	"strings"
 
 	"github.com/golang/protobuf/proto"
-	"google.golang.org/grpc/credentials/internal"
+	"google.golang.org/grpc/internal"
 )
 
 // PerRPCCredentials defines the common interface for the credentials which need to
@@ -45,7 +41,8 @@
 	// context. If a status code is returned, it will be used as the status
 	// for the RPC. uri is the URI of the entry point for the request.
 	// When supported by the underlying implementation, ctx can be used for
-	// timeout and cancellation.
+	// timeout and cancellation. Additionally, RequestInfo data will be
+	// available via ctx to this call.
 	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
 	// it as an arbitrary string.
 	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
@@ -54,6 +51,48 @@
 	RequireTransportSecurity() bool
 }
 
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+	// NoSecurity indicates a connection is insecure.
+	// The zero SecurityLevel value is invalid for backward compatibility.
+	NoSecurity SecurityLevel = iota + 1
+	// IntegrityOnly indicates a connection only provides integrity protection.
+	IntegrityOnly
+	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+	PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+	switch s {
+	case NoSecurity:
+		return "NoSecurity"
+	case IntegrityOnly:
+		return "IntegrityOnly"
+	case PrivacyAndIntegrity:
+		return "PrivacyAndIntegrity"
+	}
+	return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+	SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns a pointer to the CommonAuthInfo struct.
+func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
+	return c
+}
+
 // ProtocolInfo provides information regarding the gRPC wire protocol version,
 // security protocol, security protocol version in use, server name, etc.
 type ProtocolInfo struct {
@@ -68,6 +107,8 @@
 }
 
 // AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo to provide additional
+// information about the credentials.
 type AuthInfo interface {
 	AuthType() string
 }
@@ -82,7 +123,8 @@
 	// ClientHandshake does the authentication handshake specified by the corresponding
 	// authentication protocol on rawConn for clients. It returns the authenticated
 	// connection and the corresponding auth information about the connection.
-	// Implementations must use the provided context to implement timely cancellation.
+	// The auth information should embed CommonAuthInfo to return additional information about
+	// the credentials. Implementations must use the provided context to implement timely cancellation.
 	// gRPC will try to reconnect if the error returned is a temporary error
 	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
 	// If the returned error is a wrapper error, implementations should make sure that
@@ -92,7 +134,8 @@
 	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
 	// ServerHandshake does the authentication handshake for servers. It returns
 	// the authenticated connection and the corresponding auth information about
-	// the connection.
+	// the connection. The auth information should embed CommonAuthInfo to return additional information
+	// about the credentials.
 	//
 	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
 	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
@@ -125,145 +168,63 @@
 	NewWithMode(mode string) (Bundle, error)
 }
 
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
-	State tls.ConnectionState
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+	// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+	AuthInfo AuthInfo
 }
 
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
-	return "tls"
+// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+type requestInfoKey struct{}
+
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
+	return
 }
 
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
-	v := &TLSChannelzSecurityValue{
-		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the GetCommonAuthInfo() method,
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. Cases 2) and 3) are allowed for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() *CommonAuthInfo
 	}
-	// Currently there's no way to get LocalCertificate info from tls package.
-	if len(t.State.PeerCertificates) > 0 {
-		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	ri, _ := RequestInfoFromContext(ctx)
+	if ri.AuthInfo == nil {
+		return errors.New("unable to obtain SecurityLevel from context")
 	}
-	return v
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
-	// TLS configuration
-	config *tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
-	return ProtocolInfo{
-		SecurityProtocol: "tls",
-		SecurityVersion:  "1.2",
-		ServerName:       c.config.ServerName,
-	}
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
-	// use local cfg to avoid clobbering ServerName if using multiple endpoints
-	cfg := cloneTLSConfig(c.config)
-	if cfg.ServerName == "" {
-		colonPos := strings.LastIndex(authority, ":")
-		if colonPos == -1 {
-			colonPos = len(authority)
+	if ci, ok := ri.AuthInfo.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == 0 {
+			return nil
 		}
-		cfg.ServerName = authority[:colonPos]
-	}
-	conn := tls.Client(rawConn, cfg)
-	errChannel := make(chan error, 1)
-	go func() {
-		errChannel <- conn.Handshake()
-	}()
-	select {
-	case err := <-errChannel:
-		if err != nil {
-			return nil, nil, err
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
 		}
-	case <-ctx.Done():
-		return nil, nil, ctx.Err()
 	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
-	conn := tls.Server(rawConn, c.config)
-	if err := conn.Handshake(); err != nil {
-		return nil, nil, err
-	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
-	return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
-	c.config.ServerName = serverNameOverride
+	// The condition is satisfied or the AuthInfo struct does not implement the GetCommonAuthInfo() method.
 	return nil
 }
 
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
-	for _, p := range ps {
-		if p == alpnProtoStrH2 {
-			return ps
-		}
+func init() {
+	internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
+		return context.WithValue(ctx, requestInfoKey{}, ri)
 	}
-	ret := make([]string, 0, len(ps)+1)
-	ret = append(ret, ps...)
-	return append(ret, alpnProtoStrH2)
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
-	tc := &tlsCreds{cloneTLSConfig(c)}
-	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
-	return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
-	b, err := ioutil.ReadFile(certFile)
-	if err != nil {
-		return nil, err
-	}
-	cp := x509.NewCertPool()
-	if !cp.AppendCertsFromPEM(b) {
-		return nil, fmt.Errorf("credentials: failed to append certificates")
-	}
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
-	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
-	if err != nil {
-		return nil, err
-	}
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
 }
 
 // ChannelzSecurityInfo defines the interface that security protocols should implement
 // in order to provide security info to channelz.
+//
+// This API is experimental.
 type ChannelzSecurityInfo interface {
 	GetSecurityValue() ChannelzSecurityValue
 }
@@ -271,66 +232,20 @@
 // ChannelzSecurityValue defines the interface that GetSecurityValue() return value
 // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
 // and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
 type ChannelzSecurityValue interface {
 	isChannelzSecurityValue()
 }
 
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
-	ChannelzSecurityValue
-	StandardName      string
-	LocalCertificate  []byte
-	RemoteCertificate []byte
-}
-
 // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
 // from GetSecurityValue(), which contains protocol specific security info. Note
 // the Value field will be sent to users of channelz requesting channel info, and
 // thus sensitive info should better be avoided.
+//
+// This API is experimental.
 type OtherChannelzSecurityValue struct {
 	ChannelzSecurityValue
 	Name  string
 	Value proto.Message
 }
-
-var cipherSuiteLookup = map[uint16]string{
-	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
-	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
-	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-
-	return cfg.Clone()
-}
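The RequestInfo and CheckSecurityLevel helpers introduced above are intended to be consumed from PerRPCCredentials implementations, which now receive RequestInfo through the call context. A rough sketch of token credentials that refuse to send a token over connections weaker than PrivacyAndIntegrity (the tokenAuth type and the header name are illustrative, not part of this change):

package creds

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// tokenAuth is a hypothetical PerRPCCredentials implementation.
type tokenAuth struct {
	token string
}

// GetRequestMetadata attaches a bearer token. gRPC attaches RequestInfo
// (method name and handshake AuthInfo) to ctx before calling this, which is
// what CheckSecurityLevel inspects.
func (t tokenAuth) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
		return nil, err
	}
	return map[string]string{"authorization": "Bearer " + t.token}, nil
}

func (t tokenAuth) RequireTransportSecurity() bool { return true }

Such a value would be passed at dial time, e.g. via grpc.WithPerRPCCredentials(tokenAuth{token: "..."}).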
diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/go12.go
similarity index 100%
rename from vendor/google.golang.org/grpc/credentials/tls13.go
rename to vendor/google.golang.org/grpc/credentials/go12.go
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 0000000..28b4f62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,225 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+
+	"google.golang.org/grpc/credentials/internal"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := cloneTLSConfig(c.config)
+	if cfg.ServerName == "" {
+		serverName, _, err := net.SplitHostPort(authority)
+		if err != nil {
+			// If the authority has no host:port or cannot be parsed, use it as-is.
+			serverName = authority
+		}
+		cfg.ServerName = serverName
+	}
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+		close(errChannel)
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			conn.Close()
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		conn.Close()
+		return nil, nil, ctx.Err()
+	}
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		conn.Close()
+		return nil, nil, err
+	}
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+const alpnProtoStrH2 = "h2"
+
+func appendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	tc := &tlsCreds{cloneTLSConfig(c)}
+	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// This API is EXPERIMENTAL.
+type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
+
+var cipherSuiteLookup = map[uint16]string{
+	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
+	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+}
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
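The TLS constructors keep their previous behavior after the move to this file; a minimal client-side sketch of wiring them into Dial (the CA path and target are placeholders). Connections built this way now carry TLSInfo embedding CommonAuthInfo{PrivacyAndIntegrity}, which is what CheckSecurityLevel sees.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Build TransportCredentials from a CA bundle; an empty serverNameOverride
	// keeps the authority taken from the dial target.
	tlsCreds, err := credentials.NewClientTLSFromFile("/path/to/ca.pem", "")
	if err != nil {
		log.Fatalf("loading CA: %v", err)
	}
	conn, err := grpc.Dial("example.com:50051", grpc.WithTransportCredentials(tlsCreds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	_ = conn // a generated stub client would be constructed from conn here
}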
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 69c0031..63f5ae2 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -24,11 +24,12 @@
 	"net"
 	"time"
 
+	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
-	"google.golang.org/grpc/internal/backoff"
+	internalbackoff "google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
@@ -47,7 +48,7 @@
 
 	cp          Compressor
 	dc          Decompressor
-	bs          backoff.Strategy
+	bs          internalbackoff.Strategy
 	block       bool
 	insecure    bool
 	timeout     time.Duration
@@ -57,10 +58,7 @@
 	callOptions []CallOption
 	// This is used by v1 balancer dial option WithBalancer to support v1
 	// balancer, and also by WithBalancerName dial option.
-	balancerBuilder balancer.Builder
-	// This is to support grpclb.
-	resolverBuilder             resolver.Builder
-	reqHandshake                envconfig.RequireHandshakeSetting
+	balancerBuilder             balancer.Builder
 	channelzParentID            int64
 	disableServiceConfig        bool
 	disableRetry                bool
@@ -69,6 +67,11 @@
 	minConnectTimeout           func() time.Duration
 	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
+	// This is used by ccResolverWrapper to back off between successive calls to
+	// resolver.ResolveNow(). The user will have no need to configure this, but
+	// we need to be able to configure this in tests.
+	resolveNowBackoff func(int) time.Duration
+	resolvers         []resolver.Builder
 }
 
 // DialOption configures how we set up the connection.
@@ -100,17 +103,6 @@
 	}
 }
 
-// WithWaitForHandshake blocks until the initial settings frame is received from
-// the server before assigning RPCs to the connection.
-//
-// Deprecated: this is the default behavior, and this option will be removed
-// after the 1.18 release.
-func WithWaitForHandshake() DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.reqHandshake = envconfig.RequireHandshakeOn
-	})
-}
-
 // WithWriteBufferSize determines how much data can be batched before doing a
 // write on the wire. The corresponding memory allocation for this buffer will
 // be twice the size to keep syscalls low. The default value for this buffer is
@@ -156,7 +148,8 @@
 // WithMaxMsgSize returns a DialOption which sets the maximum message size the
 // client can receive.
 //
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.  Will
+// be supported throughout 1.x.
 func WithMaxMsgSize(s int) DialOption {
 	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
 }
@@ -172,7 +165,8 @@
 // WithCodec returns a DialOption which sets a codec for message marshaling and
 // unmarshaling.
 //
-// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.  Will be
+// supported throughout 1.x.
 func WithCodec(c Codec) DialOption {
 	return WithDefaultCallOptions(CallCustomCodec(c))
 }
@@ -181,7 +175,7 @@
 // message compression. It has lower priority than the compressor set by the
 // UseCompressor CallOption.
 //
-// Deprecated: use UseCompressor instead.
+// Deprecated: use UseCompressor instead.  Will be supported throughout 1.x.
 func WithCompressor(cp Compressor) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.cp = cp
@@ -196,7 +190,8 @@
 // message.  If no compressor is registered for the encoding, an Unimplemented
 // status error will be returned.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead.  Will be supported
+// throughout 1.x.
 func WithDecompressor(dc Decompressor) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.dc = dc
@@ -207,7 +202,7 @@
 // Name resolver will be ignored if this DialOption is specified.
 //
 // Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.
+// WithBalancerName.  Will be removed in a future 1.x release.
 func WithBalancer(b Balancer) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.balancerBuilder = &balancerWrapperBuilder{
@@ -223,7 +218,8 @@
 // The balancer cannot be overridden by balancer option specified by service
 // config.
 //
-// This is an EXPERIMENTAL API.
+// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
+// instead.  Will be removed in a future 1.x release.
 func WithBalancerName(balancerName string) DialOption {
 	builder := balancer.Get(balancerName)
 	if builder == nil {
@@ -234,27 +230,41 @@
 	})
 }
 
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.resolverBuilder = b
-	})
-}
-
 // WithServiceConfig returns a DialOption which has a channel to read the
 // service configuration.
 //
-// Deprecated: service config should be received through name resolver, as
-// specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// Deprecated: service config should be received through name resolver or via
+// WithDefaultServiceConfig, as specified at
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md.  Will be
+// removed in a future 1.x release.
 func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.scChan = c
 	})
 }
 
+// WithConnectParams configures the dialer to use the provided ConnectParams.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+//
+// This API is EXPERIMENTAL.
+func WithConnectParams(p ConnectParams) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = internalbackoff.Exponential{Config: p.Backoff}
+		o.minConnectTimeout = func() time.Duration {
+			return p.MinConnectTimeout
+		}
+	})
+}
+
 // WithBackoffMaxDelay configures the dialer to use the provided maximum delay
 // when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
 func WithBackoffMaxDelay(md time.Duration) DialOption {
 	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
 }
@@ -262,19 +272,18 @@
 // WithBackoffConfig configures the dialer to use the provided backoff
 // parameters after connection failures.
 //
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
 func WithBackoffConfig(b BackoffConfig) DialOption {
-	return withBackoff(backoff.Exponential{
-		MaxDelay: b.MaxDelay,
-	})
+	bc := backoff.DefaultConfig
+	bc.MaxDelay = b.MaxDelay
+	return withBackoff(internalbackoff.Exponential{Config: bc})
 }
 
 // withBackoff sets the backoff strategy used for connectRetryNum after a failed
 // connection attempt.
 //
 // This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoff.Strategy) DialOption {
+func withBackoff(bs internalbackoff.Strategy) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.bs = bs
 	})
@@ -329,7 +338,8 @@
 // WithTimeout returns a DialOption that configures a timeout for dialing a
 // ClientConn initially. This is valid if and only if WithBlock() is present.
 //
-// Deprecated: use DialContext and context.WithTimeout instead.
+// Deprecated: use DialContext and context.WithTimeout instead of Dial and
+// WithTimeout.  Will be supported throughout 1.x.
 func WithTimeout(d time.Duration) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.timeout = d
@@ -347,7 +357,6 @@
 }
 
 func init() {
-	internal.WithResolverBuilder = withResolverBuilder
 	internal.WithHealthCheckFunc = withHealthCheckFunc
 }
 
@@ -356,7 +365,8 @@
 // is returned by f, gRPC checks the error's Temporary() method to decide if it
 // should try to reconnect to the network address.
 //
-// Deprecated: use WithContextDialer instead
+// Deprecated: use WithContextDialer instead.  Will be supported throughout
+// 1.x.
 func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
 	return WithContextDialer(
 		func(ctx context.Context, addr string) (net.Conn, error) {
@@ -460,6 +470,8 @@
 // WithChannelzParentID returns a DialOption that specifies the channelz ID of
 // current ClientConn's parent. This function is used in nested channel creation
 // (e.g. grpclb dial).
+//
+// This API is EXPERIMENTAL.
 func WithChannelzParentID(id int64) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.channelzParentID = id
@@ -480,8 +492,10 @@
 
 // WithDefaultServiceConfig returns a DialOption that configures the default
 // service config, which will be used in cases where:
-// 1. WithDisableServiceConfig is called.
-// 2. Resolver does not return service config or if the resolver gets and invalid config.
+//
+// 1. WithDisableServiceConfig is also used.
+// 2. Resolver does not return a service config or if the resolver returns an
+//    invalid service config.
 //
 // This API is EXPERIMENTAL.
 func WithDefaultServiceConfig(s string) DialOption {
@@ -537,12 +551,12 @@
 func defaultDialOptions() dialOptions {
 	return dialOptions{
 		disableRetry:    !envconfig.Retry,
-		reqHandshake:    envconfig.RequireHandshake,
 		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
 			WriteBufferSize: defaultWriteBufSize,
 			ReadBufferSize:  defaultReadBufSize,
 		},
+		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
 	}
 }
 
@@ -556,3 +570,25 @@
 		o.minConnectTimeout = f
 	})
 }
+
+// withResolveNowBackoff specifies the function that clientconn uses to back off
+// between successive calls to resolver.ResolveNow().
+//
+// For testing purposes only.
+func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolveNowBackoff = f
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register.  They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// This API is EXPERIMENTAL.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
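A sketch of how the new dial options compose; the target is a placeholder, and grpc.ConnectParams is assumed to carry the Backoff and MinConnectTimeout fields implied by WithConnectParams above. WithResolvers would be appended to the same option list with a locally constructed resolver.Builder.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and override only the cap, as the
	// WithConnectParams comment recommends.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}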
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 30a75da..195e844 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -46,6 +46,10 @@
 	// coding header.  The result must be static; the result cannot change
 	// between calls.
 	Name() string
+	// EXPERIMENTAL: if a Compressor implements
+	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
+	// to determine the size of the buffer allocated for the result of decompression.
+	// Return -1 to indicate unknown size.
 }
 
 var registeredCompressor = make(map[string]Compressor)
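The DecompressedSize hook described in the comment above is an optional extension; callers are expected to discover it with a type assertion. A sketch of that discovery pattern (the names here are illustrative, not the actual grpc internals):

package main

import "fmt"

// Compressor mirrors the relevant part of the encoding.Compressor contract.
type Compressor interface {
	Name() string
}

// sizer is the optional extension described in the comment: a Compressor may
// also report the decompressed size, or -1 when it is unknown.
type sizer interface {
	DecompressedSize(compressedBytes []byte) int
}

// bufferSizeFor returns the reported decompressed size when available,
// otherwise a fallback allocation size.
func bufferSizeFor(c Compressor, payload []byte, fallback int) int {
	if s, ok := c.(sizer); ok {
		if n := s.DecompressedSize(payload); n >= 0 {
			return n
		}
	}
	return fallback
}

type noopCompressor struct{}

func (noopCompressor) Name() string { return "noop" }

func main() {
	fmt.Println(bufferSizeFor(noopCompressor{}, nil, 4096)) // 4096: no sizer implemented
}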
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index c1a8340..2378361 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -1,19 +1,16 @@
 module google.golang.org/grpc
 
+go 1.11
+
 require (
-	cloud.google.com/go v0.26.0 // indirect
-	github.com/BurntSushi/toml v0.3.1 // indirect
-	github.com/client9/misspell v0.3.4
+	github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473
+	github.com/envoyproxy/protoc-gen-validate v0.1.0
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/mock v1.1.1
-	github.com/golang/protobuf v1.2.0
+	github.com/golang/protobuf v1.3.2
 	github.com/google/go-cmp v0.2.0
-	golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
 	golang.org/x/net v0.0.0-20190311183353-d8887717615a
 	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
 	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
-	golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
-	google.golang.org/appengine v1.1.0 // indirect
-	google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
-	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
+	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
 )
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
index 741677d..dd5d0ce 100644
--- a/vendor/google.golang.org/grpc/go.sum
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -1,37 +1,53 @@
 cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 51bb945..874ea6d 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -89,7 +89,7 @@
 }
 
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calles os.Exit() with exit code 1.
+// It calls os.Exit() with exit code 1.
 func Fatalf(format string, args ...interface{}) {
 	logger.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 1bd0cce..5fc0ee3 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -25,44 +25,39 @@
 import (
 	"time"
 
+	grpcbackoff "google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/internal/grpcrand"
 )
 
 // Strategy defines the methodology for backing off after a grpc connection
 // failure.
-//
 type Strategy interface {
 	// Backoff returns the amount of time to wait before the next retry given
 	// the number of consecutive failures.
 	Backoff(retries int) time.Duration
 }
 
-const (
-	// baseDelay is the amount of time to wait before retrying after the first
-	// failure.
-	baseDelay = 1.0 * time.Second
-	// factor is applied to the backoff after each retry.
-	factor = 1.6
-	// jitter provides a range to randomize backoff delays.
-	jitter = 0.2
-)
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
 
 // Exponential implements exponential backoff algorithm as defined in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 type Exponential struct {
-	// MaxDelay is the upper bound of backoff delay.
-	MaxDelay time.Duration
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
 }
 
 // Backoff returns the amount of time to wait before the next retry given the
 // number of retries.
 func (bc Exponential) Backoff(retries int) time.Duration {
 	if retries == 0 {
-		return baseDelay
+		return bc.Config.BaseDelay
 	}
-	backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
 	for backoff < max && retries > 0 {
-		backoff *= factor
+		backoff *= bc.Config.Multiplier
 		retries--
 	}
 	if backoff > max {
@@ -70,7 +65,7 @@
 	}
 	// Randomize backoff delays so that if a cluster of requests start at
 	// the same time, they won't operate in lockstep.
-	backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
 	if backoff < 0 {
 		return 0
 	}
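The rewritten Backoff above is multiplicative backoff with jitter, now driven by grpcbackoff.Config instead of hard-coded constants. A self-contained sketch of the same computation using the defaults from the connection-backoff doc referenced in the code (1s base, 1.6 multiplier, 0.2 jitter, 120s cap; values assumed from that doc):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffFor mirrors Exponential.Backoff: multiply the base delay by the
// multiplier once per retry, cap at the max delay, then jitter by +/-20%.
func backoffFor(retries int) time.Duration {
	const (
		baseDelay  = 1.0 * float64(time.Second)
		multiplier = 1.6
		jitter     = 0.2
		maxDelay   = 120.0 * float64(time.Second)
	)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	backoff := baseDelay
	for backoff < maxDelay && retries > 0 {
		backoff *= multiplier
		retries--
	}
	if backoff > maxDelay {
		backoff = maxDelay
	}
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoffFor(i))
	}
}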
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index fee6aec..8b10516 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -34,7 +34,7 @@
 }
 
 // binLogger is the global binary logger for the binary. One of this should be
-// built at init time from the configuration (environment varialbe or flags).
+// built at init time from the configuration (environment variable or flags).
 //
 // It is used to get a methodLogger for each individual method.
 var binLogger Logger
@@ -98,7 +98,7 @@
 // New methodLogger with same service overrides the old one.
 func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
 	if _, ok := l.services[service]; ok {
-		return fmt.Errorf("conflicting rules for service %v found", service)
+		return fmt.Errorf("conflicting service rules for service %v found", service)
 	}
 	if l.services == nil {
 		l.services = make(map[string]*methodLoggerConfig)
@@ -112,10 +112,10 @@
 // New methodLogger with same method overrides the old one.
 func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
 	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
 	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
 	if l.methods == nil {
 		l.methods = make(map[string]*methodLoggerConfig)
@@ -127,10 +127,10 @@
 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
 	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
 	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
 	if l.blacklist == nil {
 		l.blacklist = make(map[string]struct{})
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index 4cc2525..be30d0e 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -43,7 +43,7 @@
 //    Foo.
 //
 // If two configs exist for one certain method or service, the one specified
-// later overrides the privous config.
+// later overrides the previous config.
 func NewLoggerFromConfigString(s string) Logger {
 	if s == "" {
 		return nil
@@ -74,7 +74,7 @@
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
 		if m == "*" {
-			return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
 		}
 		if suffix != "" {
 			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index 20d044f..a2e7c34 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -63,7 +63,7 @@
 
 // newWriterSink creates a binary log sink with the given writer.
 //
-// Write() marshalls the proto message and writes it to the given writer. Each
+// Write() marshals the proto message and writes it to the given writer. Each
 // message is prefixed with a 4 byte big endian unsigned integer as the length.
 //
 // No buffer is done, Close() doesn't try to close the writer.
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
new file mode 100644
index 0000000..9f6a0c1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import "sync"
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `interface{}`. This means that a call to Put() incurs an extra memory
+// allocation, and also that users need a type assertion while reading. For
+// performance critical code paths, using Unbounded is strongly discouraged and
+// defining a new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
+type Unbounded struct {
+	c       chan interface{}
+	mu      sync.Mutex
+	backlog []interface{}
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+	b.mu.Lock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
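
The Put/Load/Get contract above is the part most callers get wrong: Get() returns a channel with capacity one, so each successful receive must be followed by a Load() to promote the next backlog entry onto the channel. Below is a minimal, self-contained consumer sketch; the unbounded type is a local copy of the vendored one only because internal packages cannot be imported from outside the grpc module.

package main

import (
	"fmt"
	"sync"
)

// unbounded is an illustrative copy of buffer.Unbounded from the file above.
type unbounded struct {
	c       chan interface{}
	mu      sync.Mutex
	backlog []interface{}
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan interface{}, 1)} }

func (b *unbounded) Put(t interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- t: // fast path: channel had room
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t) // overflow goes to the backlog slice
}

func (b *unbounded) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *unbounded) Get() <-chan interface{} { return b.c }

func main() {
	b := newUnbounded()
	for i := 0; i < 5; i++ {
		b.Put(i) // Put never blocks
	}
	for i := 0; i < 5; i++ {
		v := <-b.Get() // receive one value...
		b.Load()       // ...then promote the next backlog entry
		fmt.Println(v.(int))
	}
}
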
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 11be7cd..ae6c897 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -25,40 +25,14 @@
 )
 
 const (
-	prefix              = "GRPC_GO_"
-	retryStr            = prefix + "RETRY"
-	requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
-)
-
-// RequireHandshakeSetting describes the settings for handshaking.
-type RequireHandshakeSetting int
-
-const (
-	// RequireHandshakeOn indicates to wait for handshake before considering a
-	// connection ready/successful.
-	RequireHandshakeOn RequireHandshakeSetting = iota
-	// RequireHandshakeOff indicates to not wait for handshake before
-	// considering a connection ready/successful.
-	RequireHandshakeOff
+	prefix          = "GRPC_GO_"
+	retryStr        = prefix + "RETRY"
+	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
 )
 
 var (
 	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
 	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
-	// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
-	// environment variable.
-	//
-	// Will be removed after the 1.18 release.
-	RequireHandshake = RequireHandshakeOn
+	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
 )
-
-func init() {
-	switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
-	case "on":
-		fallthrough
-	default:
-		RequireHandshake = RequireHandshakeOn
-	case "off":
-		RequireHandshake = RequireHandshakeOff
-	}
-}
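
After this change the package reduces to two booleans derived from environment variables at init time: Retry requires an explicit "on", while TXTErrIgnore defaults to true and is disabled only by an explicit "false" in GRPC_GO_IGNORE_TXT_ERRORS. A standalone sketch of that derivation (not the vendored package; the values set here are illustrative only):

package main

import (
	"fmt"
	"os"
	"strings"
)

const (
	prefix          = "GRPC_GO_"
	retryStr        = prefix + "RETRY"
	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
)

func main() {
	os.Setenv(retryStr, "on")
	os.Setenv(txtErrIgnoreStr, "false")

	// Retry is enabled only by an explicit "on"; TXT lookup errors are
	// ignored unless the variable is explicitly set to "false".
	retry := strings.EqualFold(os.Getenv(retryStr), "on")
	txtErrIgnore := !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")

	fmt.Println(retry, txtErrIgnore) // prints: true false
}
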
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index bc1f99a..0912f0b 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -28,9 +28,7 @@
 )
 
 var (
-	// WithResolverBuilder is exported by dialoptions.go
-	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
-	// WithHealthCheckFunc is not exported by dialoptions.go
+	// WithHealthCheckFunc is set by dialoptions.go
 	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
@@ -39,14 +37,17 @@
 	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
-	// ParseServiceConfig is a function to parse JSON service configs into
-	// opaque data structures.
-	ParseServiceConfig func(sc string) (interface{}, error)
 	// StatusRawProto is exported by status/status.go. This func returns a
 	// pointer to the wrapped Status proto for a given status.Status without a
 	// call to proto.Clone(). The returned Status proto should not be mutated by
 	// the caller.
 	StatusRawProto interface{} // func (*status.Status) *spb.Status
+	// NewRequestInfoContext creates a new context based on the argument context attaching
+	// the passed in RequestInfo to the new context.
+	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+	// ParseServiceConfigForTesting is for creating a fake
+	// ClientConn for resolver testing only
+	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
@@ -57,7 +58,7 @@
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
 
 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
similarity index 72%
rename from vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
rename to vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 297492e..c368db6 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -33,18 +33,22 @@
 	"time"
 
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
 )
 
+// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+// addresses from SRV records.  Must not be changed after init time.
+var EnableSRVLookups = false
+
 func init() {
 	resolver.Register(NewBuilder())
 }
 
 const (
 	defaultPort       = "443"
-	defaultFreq       = time.Minute * 30
 	defaultDNSSvrPort = "53"
 	golang            = "GO"
 	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
@@ -94,47 +98,33 @@
 
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 func NewBuilder() resolver.Builder {
-	return &dnsBuilder{minFreq: defaultFreq}
+	return &dnsBuilder{}
 }
 
-type dnsBuilder struct {
-	// minimum frequency of polling the DNS server.
-	minFreq time.Duration
-}
+type dnsBuilder struct{}
 
 // Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	host, port, err := parseTarget(target.Endpoint, defaultPort)
 	if err != nil {
 		return nil, err
 	}
 
 	// IP address.
-	if net.ParseIP(host) != nil {
-		host, _ = formatIP(host)
-		addr := []resolver.Address{{Addr: host + ":" + port}}
-		i := &ipResolver{
-			cc: cc,
-			ip: addr,
-			rn: make(chan struct{}, 1),
-			q:  make(chan struct{}),
-		}
-		cc.NewAddress(addr)
-		go i.watcher()
-		return i, nil
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
 	}
 
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
 	d := &dnsResolver{
-		freq:                 b.minFreq,
-		backoff:              backoff.Exponential{MaxDelay: b.minFreq},
 		host:                 host,
 		port:                 port,
 		ctx:                  ctx,
 		cancel:               cancel,
 		cc:                   cc,
-		t:                    time.NewTimer(0),
 		rn:                   make(chan struct{}, 1),
 		disableServiceConfig: opts.DisableServiceConfig,
 	}
@@ -150,6 +140,7 @@
 
 	d.wg.Add(1)
 	go d.watcher()
+	d.ResolveNow(resolver.ResolveNowOptions{})
 	return d, nil
 }
 
@@ -164,53 +155,23 @@
 	LookupTXT(ctx context.Context, name string) (txts []string, err error)
 }
 
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
-	cc resolver.ClientConn
-	ip []resolver.Address
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
-	rn chan struct{}
-	q  chan struct{}
-}
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
 
-// ResolveNow resend the address it stores, no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
-	select {
-	case i.rn <- struct{}{}:
-	default:
-	}
-}
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
-	close(i.q)
-}
-
-func (i *ipResolver) watcher() {
-	for {
-		select {
-		case <-i.rn:
-			i.cc.NewAddress(i.ip)
-		case <-i.q:
-			return
-		}
-	}
-}
+func (deadResolver) Close() {}
 
 // dnsResolver watches for the name resolution update for a non-IP target.
 type dnsResolver struct {
-	freq       time.Duration
-	backoff    backoff.Exponential
-	retryCount int
-	host       string
-	port       string
-	resolver   netResolver
-	ctx        context.Context
-	cancel     context.CancelFunc
-	cc         resolver.ClientConn
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
 	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
 	rn chan struct{}
-	t  *time.Timer
 	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
 	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
 	// replace the real lookup functions with mocked ones to facilitate testing.
@@ -222,7 +183,7 @@
 }
 
 // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 	select {
 	case d.rn <- struct{}{}:
 	default:
@@ -233,7 +194,6 @@
 func (d *dnsResolver) Close() {
 	d.cancel()
 	d.wg.Wait()
-	d.t.Stop()
 }
 
 func (d *dnsResolver) watcher() {
@@ -242,27 +202,15 @@
 		select {
 		case <-d.ctx.Done():
 			return
-		case <-d.t.C:
 		case <-d.rn:
-			if !d.t.Stop() {
-				// Before resetting a timer, it should be stopped to prevent racing with
-				// reads on it's channel.
-				<-d.t.C
-			}
 		}
 
-		result, sc := d.lookup()
-		// Next lookup should happen within an interval defined by d.freq. It may be
-		// more often due to exponential retry on empty address list.
-		if len(result) == 0 {
-			d.retryCount++
-			d.t.Reset(d.backoff.Backoff(d.retryCount))
+		state, err := d.lookup()
+		if err != nil {
+			d.cc.ReportError(err)
 		} else {
-			d.retryCount = 0
-			d.t.Reset(d.freq)
+			d.cc.UpdateState(*state)
 		}
-		d.cc.NewServiceConfig(sc)
-		d.cc.NewAddress(result)
 
 		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
 		// will be queued in d.rn.
@@ -276,37 +224,68 @@
 	}
 }
 
-func (d *dnsResolver) lookupSRV() []resolver.Address {
+func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
 	var newAddrs []resolver.Address
 	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
 	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
 	}
 	for _, s := range srvs {
 		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
 		if err != nil {
-			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
 				continue
 			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, ok := formatIP(a)
+			if !ok {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
 			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
 		}
 	}
-	return newAddrs
+	return newAddrs, nil
 }
 
-func (d *dnsResolver) lookupTXT() string {
+var filterError = func(err error) error {
+	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff).  Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	return err
+}
+
+func handleDNSError(err error, lookupType string) error {
+	err = filterError(err)
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		grpclog.Infoln(err)
+	}
+	return err
+}
+
+func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
 	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
 	if err != nil {
-		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
-		return ""
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
 	}
 	var res string
 	for _, s := range ss {
@@ -315,40 +294,45 @@
 
 	// TXT record must have "grpc_config=" attribute in order to be used as service config.
 	if !strings.HasPrefix(res, txtAttribute) {
-		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
-		return ""
+		grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service config.
+		return nil
 	}
-	return strings.TrimPrefix(res, txtAttribute)
+	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+	return d.cc.ParseServiceConfig(sc)
 }
 
-func (d *dnsResolver) lookupHost() []resolver.Address {
+func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
 	var newAddrs []resolver.Address
 	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
 	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
+		err = handleDNSError(err, "A")
+		return nil, err
 	}
 	for _, a := range addrs {
-		a, ok := formatIP(a)
+		ip, ok := formatIP(a)
 		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 		}
-		addr := a + ":" + d.port
+		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
 	}
-	return newAddrs
+	return newAddrs, nil
 }
 
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	newAddrs := d.lookupSRV()
-	// Support fallback to non-balancer address.
-	newAddrs = append(newAddrs, d.lookupHost()...)
-	if d.disableServiceConfig {
-		return newAddrs, ""
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+	srv, srvErr := d.lookupSRV()
+	addrs, hostErr := d.lookupHost()
+	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+		return nil, hostErr
 	}
-	sc := d.lookupTXT()
-	return newAddrs, canaryingSC(sc)
+	state := &resolver.State{
+		Addresses: append(addrs, srv...),
+	}
+	if !d.disableServiceConfig {
+		state.ServiceConfig = d.lookupTXT()
+	}
+	return state, nil
 }
 
 // formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -434,12 +418,12 @@
 	var rcs []rawChoice
 	err := json.Unmarshal([]byte(js), &rcs)
 	if err != nil {
-		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+		grpclog.Warningf("dns: error parsing service config json: %v", err)
 		return ""
 	}
 	cliHostname, err := os.Hostname()
 	if err != nil {
-		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+		grpclog.Warningf("dns: error getting client hostname: %v", err)
 		return ""
 	}
 	var sc string
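
The biggest behavioural change in the resolver is error handling: lookup failures are no longer swallowed into empty results polled on a timer; instead handleDNSError filters them, surfacing only timeouts and temporary failures to gRPC (which retries with backoff via ReportError) and treating everything else as an absent record. The go113.go file added below swaps in an IsNotFound-based check on Go 1.13+. A small self-contained sketch of the pre-1.13 filter:

package main

import (
	"fmt"
	"net"
)

// filterDNSError mirrors the filterError variable above: only timeouts and
// temporary failures are surfaced so gRPC retries the query with backoff;
// anything else is treated as "the record simply is not there" and dropped.
func filterDNSError(err error) error {
	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
		return nil
	}
	return err
}

func main() {
	timeout := &net.DNSError{Err: "i/o timeout", Name: "example.com", IsTimeout: true}
	missing := &net.DNSError{Err: "no such host", Name: "example.com"}

	fmt.Println(filterDNSError(timeout)) // surfaced -> resolver reports it and retries
	fmt.Println(filterDNSError(missing)) // <nil>   -> treated as an empty result
}
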
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
new file mode 100644
index 0000000..8783a8c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
@@ -0,0 +1,33 @@
+// +build go1.13
+
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import "net"
+
+func init() {
+	filterError = func(err error) error {
+		if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
+			// The name does not exist; not an error.
+			return nil
+		}
+		return err
+	}
+}
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
similarity index 94%
rename from vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
rename to vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index 893d5d1..520d922 100644
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -26,7 +26,7 @@
 
 type passthroughBuilder struct{}
 
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	r := &passthroughResolver{
 		target: target,
 		cc:     cc,
@@ -48,7 +48,7 @@
 	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
 }
 
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
 
 func (*passthroughResolver) Close() {}
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 204ba15..ddee20b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -23,6 +23,7 @@
 	"fmt"
 	"runtime"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
@@ -84,24 +85,40 @@
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.
 
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport.  These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
 // registerStream is used to register an incoming stream with loopy writer.
 type registerStream struct {
 	streamID uint32
 	wq       *writeQuota
 }
 
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
 // headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
 	streamID   uint32
 	hf         []hpack.HeaderField
-	endStream  bool                       // Valid on server side.
-	initStream func(uint32) (bool, error) // Used only on the client side.
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
 	onWrite    func()
 	wq         *writeQuota    // write quota for the stream created.
 	cleanup    *cleanupStream // Valid on the server side.
 	onOrphaned func(error)    // Valid on client-side
 }
 
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
 type cleanupStream struct {
 	streamID uint32
 	rst      bool
@@ -109,6 +126,8 @@
 	onWrite  func()
 }
 
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
 type dataFrame struct {
 	streamID  uint32
 	endStream bool
@@ -119,27 +138,41 @@
 	onEachWrite func()
 }
 
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
 type incomingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
 type outgoingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
 type incomingSettings struct {
 	ss []http2.Setting
 }
 
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
 type outgoingSettings struct {
 	ss []http2.Setting
 }
 
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
 type incomingGoAway struct {
 }
 
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
 type goAway struct {
 	code      http2.ErrCode
 	debugData []byte
@@ -147,15 +180,21 @@
 	closeConn bool
 }
 
+func (*goAway) isTransportResponseFrame() bool { return false }
+
 type ping struct {
 	ack  bool
 	data [8]byte
 }
 
+func (*ping) isTransportResponseFrame() bool { return true }
+
 type outFlowControlSizeRequest struct {
 	resp chan uint32
 }
 
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
 type outStreamState int
 
 const (
@@ -238,6 +277,14 @@
 	consumerWaiting bool
 	list            *itemList
 	err             error
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer.  trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold.  Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Value // *chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -248,12 +295,24 @@
 	}
 }
 
-func (c *controlBuffer) put(it interface{}) error {
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+	ch, _ := c.trfChan.Load().(*chan struct{})
+	if ch != nil {
+		select {
+		case <-*ch:
+		case <-c.done:
+		}
+	}
+}
+
+func (c *controlBuffer) put(it cbItem) error {
 	_, err := c.executeAndPut(nil, it)
 	return err
 }
 
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
 	var wakeUp bool
 	c.mu.Lock()
 	if c.err != nil {
@@ -271,6 +330,15 @@
 		c.consumerWaiting = false
 	}
 	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
+		}
+	}
 	c.mu.Unlock()
 	if wakeUp {
 		select {
@@ -304,7 +372,17 @@
 			return nil, c.err
 		}
 		if !c.list.isEmpty() {
-			h := c.list.dequeue()
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(*chan struct{})
+					close(*ch)
+					c.trfChan.Store((*chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
 			c.mu.Unlock()
 			return h, nil
 		}
@@ -559,21 +637,17 @@
 
 func (l *loopyWriter) originateStream(str *outStream) error {
 	hdr := str.itl.dequeue().(*headerFrame)
-	sendPing, err := hdr.initStream(str.id)
-	if err != nil {
+	if err := hdr.initStream(str.id); err != nil {
 		if err == ErrConnClosing {
 			return err
 		}
 		// Other errors(errStreamDrain) need not close transport.
 		return nil
 	}
-	if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
 		return err
 	}
 	l.estdStreams[str.id] = str
-	if sendPing {
-		return l.pingHandler(&ping{data: [8]byte{}})
-	}
 	return nil
 }
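
The throttling added to controlBuffer works by counting queued "transport response" frames and, once the count reaches maxQueuedTransportResponseFrames, publishing a channel through an atomic.Value; the reader goroutine blocks on that channel in throttle() until get() drains the queue back below the threshold and closes it. The stripped-down sketch below reproduces only that mechanism (the threshold of 3 and all names are illustrative, not part of the transport package):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const threshold = 3

type throttledQueue struct {
	mu      sync.Mutex
	items   []bool // true = counts toward the threshold
	pending int
	trfChan atomic.Value // *chan struct{}
}

// throttle blocks while too many counted items are queued.
func (q *throttledQueue) throttle() {
	if ch, _ := q.trfChan.Load().(*chan struct{}); ch != nil {
		<-*ch
	}
}

func (q *throttledQueue) put(counts bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, counts)
	if counts {
		q.pending++
		if q.pending == threshold {
			ch := make(chan struct{})
			q.trfChan.Store(&ch)
		}
	}
}

func (q *throttledQueue) get() bool {
	q.mu.Lock()
	defer q.mu.Unlock()
	it := q.items[0]
	q.items = q.items[1:]
	if it {
		if q.pending == threshold {
			ch := q.trfChan.Load().(*chan struct{})
			close(*ch) // unblock any reader stuck in throttle()
			q.trfChan.Store((*chan struct{})(nil))
		}
		q.pending--
	}
	return it
}

func main() {
	q := &throttledQueue{}
	for i := 0; i < threshold; i++ {
		q.put(true)
	}
	done := make(chan struct{})
	go func() {
		q.throttle() // blocks until one counted item is dequeued
		close(done)
	}()
	q.get()
	<-done
	fmt.Println("reader unblocked after the queue drained below the threshold")
}
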
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 5ea997a..f262edd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -149,6 +149,7 @@
 		n = uint32(math.MaxInt32)
 	}
 	f.mu.Lock()
+	defer f.mu.Unlock()
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// can send without a window update.
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -169,10 +170,8 @@
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			f.delta = n
 		}
-		f.mu.Unlock()
 		return f.delta
 	}
-	f.mu.Unlock()
 	return 0
 }
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 78f9ddc..c3c32da 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -227,7 +227,9 @@
 
 	if err == nil { // transport has not been closed
 		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+				Trailer: s.trailer.Copy(),
+			})
 		}
 	}
 	ht.Close()
@@ -289,7 +291,9 @@
 
 	if err == nil {
 		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+				Header: md.Copy(),
+			})
 		}
 	}
 	return err
@@ -334,7 +338,7 @@
 		Addr: ht.RemoteAddr(),
 	}
 	if req.TLS != nil {
-		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
+		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}}
 	}
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	s.ctx = peer.NewContext(ctx, pr)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index c96178d..2d6feeb 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -35,6 +35,7 @@
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/syscall"
 	"google.golang.org/grpc/keepalive"
@@ -44,8 +45,14 @@
 	"google.golang.org/grpc/status"
 )
 
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
 // http2Client implements the ClientTransport interface with HTTP2.
 type http2Client struct {
+	lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
 	ctx        context.Context
 	cancel     context.CancelFunc
 	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
@@ -62,8 +69,6 @@
 	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
 	// that the server sent GoAway on this transport.
 	goAway chan struct{}
-	// awakenKeepalive is used to wake up keepalive when after it has gone dormant.
-	awakenKeepalive chan struct{}
 
 	framer *framer
 	// controlBuf delivers all the control related tasks (e.g., window
@@ -77,9 +82,6 @@
 
 	perRPCCreds []credentials.PerRPCCredentials
 
-	// Boolean to keep track of reading activity on transport.
-	// 1 is true and 0 is false.
-	activity         uint32 // Accessed atomically.
 	kp               keepalive.ClientParameters
 	keepaliveEnabled bool
 
@@ -110,6 +112,16 @@
 	// goAwayReason records the http2.ErrCode and debug data received with the
 	// GoAway frame.
 	goAwayReason GoAwayReason
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
 
 	// Fields below are for channelz metric collection.
 	channelzID int64 // channelz unique identification number
@@ -119,6 +131,8 @@
 	onClose  func()
 
 	bufferPool *bufferPool
+
+	connectionID uint64
 }
 
 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -232,7 +246,6 @@
 		readerDone:            make(chan struct{}),
 		writerDone:            make(chan struct{}),
 		goAway:                make(chan struct{}),
-		awakenKeepalive:       make(chan struct{}, 1),
 		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
@@ -264,9 +277,6 @@
 			updateFlowControl: t.updateFlowControl,
 		}
 	}
-	// Make sure awakenKeepalive can't be written upon.
-	// keepalive routine will make it writable, if need be.
-	t.awakenKeepalive <- struct{}{}
 	if t.statsHandler != nil {
 		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
 			RemoteAddr: t.remoteAddr,
@@ -281,6 +291,7 @@
 		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
 	}
 	if t.keepaliveEnabled {
+		t.kpDormancyCond = sync.NewCond(&t.mu)
 		go t.keepalive()
 	}
 	// Start the reader goroutine for incoming message. Each transport has
@@ -325,6 +336,8 @@
 		}
 	}
 
+	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
 	if err := t.framer.writer.Flush(); err != nil {
 		return nil, err
 	}
@@ -347,6 +360,7 @@
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
 	s := &Stream{
+		ct:             t,
 		done:           make(chan struct{}),
 		method:         callHdr.Method,
 		sendCompress:   callHdr.SendCompress,
@@ -380,23 +394,24 @@
 }
 
 func (t *http2Client) getPeer() *peer.Peer {
-	pr := &peer.Peer{
-		Addr: t.remoteAddr,
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo,
 	}
-	// Attach Auth info if there is any.
-	if t.authInfo != nil {
-		pr.AuthInfo = t.authInfo
-	}
-	return pr
 }
 
 func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
 	aud := t.createAudience(callHdr)
-	authData, err := t.getTrAuthData(ctx, aud)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
 	if err != nil {
 		return nil, err
 	}
-	callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
 	if err != nil {
 		return nil, err
 	}
@@ -419,6 +434,7 @@
 
 	if callHdr.SendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
 	}
 	if dl, ok := ctx.Deadline(); ok {
 		// Send out timeout regardless its value. The server can detect timeout context by itself.
@@ -493,6 +509,9 @@
 }
 
 func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+	if len(t.perRPCCreds) == 0 {
+		return nil, nil
+	}
 	authData := map[string]string{}
 	for _, c := range t.perRPCCreds {
 		data, err := c.GetRequestMetadata(ctx, audience)
@@ -513,7 +532,7 @@
 }
 
 func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
-	callAuthData := map[string]string{}
+	var callAuthData map[string]string
 	// Check if credentials.PerRPCCredentials were provided via call options.
 	// Note: if these credentials are provided both via dial options and call
 	// options, then both sets of credentials will be applied.
@@ -525,6 +544,7 @@
 		if err != nil {
 			return nil, status.Errorf(codes.Internal, "transport: %v", err)
 		}
+		callAuthData = make(map[string]string, len(data))
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2
 			k = strings.ToLower(k)
@@ -556,12 +576,11 @@
 		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
 			close(s.headerChan)
 		}
-
 	}
 	hdr := &headerFrame{
 		hf:        headerFields,
 		endStream: false,
-		initStream: func(id uint32) (bool, error) {
+		initStream: func(id uint32) error {
 			t.mu.Lock()
 			if state := t.state; state != reachable {
 				t.mu.Unlock()
@@ -571,29 +590,19 @@
 					err = ErrConnClosing
 				}
 				cleanup(err)
-				return false, err
+				return err
 			}
 			t.activeStreams[id] = s
 			if channelz.IsOn() {
 				atomic.AddInt64(&t.czData.streamsStarted, 1)
 				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
 			}
-			var sendPing bool
-			// If the number of active streams change from 0 to 1, then check if keepalive
-			// has gone dormant. If so, wake it up.
-			if len(t.activeStreams) == 1 && t.keepaliveEnabled {
-				select {
-				case t.awakenKeepalive <- struct{}{}:
-					sendPing = true
-					// Fill the awakenKeepalive channel again as this channel must be
-					// kept non-writable except at the point that the keepalive()
-					// goroutine is waiting either to be awaken or shutdown.
-					t.awakenKeepalive <- struct{}{}
-				default:
-				}
+			// If the keepalive goroutine has gone dormant, wake it up.
+			if t.kpDormant {
+				t.kpDormancyCond.Signal()
 			}
 			t.mu.Unlock()
-			return sendPing, nil
+			return nil
 		},
 		onOrphaned: cleanup,
 		wq:         s.wq,
@@ -671,12 +680,14 @@
 		}
 	}
 	if t.statsHandler != nil {
+		header, _, _ := metadata.FromOutgoingContextRaw(ctx)
 		outHeader := &stats.OutHeader{
 			Client:      true,
 			FullMethod:  callHdr.Method,
 			RemoteAddr:  t.remoteAddr,
 			LocalAddr:   t.localAddr,
 			Compression: callHdr.SendCompress,
+			Header:      header.Copy(),
 		}
 		t.statsHandler.HandleRPC(s.ctx, outHeader)
 	}
@@ -769,9 +780,17 @@
 		t.mu.Unlock()
 		return nil
 	}
+	// Call t.onClose before setting the state to closing to prevent the client
+	// from attempting to create new streams ASAP.
+	t.onClose()
 	t.state = closing
 	streams := t.activeStreams
 	t.activeStreams = nil
+	if t.kpDormant {
+		// If the keepalive goroutine is blocked on this condition variable, we
+		// should unblock it so that the goroutine eventually exits.
+		t.kpDormancyCond.Signal()
+	}
 	t.mu.Unlock()
 	t.controlBuf.finish()
 	t.cancel()
@@ -789,7 +808,6 @@
 		}
 		t.statsHandler.HandleConn(t.ctx, connEnd)
 	}
-	t.onClose()
 	return err
 }
 
@@ -848,11 +866,11 @@
 	return t.controlBuf.put(df)
 }
 
-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Client) getStream(f http2.Frame) *Stream {
 	t.mu.Lock()
-	defer t.mu.Unlock()
-	s, ok := t.activeStreams[f.Header().StreamID]
-	return s, ok
+	s := t.activeStreams[f.Header().StreamID]
+	t.mu.Unlock()
+	return s
 }
 
 // adjustWindow sends out extra window update over the initial window size
@@ -932,8 +950,8 @@
 		t.controlBuf.put(bdpPing)
 	}
 	// Select the right stream to dispatch.
-	s, ok := t.getStream(f)
-	if !ok {
+	s := t.getStream(f)
+	if s == nil {
 		return
 	}
 	if size > 0 {
@@ -964,8 +982,8 @@
 }
 
 func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
-	s, ok := t.getStream(f)
-	if !ok {
+	s := t.getStream(f)
+	if s == nil {
 		return
 	}
 	if f.ErrCode == http2.ErrCodeRefusedStream {
@@ -978,9 +996,9 @@
 		statusCode = codes.Unknown
 	}
 	if statusCode == codes.Canceled {
-		// Our deadline was already exceeded, and that was likely the cause of
-		// this cancelation.  Alter the status code accordingly.
-		if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancelation.  Alter the status code accordingly.
 			statusCode = codes.DeadlineExceeded
 		}
 	}
@@ -1085,11 +1103,12 @@
 	default:
 		t.setGoAwayReason(f)
 		close(t.goAway)
-		t.state = draining
 		t.controlBuf.put(&incomingGoAway{})
-
-		// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
 		t.onGoAway(t.goAwayReason)
+		t.state = draining
 	}
 	// All streams with IDs greater than the GoAwayId
 	// and smaller than the previous GoAway ID should be killed.
@@ -1141,8 +1160,8 @@
 
 // operateHeaders takes action on the decoded headers.
 func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
-	s, ok := t.getStream(frame)
-	if !ok {
+	s := t.getStream(frame)
+	if s == nil {
 		return
 	}
 	endStream := frame.StreamEnded()
@@ -1171,12 +1190,14 @@
 				inHeader := &stats.InHeader{
 					Client:     true,
 					WireLength: int(frame.Header().Length),
+					Header:     s.header.Copy(),
 				}
 				t.statsHandler.HandleRPC(s.ctx, inHeader)
 			} else {
 				inTrailer := &stats.InTrailer{
 					Client:     true,
 					WireLength: int(frame.Header().Length),
+					Trailer:    s.trailer.Copy(),
 				}
 				t.statsHandler.HandleRPC(s.ctx, inTrailer)
 			}
@@ -1185,6 +1206,7 @@
 
 	// If headerChan hasn't been closed yet
 	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.headerValid = true
 		if !endStream {
 			// HEADERS frame block carries a Response-Headers.
 			isHeader = true
@@ -1227,7 +1249,7 @@
 	}
 	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
 	if t.keepaliveEnabled {
-		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 	}
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
@@ -1239,9 +1261,10 @@
 
 	// loop to keep reading incoming messages on this transport.
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		if t.keepaliveEnabled {
-			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		}
 		if err != nil {
 			// Abort an active stream if the http2.Framer returns a
@@ -1285,55 +1308,83 @@
 	}
 }
 
+func minTime(a, b time.Duration) time.Duration {
+	if a < b {
+		return a
+	}
+	return b
+}
+
 // keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
 	p := &ping{data: [8]byte{}}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	timeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
 	timer := time.NewTimer(t.kp.Time)
 	for {
 		select {
 		case <-timer.C:
-			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-				timer.Reset(t.kp.Time)
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were here.
+				outstandingPing = false
+				// Next timer should fire at kp.Time seconds from lastRead time.
+				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
 				continue
 			}
-			// Check if keepalive should go dormant.
+			if outstandingPing && timeoutLeft <= 0 {
+				t.Close()
+				return
+			}
 			t.mu.Lock()
+			if t.state == closing {
+				// If the transport is closing, we should exit from the
+				// keepalive goroutine here. If not, we could have a race
+				// between the call to Signal() from Close() and the call to
+				// Wait() here, whereby the keepalive goroutine ends up
+				// blocking on the condition variable which will never be
+				// signalled again.
+				t.mu.Unlock()
+				return
+			}
 			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
-				// Make awakenKeepalive writable.
-				<-t.awakenKeepalive
-				t.mu.Unlock()
-				select {
-				case <-t.awakenKeepalive:
-					// If the control gets here a ping has been sent
-					// need to reset the timer with keepalive.Timeout.
-				case <-t.ctx.Done():
-					return
-				}
-			} else {
-				t.mu.Unlock()
+				// If a ping was sent out previously (because there were active
+				// streams at that point) which wasn't acked and its timeout
+				// hadn't fired, but we got here and are about to go dormant,
+				// we should make sure that we unconditionally send a ping once
+				// we awaken.
+				outstandingPing = false
+				t.kpDormant = true
+				t.kpDormancyCond.Wait()
+			}
+			t.kpDormant = false
+			t.mu.Unlock()
+
+			// We get here either because we were dormant and a new stream was
+			// created which unblocked the Wait() call, or because the
+			// keepalive timer expired. In both cases, we need to send a ping.
+			if !outstandingPing {
 				if channelz.IsOn() {
 					atomic.AddInt64(&t.czData.kpCount, 1)
 				}
-				// Send ping.
 				t.controlBuf.put(p)
+				timeoutLeft = t.kp.Timeout
+				outstandingPing = true
 			}
-
-			// By the time control gets here a ping has been sent one way or the other.
-			timer.Reset(t.kp.Timeout)
-			select {
-			case <-timer.C:
-				if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-					timer.Reset(t.kp.Time)
-					continue
-				}
-				t.Close()
-				return
-			case <-t.ctx.Done():
-				if !timer.Stop() {
-					<-timer.C
-				}
-				return
-			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, timeoutLeft)
+			timeoutLeft -= sleepDuration
+			timer.Reset(sleepDuration)
 		case <-t.ctx.Done():
 			if !timer.Stop() {
 				<-timer.C
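
The reworked client keepalive replaces the activity flag and the awakenKeepalive channel with a lastRead timestamp, an outstandingPing flag and a dormancy condition variable; the timer is re-armed for min(kp.Time, timeoutLeft) after a ping, and for kp.Time measured from the last read when there was activity. A small arithmetic sketch of those two timer decisions (kpTime and kpTimeout values are illustrative, not gRPC defaults):

package main

import (
	"fmt"
	"time"
)

func minTime(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func main() {
	kpTime := 10 * time.Second   // keepalive.ClientParameters.Time
	kpTimeout := 3 * time.Second // keepalive.ClientParameters.Timeout

	// Case 1: the timer fired but a frame was read 4s ago. The next firing is
	// scheduled kp.Time after that read, i.e. ~6s from now, exactly as
	// timer.Reset(lastRead + kp.Time - now) does in the loop above.
	lastRead := time.Now().Add(-4 * time.Second).UnixNano()
	next := time.Duration(lastRead) + kpTime - time.Duration(time.Now().UnixNano())
	fmt.Printf("no ping needed, re-arm timer in ~%v\n", next.Round(time.Second))

	// Case 2: no read activity, so a ping goes out and the loop sleeps for
	// min(kp.Time, timeoutLeft); an unanswered ping whose timeout has fully
	// elapsed closes the transport.
	timeoutLeft := kpTimeout
	sleep := minTime(kpTime, timeoutLeft)
	timeoutLeft -= sleep
	fmt.Printf("ping sent, sleep %v, remaining timeout %v\n", sleep, timeoutLeft)
}
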
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 150b73e..8b04b03 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -62,11 +62,15 @@
 	statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
 )
 
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
+	lastRead    int64 // Keep this field 64-bit aligned. Accessed atomically.
 	ctx         context.Context
-	ctxDone     <-chan struct{} // Cache the context.Done() chan
-	cancel      context.CancelFunc
+	done        chan struct{}
 	conn        net.Conn
 	loopy       *loopyWriter
 	readerDone  chan struct{} // sync point to enable testing.
@@ -84,12 +88,8 @@
 	controlBuf *controlBuffer
 	fc         *trInFlow
 	stats      stats.Handler
-	// Flag to keep track of reading activity on transport.
-	// 1 is true and 0 is false.
-	activity uint32 // Accessed atomically.
 	// Keepalive and max-age parameters for the server.
 	kp keepalive.ServerParameters
-
 	// Keepalive enforcement policy.
 	kep keepalive.EnforcementPolicy
 	// The time instance last ping was received.
@@ -125,6 +125,8 @@
 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
 	bufferPool *bufferPool
+
+	connectionID uint64
 }
 
 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@@ -138,7 +140,10 @@
 	}
 	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
 	// Send initial settings as connection preface to client.
-	var isettings []http2.Setting
+	isettings := []http2.Setting{{
+		ID:  http2.SettingMaxFrameSize,
+		Val: http2MaxFrameLen,
+	}}
 	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
 	// permitted in the HTTP2 spec.
 	maxStreams := config.MaxStreams
@@ -172,6 +177,12 @@
 			Val: *config.MaxHeaderListSize,
 		})
 	}
+	if config.HeaderTableSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingHeaderTableSize,
+			Val: *config.HeaderTableSize,
+		})
+	}
 	if err := framer.fr.WriteSettings(isettings...); err != nil {
 		return nil, connectionErrorf(false, err, "transport: %v", err)
 	}
@@ -203,11 +214,10 @@
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
-	ctx, cancel := context.WithCancel(context.Background())
+	done := make(chan struct{})
 	t := &http2Server{
-		ctx:               ctx,
-		cancel:            cancel,
-		ctxDone:           ctx.Done(),
+		ctx:               context.Background(),
+		done:              done,
 		conn:              conn,
 		remoteAddr:        conn.RemoteAddr(),
 		localAddr:         conn.LocalAddr(),
@@ -228,7 +238,7 @@
 		czData:            new(channelzData),
 		bufferPool:        newBufferPool(),
 	}
-	t.controlBuf = newControlBuffer(t.ctxDone)
+	t.controlBuf = newControlBuffer(t.done)
 	if dynamicWindow {
 		t.bdpEst = &bdpEstimator{
 			bdp:               initialWindowSize,
@@ -246,6 +256,9 @@
 	if channelz.IsOn() {
 		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
 	}
+
+	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
+
 	t.framer.writer.Flush()
 
 	defer func() {
@@ -270,7 +283,7 @@
 	if err != nil {
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
 	}
-	atomic.StoreUint32(&t.activity, 1)
+	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
 		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -359,12 +372,14 @@
 				rstCode:  http2.ErrCodeRefusedStream,
 				onWrite:  func() {},
 			})
+			s.cancel()
 			return false
 		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
+		s.cancel()
 		return false
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
@@ -375,12 +390,14 @@
 			rstCode:  http2.ErrCodeRefusedStream,
 			onWrite:  func() {},
 		})
+		s.cancel()
 		return false
 	}
 	if streamID%2 != 1 || streamID <= t.maxStreamID {
 		t.mu.Unlock()
 		// illegal gRPC stream id.
 		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		s.cancel()
 		return true
 	}
 	t.maxStreamID = streamID
@@ -405,6 +422,7 @@
 			LocalAddr:   t.localAddr,
 			Compression: s.recvCompress,
 			WireLength:  int(frame.Header().Length),
+			Header:      metadata.MD(state.data.mdata).Copy(),
 		}
 		t.stats.HandleRPC(s.ctx, inHeader)
 	}
@@ -436,8 +454,9 @@
 func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	defer close(t.readerDone)
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
-		atomic.StoreUint32(&t.activity, 1)
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		if err != nil {
 			if se, ok := err.(http2.StreamError); ok {
 				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
@@ -745,7 +764,7 @@
 	return true
 }
 
-// WriteHeader sends the header metedata md back to the client.
+// WriteHeader sends the header metadata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	if s.updateHeaderSent() || s.getState() == streamDone {
 		return ErrIllegalHeaderWrite
@@ -766,6 +785,10 @@
 	return nil
 }
 
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
 func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
@@ -780,9 +803,7 @@
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: false,
-		onWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		onWrite:   t.setResetPingStrikes,
 	})
 	if !success {
 		if err != nil {
@@ -794,7 +815,9 @@
 	if t.stats != nil {
 		// Note: WireLength is not set in outHeader.
 		// TODO(mmukhi): Revisit this later, if needed.
-		outHeader := &stats.OutHeader{}
+		outHeader := &stats.OutHeader{
+			Header: s.header.Copy(),
+		}
 		t.stats.HandleRPC(s.Context(), outHeader)
 	}
 	return nil
@@ -842,9 +865,7 @@
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: true,
-		onWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		onWrite:   t.setResetPingStrikes,
 	}
 	s.hdrMu.Unlock()
 	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
@@ -859,7 +880,9 @@
 	rst := s.getState() == streamActive
 	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
 	if t.stats != nil {
-		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
 	}
 	return nil
 }
@@ -881,7 +904,7 @@
 			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
 			s.cancel()
 			select {
-			case <-t.ctx.Done():
+			case <-t.done:
 				return ErrConnClosing
 			default:
 			}
@@ -896,16 +919,14 @@
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
 	df := &dataFrame{
-		streamID: s.id,
-		h:        hdr,
-		d:        data,
-		onEachWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
 	}
 	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
 		select {
-		case <-t.ctx.Done():
+		case <-t.done:
 			return ErrConnClosing
 		default:
 		}
@@ -922,32 +943,35 @@
 // after an additional duration of keepalive.Timeout.
 func (t *http2Server) keepalive() {
 	p := &ping{}
-	var pingSent bool
-	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
-	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
-	keepalive := time.NewTimer(t.kp.Time)
-	// NOTE: All exit paths of this function should reset their
-	// respective timers. A failure to do so will cause the
-	// following clean-up to deadlock and eventually leak.
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
 	defer func() {
-		if !maxIdle.Stop() {
-			<-maxIdle.C
-		}
-		if !maxAge.Stop() {
-			<-maxAge.C
-		}
-		if !keepalive.Stop() {
-			<-keepalive.C
-		}
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
 	}()
+
 	for {
 		select {
-		case <-maxIdle.C:
+		case <-idleTimer.C:
 			t.mu.Lock()
 			idle := t.idle
 			if idle.IsZero() { // The connection is non-idle.
 				t.mu.Unlock()
-				maxIdle.Reset(t.kp.MaxConnectionIdle)
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
 				continue
 			}
 			val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -956,42 +980,52 @@
 				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
 				// Gracefully close the connection.
 				t.drain(http2.ErrCodeNo, []byte{})
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxIdle.Reset(infinity)
 				return
 			}
-			maxIdle.Reset(val)
-		case <-maxAge.C:
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
 			t.drain(http2.ErrCodeNo, []byte{})
-			maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
 			select {
-			case <-maxAge.C:
+			case <-ageTimer.C:
 				// Close the connection after grace period.
+				infof("transport: closing server transport due to maximum connection age.")
 				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxAge.Reset(infinity)
-			case <-t.ctx.Done():
+			case <-t.done:
 			}
 			return
-		case <-keepalive.C:
-			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-				pingSent = false
-				keepalive.Reset(t.kp.Time)
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Set up the timer to fire kp.Time after the lastRead
+				// time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
 				continue
 			}
-			if pingSent {
+			if outstandingPing && kpTimeoutLeft <= 0 {
+				infof("transport: closing server transport due to idleness.")
 				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				keepalive.Reset(infinity)
 				return
 			}
-			pingSent = true
-			if channelz.IsOn() {
-				atomic.AddInt64(&t.czData.kpCount, 1)
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
 			}
-			t.controlBuf.put(p)
-			keepalive.Reset(t.kp.Timeout)
-		case <-t.ctx.Done():
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// kpTimeoutLeft. This ensures that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
+		case <-t.done:
 			return
 		}
 	}
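
For readers following the keepalive rework above: the new loop drives a single timer off an atomically updated lastRead timestamp and sleeps for min(kp.Time, kpTimeoutLeft) between checks. The sketch below restates that pattern in isolation; keepaliveLoop, sendPing, and closeConn are hypothetical names standing in for the transport internals, so treat this as an illustration of the technique rather than the vendored code.

package keepalivesketch

import (
	"sync/atomic"
	"time"
)

// keepaliveLoop is a stripped-down sketch of the single-timer keepalive shown
// above. lastRead is updated atomically by the read loop; sendPing and
// closeConn stand in for controlBuf.put(p) and t.Close().
func keepaliveLoop(lastRead *int64, kpTime, kpTimeout time.Duration, done <-chan struct{}, sendPing, closeConn func()) {
	outstandingPing := false          // a ping was sent and nothing read since
	timeoutLeft := time.Duration(0)   // time left to receive an ack for it
	prevNano := time.Now().UnixNano() // last observed value of lastRead
	timer := time.NewTimer(kpTime)
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			lr := atomic.LoadInt64(lastRead)
			if lr > prevNano {
				// Read activity since the last check: no ping needed yet.
				outstandingPing = false
				timer.Reset(time.Duration(lr) + kpTime - time.Duration(time.Now().UnixNano()))
				prevNano = lr
				continue
			}
			if outstandingPing && timeoutLeft <= 0 {
				closeConn() // ping never acked within kpTimeout
				return
			}
			if !outstandingPing {
				sendPing()
				timeoutLeft = kpTimeout
				outstandingPing = true
			}
			// Sleep for min(kpTime, timeoutLeft) so that the next ping still
			// waits a full kpTime when the previous one is acked in time.
			sleep := kpTime
			if timeoutLeft < sleep {
				sleep = timeoutLeft
			}
			timeoutLeft -= sleep
			timer.Reset(sleep)
		case <-done:
			return
		}
	}
}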
@@ -1011,7 +1045,7 @@
 	t.activeStreams = nil
 	t.mu.Unlock()
 	t.controlBuf.finish()
-	t.cancel()
+	close(t.done)
 	err := t.conn.Close()
 	if channelz.IsOn() {
 		channelz.RemoveEntry(t.channelzID)
@@ -1151,7 +1185,7 @@
 		select {
 		case <-t.drainChan:
 		case <-timer.C:
-		case <-t.ctx.Done():
+		case <-t.done:
 			return
 		}
 		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
@@ -1201,7 +1235,7 @@
 	select {
 	case sz := <-resp:
 		return int64(sz)
-	case <-t.ctxDone:
+	case <-t.done:
 		return -1
 	case <-timer.C:
 		return -2
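
Throughout this file the t.ctx/t.cancel pair is replaced by a plain done channel that Close closes once, and long-lived goroutines select on <-t.done. A minimal sketch of that shutdown pattern follows; the names are made up, and a sync.Once stands in for the state check the real Close performs under t.mu.

package transportsketch

import (
	"errors"
	"sync"
)

var errConnClosing = errors.New("transport is closing")

// server sketches the done-channel shutdown used above: closing done is the
// only signal, and every long-lived goroutine selects on it.
type server struct {
	done      chan struct{}
	closeOnce sync.Once // the real Close checks t.state under t.mu instead
}

func newServer() *server {
	return &server{done: make(chan struct{})}
}

// Close wakes every goroutine blocked on <-s.done exactly once.
func (s *server) Close() {
	s.closeOnce.Do(func() { close(s.done) })
}

// wait blocks until some unit of work completes or the transport is closed,
// mirroring the selects on <-t.done in Write, keepalive and drain above.
func (s *server) wait(work <-chan struct{}) error {
	select {
	case <-work:
		return nil
	case <-s.done:
		return errConnClosing
	}
}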
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 9d21286..8f5f334 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -667,6 +667,7 @@
 		writer: w,
 		fr:     http2.NewFramer(w, r),
 	}
+	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
 	// Opt-in to Frame reuse API on framer to reduce garbage.
 	// Frames aren't safe to read from after a subsequent call to ReadFrame.
 	f.fr.SetReuseFrames()
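
The added SetMaxReadFrameSize call bounds the size of frames the framer will accept. A standalone sketch of the same golang.org/x/net/http2 framer setup is below; the bufio buffers and the 16384-byte limit are illustrative values, not necessarily what this package configures.

package framersketch

import (
	"bufio"
	"net"

	"golang.org/x/net/http2"
)

// newFramer mirrors the setup above: buffered reader/writer around the
// connection, a bounded max read frame size, and frame reuse to cut garbage.
func newFramer(conn net.Conn, writeBufSize, readBufSize int, maxReadFrameLen uint32) *http2.Framer {
	w := bufio.NewWriterSize(conn, writeBufSize)
	r := bufio.NewReaderSize(conn, readBufSize)
	fr := http2.NewFramer(w, r)
	fr.SetMaxReadFrameSize(maxReadFrameLen) // e.g. 16384
	fr.SetReuseFrames()
	return fr
}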
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 0f33c9c..a30da9e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -73,10 +73,11 @@
 }
 
 // recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often, and using strict
+// recvMsg structs helps avoid allocation in "recvBuffer.put".
 type recvBuffer struct {
 	c       chan recvMsg
 	mu      sync.Mutex
@@ -184,6 +185,19 @@
 	// r.readAdditional acts on that message and returns the necessary error.
 	select {
 	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying the ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
 		return r.readAdditional(m, p)
@@ -220,6 +234,7 @@
 type Stream struct {
 	id           uint32
 	st           ServerTransport    // nil for client side Stream
+	ct           *http2Client       // nil for server side Stream
 	ctx          context.Context    // the associated context of the stream
 	cancel       context.CancelFunc // always nil for client side Stream
 	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
@@ -238,6 +253,10 @@
 
 	headerChan       chan struct{} // closed to indicate the end of header metadata.
 	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).  Not valid on server side.
+	headerValid bool
 
 	// hdrMu protects header and trailer metadata on the server-side.
 	hdrMu sync.Mutex
@@ -290,26 +309,28 @@
 	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }
 
-func (s *Stream) waitOnHeader() error {
+func (s *Stream) waitOnHeader() {
 	if s.headerChan == nil {
 		// On the server headerChan is always nil since a stream originates
 		// only after having received headers.
-		return nil
+		return
 	}
 	select {
 	case <-s.ctx.Done():
-		return ContextErr(s.ctx.Err())
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
 	case <-s.headerChan:
-		return nil
 	}
 }
 
 // RecvCompress returns the compression algorithm applied to the inbound
 // message. It is empty string if there is no compression applied.
 func (s *Stream) RecvCompress() string {
-	if err := s.waitOnHeader(); err != nil {
-		return ""
-	}
+	s.waitOnHeader()
 	return s.recvCompress
 }
 
@@ -330,36 +351,27 @@
 // available. It blocks until i) the metadata is ready or ii) there is no header
 // metadata or iii) the stream is canceled/expired.
 //
-// On server side, it returns the out header after t.WriteHeader is called.
+// On server side, it returns the out header after t.WriteHeader is called.  It
+// does not block and must not be called until after WriteHeader.
 func (s *Stream) Header() (metadata.MD, error) {
-	if s.headerChan == nil && s.header != nil {
+	if s.headerChan == nil {
 		// On server side, return the header in stream. It will be the out
 		// header after t.WriteHeader is called.
 		return s.header.Copy(), nil
 	}
-	err := s.waitOnHeader()
-	// Even if the stream is closed, header is returned if available.
-	select {
-	case <-s.headerChan:
-		if s.header == nil {
-			return nil, nil
-		}
-		return s.header.Copy(), nil
-	default:
+	s.waitOnHeader()
+	if !s.headerValid {
+		return nil, s.status.Err()
 	}
-	return nil, err
+	return s.header.Copy(), nil
 }
 
 // TrailersOnly blocks until a header or trailers-only frame is received and
 // then returns true if the stream was trailers-only.  If the stream ends
-// before headers are received, returns true, nil.  If a context error happens
-// first, returns it as a status error.  Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
-	err := s.waitOnHeader()
-	if err != nil {
-		return false, err
-	}
-	return s.noHeaders, nil
+// before headers are received, returns true.  Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
 }
 
 // Trailer returns the cached trailer metadata. Note that if it is not called
@@ -513,6 +525,7 @@
 	ReadBufferSize        int
 	ChannelzParentID      int64
 	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
 }
 
 // NewServerTransport creates a ServerTransport with conn or non-nil error
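
As the updated comment explains, recvBuffer is an unbounded channel specialized to recvMsg so that put never blocks and never allocates per message. Below is a generic sketch of that put/load pattern with hypothetical names (item, unbounded), assuming the same design and omitting the ctx-cancellation handling discussed above.

package buffersketch

import "sync"

// item stands in for recvMsg; using a concrete struct (rather than an
// interface) is what lets put avoid an allocation per message.
type item struct {
	data []byte
	err  error
}

// unbounded is a channel of items that never blocks the producer: at most one
// item sits in c, everything else queues in backlog under mu.
type unbounded struct {
	mu      sync.Mutex
	c       chan item
	backlog []item
}

func newUnbounded() *unbounded {
	return &unbounded{c: make(chan item, 1)}
}

// put enqueues an item without ever blocking.
func (b *unbounded) put(it item) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- it:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, it)
}

// load moves the next backlogged item into the channel; the consumer calls it
// after every successful receive from get().
func (b *unbounded) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = item{} // allow the element to be collected
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

// get exposes the receive side of the buffer.
func (b *unbounded) get() <-chan item { return b.c }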
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 45baa2a..0044789 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,6 +20,7 @@
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"sync"
 
@@ -31,49 +32,78 @@
 	"google.golang.org/grpc/status"
 )
 
+// v2PickerWrapper wraps a balancer.Picker while providing the
+// balancer.V2Picker API.  It requires a pickerWrapper to generate errors
+// including the latest connectionError.  To be deleted when balancer.Picker is
+// updated to the balancer.V2Picker API.
+type v2PickerWrapper struct {
+	picker  balancer.Picker
+	connErr *connErr
+}
+
+func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	sc, done, err := v.picker.Pick(info.Ctx, info)
+	if err != nil {
+		if err == balancer.ErrTransientFailure {
+			return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
+		}
+		return balancer.PickResult{}, err
+	}
+	return balancer.PickResult{SubConn: sc, Done: done}, nil
+}
+
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblock when there's a picker update.
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
 	blockingCh chan struct{}
-	picker     balancer.Picker
+	picker     balancer.V2Picker
 
-	// The latest connection happened.
-	connErrMu sync.Mutex
-	connErr   error
+	// The latest connection error.  TODO: remove when V1 picker is deprecated;
+	// balancer should be responsible for providing the error.
+	*connErr
 }
 
-func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{blockingCh: make(chan struct{})}
-	return bp
+type connErr struct {
+	mu  sync.Mutex
+	err error
 }
 
-func (bp *pickerWrapper) updateConnectionError(err error) {
-	bp.connErrMu.Lock()
-	bp.connErr = err
-	bp.connErrMu.Unlock()
+func (c *connErr) updateConnectionError(err error) {
+	c.mu.Lock()
+	c.err = err
+	c.mu.Unlock()
 }
 
-func (bp *pickerWrapper) connectionError() error {
-	bp.connErrMu.Lock()
-	err := bp.connErr
-	bp.connErrMu.Unlock()
+func (c *connErr) connectionError() error {
+	c.mu.Lock()
+	err := c.err
+	c.mu.Unlock()
 	return err
 }
 
+func newPickerWrapper() *pickerWrapper {
+	return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
-	bp.mu.Lock()
-	if bp.done {
-		bp.mu.Unlock()
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
+}
+
+// updatePickerV2 is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
+	pw.mu.Lock()
+	if pw.done {
+		pw.mu.Unlock()
 		return
 	}
-	bp.picker = p
-	// bp.blockingCh should never be nil.
-	close(bp.blockingCh)
-	bp.blockingCh = make(chan struct{})
-	bp.mu.Unlock()
+	pw.picker = p
+	// pw.blockingCh should never be nil.
+	close(pw.blockingCh)
+	pw.blockingCh = make(chan struct{})
+	pw.mu.Unlock()
 }
 
 func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
@@ -100,83 +130,85 @@
 // - the current picker returns other errors and failfast is false.
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
 	var ch chan struct{}
 
+	var lastPickErr error
 	for {
-		bp.mu.Lock()
-		if bp.done {
-			bp.mu.Unlock()
+		pw.mu.Lock()
+		if pw.done {
+			pw.mu.Unlock()
 			return nil, nil, ErrClientConnClosing
 		}
 
-		if bp.picker == nil {
-			ch = bp.blockingCh
+		if pw.picker == nil {
+			ch = pw.blockingCh
 		}
-		if ch == bp.blockingCh {
+		if ch == pw.blockingCh {
 			// This could happen when either:
-			// - bp.picker is nil (the previous if condition), or
+			// - pw.picker is nil (the previous if condition), or
 			// - has called pick on the current picker.
-			bp.mu.Unlock()
+			pw.mu.Unlock()
 			select {
 			case <-ctx.Done():
-				if connectionErr := bp.connectionError(); connectionErr != nil {
-					switch ctx.Err() {
-					case context.DeadlineExceeded:
-						return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
-					case context.Canceled:
-						return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
-					}
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else if connectionErr := pw.connectionError(); connectionErr != nil {
+					errStr = "latest connection error: " + connectionErr.Error()
+				} else {
+					errStr = ctx.Err().Error()
 				}
-				return nil, nil, ctx.Err()
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return nil, nil, status.Error(codes.Canceled, errStr)
+				}
 			case <-ch:
 			}
 			continue
 		}
 
-		ch = bp.blockingCh
-		p := bp.picker
-		bp.mu.Unlock()
+		ch = pw.blockingCh
+		p := pw.picker
+		pw.mu.Unlock()
 
-		subConn, done, err := p.Pick(ctx, opts)
+		pickResult, err := p.Pick(info)
 
 		if err != nil {
-			switch err {
-			case balancer.ErrNoSubConnAvailable:
+			if err == balancer.ErrNoSubConnAvailable {
 				continue
-			case balancer.ErrTransientFailure:
+			}
+			if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
 				if !failfast {
+					lastPickErr = err
 					continue
 				}
-				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
-			case context.DeadlineExceeded:
-				return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
-			case context.Canceled:
-				return nil, nil, status.Error(codes.Canceled, err.Error())
-			default:
-				if _, ok := status.FromError(err); ok {
-					return nil, nil, err
-				}
-				// err is some other error.
-				return nil, nil, status.Error(codes.Unknown, err.Error())
+				return nil, nil, status.Error(codes.Unavailable, err.Error())
 			}
+			if _, ok := status.FromError(err); ok {
+				return nil, nil, err
+			}
+			// err is some other error.
+			return nil, nil, status.Error(codes.Unknown, err.Error())
 		}
 
-		acw, ok := subConn.(*acBalancerWrapper)
+		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
 			grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
 			if channelz.IsOn() {
-				return t, doneChannelzWrapper(acw, done), nil
+				return t, doneChannelzWrapper(acw, pickResult.Done), nil
 			}
-			return t, done, nil
+			return t, pickResult.Done, nil
 		}
-		if done != nil {
+		if pickResult.Done != nil {
 			// Calling done with nil error, no bytes sent and no bytes received.
 			// DoneInfo with default value works.
-			done(balancer.DoneInfo{})
+			pickResult.Done(balancer.DoneInfo{})
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
@@ -186,12 +218,12 @@
 	}
 }
 
-func (bp *pickerWrapper) close() {
-	bp.mu.Lock()
-	defer bp.mu.Unlock()
-	if bp.done {
+func (pw *pickerWrapper) close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
 		return
 	}
-	bp.done = true
-	close(bp.blockingCh)
+	pw.done = true
+	close(pw.blockingCh)
 }
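
pickerWrapper now consumes the balancer.V2Picker API: Pick takes a PickInfo and returns a PickResult or a sentinel error such as ErrNoSubConnAvailable. Below is a minimal picker written against that signature, assuming the grpc-go version vendored here; rrPicker and its fields are invented for illustration.

package pickersketch

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// rrPicker is a toy round-robin picker implementing the V2 Pick signature
// consumed by pickerWrapper above.
type rrPicker struct {
	subConns []balancer.SubConn
	next     uint32
}

func (p *rrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.subConns) == 0 {
		// Tells the wrapper to block until a new picker is installed.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint32(&p.next, 1)
	sc := p.subConns[int(n)%len(p.subConns)]
	return balancer.PickResult{
		SubConn: sc,
		Done: func(di balancer.DoneInfo) {
			// Per-RPC completion hook; a real picker might record di.Err here.
		},
	}, nil
}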
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index d1e38aa..c43dac9 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -19,12 +19,14 @@
 package grpc
 
 import (
-	"context"
+	"errors"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
 // PickFirstBalancerName is the name of the pick_first balancer.
@@ -45,48 +47,98 @@
 }
 
 type pickfirstBalancer struct {
-	cc balancer.ClientConn
-	sc balancer.SubConn
+	state connectivity.State
+	cc    balancer.ClientConn
+	sc    balancer.SubConn
 }
 
+var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
+
 func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
 	if err != nil {
-		grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
+		b.ResolverError(err)
 		return
 	}
-	if b.sc == nil {
-		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
-		if err != nil {
-			//TODO(yuxuanli): why not change the cc state to Idle?
-			grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
-			return
-		}
-		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
-		b.sc.Connect()
-	} else {
-		b.sc.UpdateAddresses(addrs)
-		b.sc.Connect()
-	}
+	b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
 }
 
 func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+	switch b.state {
+	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
+		// Set a failing picker if we don't have a good picker.
+		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+			Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
+		)
+	}
+	if grpclog.V(2) {
+		grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
+	}
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
+	if len(cs.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	if b.sc == nil {
+		var err error
+		b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
+		if err != nil {
+			if grpclog.V(2) {
+				grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+			}
+			b.state = connectivity.TransientFailure
+			b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+				Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
+			)
+			return balancer.ErrBadResolverState
+		}
+		b.state = connectivity.Idle
+		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
+		b.sc.Connect()
+	} else {
+		b.sc.UpdateAddresses(cs.ResolverState.Addresses)
+		b.sc.Connect()
+	}
+	return nil
+}
+
+func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
+	if grpclog.V(2) {
+		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	}
 	if b.sc != sc {
-		grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		if grpclog.V(2) {
+			grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		}
 		return
 	}
-	if s == connectivity.Shutdown {
+	b.state = s.ConnectivityState
+	if s.ConnectivityState == connectivity.Shutdown {
 		b.sc = nil
 		return
 	}
 
-	switch s {
+	switch s.ConnectivityState {
 	case connectivity.Ready, connectivity.Idle:
-		b.cc.UpdateBalancerState(s, &picker{sc: sc})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
 	case connectivity.Connecting:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
 	case connectivity.TransientFailure:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
+		err := balancer.ErrTransientFailure
+		// TODO: this can be unconditional after the V1 API is removed, as
+		// SubConnState will always contain a connection error.
+		if s.ConnectionError != nil {
+			err = balancer.TransientFailureError(s.ConnectionError)
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: s.ConnectivityState,
+			Picker:            &picker{err: err},
+		})
 	}
 }
 
@@ -94,15 +146,12 @@
 }
 
 type picker struct {
-	err error
-	sc  balancer.SubConn
+	result balancer.PickResult
+	err    error
 }
 
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if p.err != nil {
-		return nil, nil, p.err
-	}
-	return p.sc, nil, nil
+func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
 }
 
 func init() {
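
When a SubConn enters TransientFailure, the balancer above wraps the connection error with balancer.TransientFailureError and installs a picker that keeps returning it, which the pick loop then surfaces as codes.Unavailable. A sketch of that hand-off under the same assumed API; failingPicker and reportFailure are hypothetical names.

package pickfirstsketch

import (
	"errors"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

// failingPicker mirrors the fixed-result picker above: every Pick returns the
// stored error until the balancer installs a new picker.
type failingPicker struct{ err error }

func (p *failingPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{}, p.err
}

// reportFailure shows the TransientFailure hand-off: the connection error is
// wrapped so that pickerWrapper.pick surfaces it to the RPC as Unavailable.
func reportFailure(cc balancer.ClientConn, connErr error) {
	if connErr == nil {
		connErr = errors.New("connection error unavailable") // hypothetical fallback
	}
	cc.UpdateState(balancer.State{
		ConnectivityState: connectivity.TransientFailure,
		Picker:            &failingPicker{err: balancer.TransientFailureError(connErr)},
	})
}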
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
index ae5aa7d..0a12ad2 100644
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
@@ -3,13 +3,14 @@
 
 package grpc_reflection_v1alpha
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
 import (
-	context "golang.org/x/net/context"
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
 	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -21,7 +22,7 @@
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 // The message sent by the client when calling ServerReflectionInfo method.
 type ServerReflectionRequest struct {
@@ -46,16 +47,17 @@
 func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
 func (*ServerReflectionRequest) ProtoMessage()    {}
 func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{0}
+	return fileDescriptor_42a8ac412db3cb03, []int{0}
 }
+
 func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b)
 }
 func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic)
 }
-func (dst *ServerReflectionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerReflectionRequest.Merge(dst, src)
+func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerReflectionRequest.Merge(m, src)
 }
 func (m *ServerReflectionRequest) XXX_Size() int {
 	return xxx_messageInfo_ServerReflectionRequest.Size(m)
@@ -149,9 +151,9 @@
 	return ""
 }
 
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ServerReflectionRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ServerReflectionRequest_OneofMarshaler, _ServerReflectionRequest_OneofUnmarshaler, _ServerReflectionRequest_OneofSizer, []interface{}{
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
 		(*ServerReflectionRequest_FileByFilename)(nil),
 		(*ServerReflectionRequest_FileContainingSymbol)(nil),
 		(*ServerReflectionRequest_FileContainingExtension)(nil),
@@ -160,110 +162,6 @@
 	}
 }
 
-func _ServerReflectionRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ServerReflectionRequest)
-	// message_request
-	switch x := m.MessageRequest.(type) {
-	case *ServerReflectionRequest_FileByFilename:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		b.EncodeStringBytes(x.FileByFilename)
-	case *ServerReflectionRequest_FileContainingSymbol:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		b.EncodeStringBytes(x.FileContainingSymbol)
-	case *ServerReflectionRequest_FileContainingExtension:
-		b.EncodeVarint(5<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.FileContainingExtension); err != nil {
-			return err
-		}
-	case *ServerReflectionRequest_AllExtensionNumbersOfType:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		b.EncodeStringBytes(x.AllExtensionNumbersOfType)
-	case *ServerReflectionRequest_ListServices:
-		b.EncodeVarint(7<<3 | proto.WireBytes)
-		b.EncodeStringBytes(x.ListServices)
-	case nil:
-	default:
-		return fmt.Errorf("ServerReflectionRequest.MessageRequest has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ServerReflectionRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ServerReflectionRequest)
-	switch tag {
-	case 3: // message_request.file_by_filename
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeStringBytes()
-		m.MessageRequest = &ServerReflectionRequest_FileByFilename{x}
-		return true, err
-	case 4: // message_request.file_containing_symbol
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeStringBytes()
-		m.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{x}
-		return true, err
-	case 5: // message_request.file_containing_extension
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ExtensionRequest)
-		err := b.DecodeMessage(msg)
-		m.MessageRequest = &ServerReflectionRequest_FileContainingExtension{msg}
-		return true, err
-	case 6: // message_request.all_extension_numbers_of_type
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeStringBytes()
-		m.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{x}
-		return true, err
-	case 7: // message_request.list_services
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeStringBytes()
-		m.MessageRequest = &ServerReflectionRequest_ListServices{x}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ServerReflectionRequest)
-	// message_request
-	switch x := m.MessageRequest.(type) {
-	case *ServerReflectionRequest_FileByFilename:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.FileByFilename)))
-		n += len(x.FileByFilename)
-	case *ServerReflectionRequest_FileContainingSymbol:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.FileContainingSymbol)))
-		n += len(x.FileContainingSymbol)
-	case *ServerReflectionRequest_FileContainingExtension:
-		s := proto.Size(x.FileContainingExtension)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ServerReflectionRequest_AllExtensionNumbersOfType:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.AllExtensionNumbersOfType)))
-		n += len(x.AllExtensionNumbersOfType)
-	case *ServerReflectionRequest_ListServices:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.ListServices)))
-		n += len(x.ListServices)
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
 // The type name and extension number sent by the client when requesting
 // file_containing_extension.
 type ExtensionRequest struct {
@@ -279,16 +177,17 @@
 func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
 func (*ExtensionRequest) ProtoMessage()    {}
 func (*ExtensionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{1}
+	return fileDescriptor_42a8ac412db3cb03, []int{1}
 }
+
 func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b)
 }
 func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic)
 }
-func (dst *ExtensionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionRequest.Merge(dst, src)
+func (m *ExtensionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRequest.Merge(m, src)
 }
 func (m *ExtensionRequest) XXX_Size() int {
 	return xxx_messageInfo_ExtensionRequest.Size(m)
@@ -335,16 +234,17 @@
 func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
 func (*ServerReflectionResponse) ProtoMessage()    {}
 func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{2}
+	return fileDescriptor_42a8ac412db3cb03, []int{2}
 }
+
 func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b)
 }
 func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic)
 }
-func (dst *ServerReflectionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerReflectionResponse.Merge(dst, src)
+func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerReflectionResponse.Merge(m, src)
 }
 func (m *ServerReflectionResponse) XXX_Size() int {
 	return xxx_messageInfo_ServerReflectionResponse.Size(m)
@@ -433,9 +333,9 @@
 	return nil
 }
 
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ServerReflectionResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ServerReflectionResponse_OneofMarshaler, _ServerReflectionResponse_OneofUnmarshaler, _ServerReflectionResponse_OneofSizer, []interface{}{
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
 		(*ServerReflectionResponse_FileDescriptorResponse)(nil),
 		(*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
 		(*ServerReflectionResponse_ListServicesResponse)(nil),
@@ -443,108 +343,6 @@
 	}
 }
 
-func _ServerReflectionResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ServerReflectionResponse)
-	// message_response
-	switch x := m.MessageResponse.(type) {
-	case *ServerReflectionResponse_FileDescriptorResponse:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.FileDescriptorResponse); err != nil {
-			return err
-		}
-	case *ServerReflectionResponse_AllExtensionNumbersResponse:
-		b.EncodeVarint(5<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.AllExtensionNumbersResponse); err != nil {
-			return err
-		}
-	case *ServerReflectionResponse_ListServicesResponse:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ListServicesResponse); err != nil {
-			return err
-		}
-	case *ServerReflectionResponse_ErrorResponse:
-		b.EncodeVarint(7<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ErrorResponse); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("ServerReflectionResponse.MessageResponse has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ServerReflectionResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ServerReflectionResponse)
-	switch tag {
-	case 4: // message_response.file_descriptor_response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(FileDescriptorResponse)
-		err := b.DecodeMessage(msg)
-		m.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{msg}
-		return true, err
-	case 5: // message_response.all_extension_numbers_response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ExtensionNumberResponse)
-		err := b.DecodeMessage(msg)
-		m.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{msg}
-		return true, err
-	case 6: // message_response.list_services_response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ListServiceResponse)
-		err := b.DecodeMessage(msg)
-		m.MessageResponse = &ServerReflectionResponse_ListServicesResponse{msg}
-		return true, err
-	case 7: // message_response.error_response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ErrorResponse)
-		err := b.DecodeMessage(msg)
-		m.MessageResponse = &ServerReflectionResponse_ErrorResponse{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ServerReflectionResponse_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ServerReflectionResponse)
-	// message_response
-	switch x := m.MessageResponse.(type) {
-	case *ServerReflectionResponse_FileDescriptorResponse:
-		s := proto.Size(x.FileDescriptorResponse)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ServerReflectionResponse_AllExtensionNumbersResponse:
-		s := proto.Size(x.AllExtensionNumbersResponse)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ServerReflectionResponse_ListServicesResponse:
-		s := proto.Size(x.ListServicesResponse)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ServerReflectionResponse_ErrorResponse:
-		s := proto.Size(x.ErrorResponse)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
 // Serialized FileDescriptorProto messages sent by the server answering
 // a file_by_filename, file_containing_symbol, or file_containing_extension
 // request.
@@ -562,16 +360,17 @@
 func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
 func (*FileDescriptorResponse) ProtoMessage()    {}
 func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{3}
+	return fileDescriptor_42a8ac412db3cb03, []int{3}
 }
+
 func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b)
 }
 func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic)
 }
-func (dst *FileDescriptorResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorResponse.Merge(dst, src)
+func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorResponse.Merge(m, src)
 }
 func (m *FileDescriptorResponse) XXX_Size() int {
 	return xxx_messageInfo_FileDescriptorResponse.Size(m)
@@ -605,16 +404,17 @@
 func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
 func (*ExtensionNumberResponse) ProtoMessage()    {}
 func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{4}
+	return fileDescriptor_42a8ac412db3cb03, []int{4}
 }
+
 func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b)
 }
 func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic)
 }
-func (dst *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionNumberResponse.Merge(dst, src)
+func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionNumberResponse.Merge(m, src)
 }
 func (m *ExtensionNumberResponse) XXX_Size() int {
 	return xxx_messageInfo_ExtensionNumberResponse.Size(m)
@@ -653,16 +453,17 @@
 func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
 func (*ListServiceResponse) ProtoMessage()    {}
 func (*ListServiceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{5}
+	return fileDescriptor_42a8ac412db3cb03, []int{5}
 }
+
 func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b)
 }
 func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic)
 }
-func (dst *ListServiceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ListServiceResponse.Merge(dst, src)
+func (m *ListServiceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListServiceResponse.Merge(m, src)
 }
 func (m *ListServiceResponse) XXX_Size() int {
 	return xxx_messageInfo_ListServiceResponse.Size(m)
@@ -695,16 +496,17 @@
 func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
 func (*ServiceResponse) ProtoMessage()    {}
 func (*ServiceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{6}
+	return fileDescriptor_42a8ac412db3cb03, []int{6}
 }
+
 func (m *ServiceResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ServiceResponse.Unmarshal(m, b)
 }
 func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic)
 }
-func (dst *ServiceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceResponse.Merge(dst, src)
+func (m *ServiceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceResponse.Merge(m, src)
 }
 func (m *ServiceResponse) XXX_Size() int {
 	return xxx_messageInfo_ServiceResponse.Size(m)
@@ -736,16 +538,17 @@
 func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
 func (*ErrorResponse) ProtoMessage()    {}
 func (*ErrorResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_reflection_178bd1e101bf8b63, []int{7}
+	return fileDescriptor_42a8ac412db3cb03, []int{7}
 }
+
 func (m *ErrorResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ErrorResponse.Unmarshal(m, b)
 }
 func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic)
 }
-func (dst *ErrorResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ErrorResponse.Merge(dst, src)
+func (m *ErrorResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ErrorResponse.Merge(m, src)
 }
 func (m *ErrorResponse) XXX_Size() int {
 	return xxx_messageInfo_ErrorResponse.Size(m)
@@ -781,6 +584,55 @@
 	proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse")
 }
 
+func init() {
+	proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03)
+}
+
+var fileDescriptor_42a8ac412db3cb03 = []byte{
+	// 656 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
+	0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
+	0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e,
+	0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e,
+	0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7,
+	0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7,
+	0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e,
+	0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94,
+	0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43,
+	0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36,
+	0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d,
+	0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3,
+	0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd,
+	0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde,
+	0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd,
+	0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc,
+	0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8,
+	0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f,
+	0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33,
+	0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb,
+	0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58,
+	0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a,
+	0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce,
+	0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e,
+	0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25,
+	0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d,
+	0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8,
+	0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62,
+	0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77,
+	0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97,
+	0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0,
+	0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39,
+	0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2,
+	0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad,
+	0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29,
+	0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0,
+	0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73,
+	0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d,
+	0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6,
+	0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5,
+	0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -844,6 +696,14 @@
 	ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error
 }
 
+// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations.
+type UnimplementedServerReflectionServer struct {
+}
+
+func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error {
+	return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented")
+}
+
 func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) {
 	s.RegisterService(&_ServerReflection_serviceDesc, srv)
 }
@@ -888,52 +748,3 @@
 	},
 	Metadata: "grpc_reflection_v1alpha/reflection.proto",
 }
-
-func init() {
-	proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_reflection_178bd1e101bf8b63)
-}
-
-var fileDescriptor_reflection_178bd1e101bf8b63 = []byte{
-	// 656 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
-	0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
-	0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e,
-	0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e,
-	0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7,
-	0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7,
-	0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e,
-	0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94,
-	0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43,
-	0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36,
-	0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d,
-	0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3,
-	0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd,
-	0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde,
-	0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd,
-	0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc,
-	0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8,
-	0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f,
-	0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33,
-	0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb,
-	0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58,
-	0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a,
-	0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce,
-	0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e,
-	0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25,
-	0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d,
-	0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8,
-	0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62,
-	0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77,
-	0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97,
-	0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0,
-	0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39,
-	0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2,
-	0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad,
-	0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29,
-	0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0,
-	0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73,
-	0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d,
-	0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6,
-	0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5,
-	0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00,
-}
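
The regenerated stub adds UnimplementedServerReflectionServer, the forward-compatibility pattern of newer protoc-gen-go output: embed it in an implementation and any method not overridden answers with codes.Unimplemented instead of breaking the build when the service definition grows. A hedged illustration follows; partialServer and register are hypothetical, and real applications use the grpc/reflection package rather than registering this service by hand.

package reflectionsketch

import (
	"google.golang.org/grpc"

	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// partialServer embeds the generated Unimplemented type, so it satisfies
// rpb.ServerReflectionServer even as the service grows; every method it does
// not override returns codes.Unimplemented.
type partialServer struct {
	rpb.UnimplementedServerReflectionServer
}

// register wires the placeholder implementation into a *grpc.Server.
func register(s *grpc.Server) {
	rpb.RegisterServerReflectionServer(s, &partialServer{})
}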
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index e83da34..fe14b2f 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -21,6 +21,11 @@
 package resolver
 
 import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -69,12 +74,18 @@
 }
 
 // AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
 type AddressType uint8
 
 const (
 	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
 	Backend AddressType = iota
 	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: use Attributes in Address instead.
 	GRPCLB
 )
 
@@ -83,33 +94,75 @@
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
-	// Type is the type of this address.
-	Type AddressType
+
 	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
 	//
-	// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+	// If Type is GRPCLB, ServerName should be the name of the remote load
 	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
 	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
+	//
+	// Deprecated: use Attributes instead.
 	Metadata interface{}
 }
 
-// BuildOption includes additional information for the builder to create
+// BuildOptions includes additional information for the builder to create
 // the resolver.
-type BuildOption struct {
-	// DisableServiceConfig indicates whether resolver should fetch service config data.
+type BuildOptions struct {
+	// DisableServiceConfig indicates whether a resolver implementation should
+	// fetch service config data.
 	DisableServiceConfig bool
+	// DialCreds is the transport credentials used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithTransportCredentials). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithCredentialsBundle). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer used by the ClientConn for dialling the
+	// target gRPC service (set via WithDialer). In cases where a name
+	// resolution service requires the same dialer, the resolver may use this
+	// field. In most cases though, it is not appropriate, and this field may
+	// be ignored.
+	Dialer func(context.Context, string) (net.Conn, error)
 }
 
 // State contains the current Resolver state relevant to the ClientConn.
 type State struct {
-	Addresses []Address // Resolved addresses for the target
-	// ServiceConfig is the parsed service config; obtained from
-	// serviceconfig.Parse.
-	ServiceConfig serviceconfig.Config
+	// Addresses is the latest set of resolved addresses for the target.
+	Addresses []Address
 
-	// TODO: add Err error
+	// ServiceConfig contains the result from parsing the latest service
+	// config.  If it is nil, it indicates no service config is present or the
+	// resolver does not provide service configs.
+	ServiceConfig *serviceconfig.ParseResult
+
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
@@ -122,6 +175,10 @@
 type ClientConn interface {
 	// UpdateState updates the state of the ClientConn appropriately.
 	UpdateState(State)
+	// ReportError notifies the ClientConn that the Resolver encountered an
+	// error.  The ClientConn will notify the load balancer and begin calling
+	// ResolveNow on the Resolver with exponential backoff.
+	ReportError(error)
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
 	// The address list should be the complete list of resolved addresses.
@@ -133,6 +190,9 @@
 	//
 	// Deprecated: Use UpdateState instead.
 	NewServiceConfig(serviceConfig string)
+	// ParseServiceConfig parses the provided service config and returns an
+	// object that provides the parsed config.
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
 }
 
 // Target represents a target for gRPC, as specified in:
@@ -164,14 +224,14 @@
 	//
 	// gRPC dial calls Build synchronously, and fails if the returned error is
 	// not nil.
-	Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
+	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
 	// Scheme returns the scheme supported by this resolver.
 	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
 	Scheme() string
 }
 
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
 
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
@@ -180,7 +240,7 @@
 	// again. It's just a hint, resolver can ignore this if it's not necessary.
 	//
 	// It could be called multiple times concurrently.
-	ResolveNow(ResolveNowOption)
+	ResolveNow(ResolveNowOptions)
 	// Close closes the resolver.
 	Close()
 }
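
The resolver API above renames BuildOption/ResolveNowOption to BuildOptions/ResolveNowOptions, adds Attributes to Address and State, and adds ReportError and ParseServiceConfig to ClientConn. Below is a minimal, hypothetical sketch of how a resolver implementation might use the new surface; exampleBuilder, exampleResolver, and the address/config values are illustrative only and not part of this change.

package example

import (
	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

type exampleBuilder struct{}

func init() { resolver.Register(exampleBuilder{}) }

func (exampleBuilder) Scheme() string { return "example" }

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	r := &exampleResolver{cc: cc}
	r.ResolveNow(resolver.ResolveNowOptions{})
	return r, nil
}

type exampleResolver struct {
	cc resolver.ClientConn
}

func (r *exampleResolver) ResolveNow(resolver.ResolveNowOptions) {
	// Attributes replaces the deprecated Type/Metadata fields on Address.
	addrs := []resolver.Address{{
		Addr:       "10.0.0.1:443", // placeholder backend address
		Attributes: attributes.New("weight", 10),
	}}
	// ParseServiceConfig returns a *serviceconfig.ParseResult; a parse failure
	// is reported in its Err field rather than through a separate error return.
	sc := r.cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`)
	r.cc.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
}

func (r *exampleResolver) Close() {}
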
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index 6934905..3eaf724 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -21,22 +21,29 @@
 import (
 	"fmt"
 	"strings"
-	"sync/atomic"
+	"sync"
+	"time"
 
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 // ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConnection interface.
+// It implements the resolver.ClientConn interface.
 type ccResolverWrapper struct {
-	cc       *ClientConn
-	resolver resolver.Resolver
-	addrCh   chan []resolver.Address
-	scCh     chan string
-	done     uint32 // accessed atomically; set to 1 when closed.
-	curState resolver.State
+	cc         *ClientConn
+	resolverMu sync.Mutex
+	resolver   resolver.Resolver
+	done       *grpcsync.Event
+	curState   resolver.State
+
+	pollingMu sync.Mutex
+	polling   chan struct{}
 }
 
 // split2 returns the values from strings.SplitN(s, sep, 2).
@@ -67,60 +74,126 @@
 	return ret
 }
 
-// newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme and builds the resolver. The monitoring goroutine
-// for it is not started yet and can be created by calling start().
-//
-// If withResolverBuilder dial option is set, the specified resolver will be
-// used instead.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	rb := cc.dopts.resolverBuilder
-	if rb == nil {
-		return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
+// returns a ccResolverWrapper object which wraps the newly built resolver.
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+	ccr := &ccResolverWrapper{
+		cc:   cc,
+		done: grpcsync.NewEvent(),
 	}
 
-	ccr := &ccResolverWrapper{
-		cc:     cc,
-		addrCh: make(chan []resolver.Address, 1),
-		scCh:   make(chan string, 1),
+	var credsClone credentials.TransportCredentials
+	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+		credsClone = creds.Clone()
+	}
+	rbo := resolver.BuildOptions{
+		DisableServiceConfig: cc.dopts.disableServiceConfig,
+		DialCreds:            credsClone,
+		CredsBundle:          cc.dopts.copts.CredsBundle,
+		Dialer:               cc.dopts.copts.Dialer,
 	}
 
 	var err error
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
+	// We need to hold the lock here while we assign to the ccr.resolver field
+	// to guard against a data race caused by the following code path:
+	// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, which would
+	// end up accessing ccr.resolver while it is being assigned here.
+	ccr.resolverMu.Lock()
+	defer ccr.resolverMu.Unlock()
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
 	if err != nil {
 		return nil, err
 	}
 	return ccr, nil
 }
 
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
-	ccr.resolver.ResolveNow(o)
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+	ccr.resolverMu.Lock()
+	if !ccr.done.HasFired() {
+		ccr.resolver.ResolveNow(o)
+	}
+	ccr.resolverMu.Unlock()
 }
 
 func (ccr *ccResolverWrapper) close() {
+	ccr.resolverMu.Lock()
 	ccr.resolver.Close()
-	atomic.StoreUint32(&ccr.done, 1)
+	ccr.done.Fire()
+	ccr.resolverMu.Unlock()
 }
 
-func (ccr *ccResolverWrapper) isDone() bool {
-	return atomic.LoadUint32(&ccr.done) == 1
+// poll begins or ends asynchronous polling of the resolver based on whether
+// err is ErrBadResolverState.
+func (ccr *ccResolverWrapper) poll(err error) {
+	ccr.pollingMu.Lock()
+	defer ccr.pollingMu.Unlock()
+	if err != balancer.ErrBadResolverState {
+		// stop polling
+		if ccr.polling != nil {
+			close(ccr.polling)
+			ccr.polling = nil
+		}
+		return
+	}
+	if ccr.polling != nil {
+		// already polling
+		return
+	}
+	p := make(chan struct{})
+	ccr.polling = p
+	go func() {
+		for i := 0; ; i++ {
+			ccr.resolveNow(resolver.ResolveNowOptions{})
+			t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
+			select {
+			case <-p:
+				t.Stop()
+				return
+			case <-ccr.done.Done():
+				// Resolver has been closed.
+				t.Stop()
+				return
+			case <-t.C:
+				select {
+				case <-p:
+					return
+				default:
+				}
+				// Timer expired; re-resolve.
+			}
+		}
+	}()
 }
 
 func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
 	grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(s)
 	}
-	ccr.cc.updateResolverState(s)
 	ccr.curState = s
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ReportError(err error) {
+	if ccr.done.HasFired() {
+		return
+	}
+	grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err)
+	if channelz.IsOn() {
+		channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Resolver reported error: %v", err),
+			Severity: channelz.CtWarning,
+		})
+	}
+	ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
 }
 
 // NewAddress is called by the resolver implementation to send addresses to gRPC.
 func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
 	grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
@@ -128,31 +201,53 @@
 		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
 	}
 	ccr.curState.Addresses = addrs
-	ccr.cc.updateResolverState(ccr.curState)
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
 }
 
 // NewServiceConfig is called by the resolver implementation to send service
 // configs to gRPC.
 func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
 	grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-	c, err := parseServiceConfig(sc)
-	if err != nil {
+	if ccr.cc.dopts.disableServiceConfig {
+		grpclog.Infof("Service config lookups disabled; ignoring config")
+		return
+	}
+	scpr := parseServiceConfig(sc)
+	if scpr.Err != nil {
+		grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err)
+		if channelz.IsOn() {
+			channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Error parsing service config: %v", scpr.Err),
+				Severity: channelz.CtWarning,
+			})
+		}
+		ccr.poll(balancer.ErrBadResolverState)
 		return
 	}
 	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
 	}
-	ccr.curState.ServiceConfig = c
-	ccr.cc.updateResolverState(ccr.curState)
+	ccr.curState.ServiceConfig = scpr
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+	return parseServiceConfig(scJSON)
 }
 
 func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
 	var updates []string
-	oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
-	newSC, newOK := s.ServiceConfig.(*ServiceConfig)
+	var oldSC, newSC *ServiceConfig
+	var oldOK, newOK bool
+	if ccr.curState.ServiceConfig != nil {
+		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if s.ServiceConfig != nil {
+		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+	}
 	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
 		updates = append(updates, "service config updated")
 	}
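
The ccResolverWrapper.poll logic above re-resolves with exponential backoff until polling is stopped or the wrapper is closed. The following is a standalone, simplified sketch of that loop pattern; the backoff function here is a hypothetical stand-in for the configurable resolveNowBackoff policy and is not part of this change.

package example

import "time"

// pollResolver repeatedly invokes resolveNow, waiting with exponential backoff
// between attempts, until either stop or done is closed.
func pollResolver(resolveNow func(), stop, done <-chan struct{}) {
	backoff := func(retries int) time.Duration {
		d := 100 * time.Millisecond << uint(retries)
		if d <= 0 || d > 30*time.Second {
			d = 30 * time.Second
		}
		return d
	}
	for i := 0; ; i++ {
		resolveNow()
		t := time.NewTimer(backoff(i))
		select {
		case <-stop:
			t.Stop()
			return
		case <-done:
			t.Stop()
			return
		case <-t.C:
			// Timer expired; loop and re-resolve.
		}
	}
}
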
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 088c3f1..d3a4adc 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -648,35 +648,58 @@
 		return nil, st.Err()
 	}
 
+	var size int
 	if pf == compressionMade {
 		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
 		// use this decompressor as the default.
 		if dc != nil {
 			d, err = dc.Do(bytes.NewReader(d))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
+			size = len(d)
 		} else {
-			dcReader, err := compressor.Decompress(bytes.NewReader(d))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
-			// Read from LimitReader with limit max+1. So if the underlying
-			// reader is over limit, the result will be bigger than max.
-			d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
+			d, size, err = decompress(compressor, d, maxReceiveMessageSize)
 		}
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+		}
+	} else {
+		size = len(d)
 	}
-	if len(d) > maxReceiveMessageSize {
+	if size > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
 	}
 	return d, nil
 }
 
+// decompress uses compressor to decompress d, returning the data and its size.
+// If the decompressed size can be determined in advance and exceeds maxReceiveMessageSize, only the size is returned (with nil data).
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
+	dcReader, err := compressor.Decompress(bytes.NewReader(d))
+	if err != nil {
+		return nil, 0, err
+	}
+	if sizer, ok := compressor.(interface {
+		DecompressedSize(compressedBytes []byte) int
+	}); ok {
+		if size := sizer.DecompressedSize(d); size >= 0 {
+			if size > maxReceiveMessageSize {
+				return nil, size, nil
+			}
+			// size is used as an estimate to size the buffer, but we
+			// will read more data if available.
+			// +MinRead so ReadFrom will not reallocate if size is correct.
+			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+			return buf.Bytes(), int(bytesRead), err
+		}
+	}
+	// Read from LimitReader with limit max+1. So if the underlying
+	// reader is over limit, the result will be bigger than max.
+	d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	return d, len(d), err
+}
+
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
@@ -848,7 +871,7 @@
 
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used.  The latest
-// support package version is 5.
+// support package version is 6.
 //
 // Older versions are kept for compatibility. They may be removed if
 // compatibility cannot be maintained.
@@ -858,6 +881,7 @@
 	SupportPackageIsVersion3 = true
 	SupportPackageIsVersion4 = true
 	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
 )
 
 const grpcUA = "grpc-go/" + Version
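
The new decompress helper above probes the compressor, via a type assertion, for an optional DecompressedSize method so it can pre-size the read buffer or reject oversized messages without decompressing. A hypothetical sketch of a compressor wrapper exposing that method follows; sizedGzip is illustrative only, and it reads the gzip ISIZE trailer, which records the uncompressed length modulo 2^32.

package example

import (
	"encoding/binary"

	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// sizedGzip wraps the registered gzip compressor and adds the optional
// DecompressedSize method probed by the decompress helper.
type sizedGzip struct {
	encoding.Compressor
}

// DecompressedSize returns the uncompressed length recorded in the gzip
// trailer (ISIZE, last four bytes, little-endian), or -1 if unknown.
func (sizedGzip) DecompressedSize(compressed []byte) int {
	if len(compressed) < 4 {
		return -1
	}
	return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
}

func init() {
	// Re-register "gzip" with size information attached.
	encoding.RegisterCompressor(sizedGzip{encoding.GetCompressor("gzip")})
}
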
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 617289e..0d75cb1 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -42,6 +42,7 @@
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
@@ -56,6 +57,8 @@
 	defaultServerMaxSendMessageSize    = math.MaxInt32
 )
 
+var statusOK = status.New(codes.OK, "")
+
 type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
 
 // MethodDesc represents an RPC service's method specification.
@@ -97,10 +100,8 @@
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
 
-	quit               chan struct{}
-	done               chan struct{}
-	quitOnce           sync.Once
-	doneOnce           sync.Once
+	quit               *grpcsync.Event
+	done               *grpcsync.Event
 	channelzRemoveOnce sync.Once
 	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
 
@@ -129,6 +130,7 @@
 	readBufferSize        int
 	connectionTimeout     time.Duration
 	maxHeaderListSize     *uint32
+	headerTableSize       *uint32
 }
 
 var defaultServerOptions = serverOptions{
@@ -342,8 +344,8 @@
 // unknown service handler. The provided method is a bidi-streaming RPC service
 // handler that will be invoked instead of returning the "unimplemented" gRPC
 // error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
 func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.unknownStreamDesc = &StreamDesc{
@@ -376,6 +378,16 @@
 	})
 }
 
+// HeaderTableSize returns a ServerOption that sets the size of the HTTP/2
+// dynamic header table used by the server.
+//
+// This API is EXPERIMENTAL.
+func HeaderTableSize(s uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.headerTableSize = &s
+	})
+}
+
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
@@ -388,8 +400,8 @@
 		opts:   opts,
 		conns:  make(map[transport.ServerTransport]bool),
 		m:      make(map[string]*service),
-		quit:   make(chan struct{}),
-		done:   make(chan struct{}),
+		quit:   grpcsync.NewEvent(),
+		done:   grpcsync.NewEvent(),
 		czData: new(channelzData),
 	}
 	s.cv = sync.NewCond(&s.mu)
@@ -556,11 +568,9 @@
 	s.serveWG.Add(1)
 	defer func() {
 		s.serveWG.Done()
-		select {
-		// Stop or GracefulStop called; block until done and return nil.
-		case <-s.quit:
-			<-s.done
-		default:
+		if s.quit.HasFired() {
+			// Stop or GracefulStop called; block until done and return nil.
+			<-s.done.Done()
 		}
 	}()
 
@@ -603,7 +613,7 @@
 				timer := time.NewTimer(tempDelay)
 				select {
 				case <-timer.C:
-				case <-s.quit:
+				case <-s.quit.Done():
 					timer.Stop()
 					return nil
 				}
@@ -613,10 +623,8 @@
 			s.printf("done serving; Accept = %v", err)
 			s.mu.Unlock()
 
-			select {
-			case <-s.quit:
+			if s.quit.HasFired() {
 				return nil
-			default:
 			}
 			return err
 		}
@@ -637,6 +645,10 @@
 // handleRawConn forks a goroutine to handle a just-accepted connection that
 // has not had any I/O performed on it yet.
 func (s *Server) handleRawConn(rawConn net.Conn) {
+	if s.quit.HasFired() {
+		rawConn.Close()
+		return
+	}
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
 	if err != nil {
@@ -653,14 +665,6 @@
 		return
 	}
 
-	s.mu.Lock()
-	if s.conns == nil {
-		s.mu.Unlock()
-		conn.Close()
-		return
-	}
-	s.mu.Unlock()
-
 	// Finish handshaking (HTTP2)
 	st := s.newHTTP2Transport(conn, authInfo)
 	if st == nil {
@@ -693,6 +697,7 @@
 		ReadBufferSize:        s.opts.readBufferSize,
 		ChannelzParentID:      s.channelzID,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
+		HeaderTableSize:       s.opts.headerTableSize,
 	}
 	st, err := transport.NewServerTransport("http2", c, config)
 	if err != nil {
@@ -768,6 +773,9 @@
 // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
 // If tracing is not enabled, it returns nil.
 func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
+	if !EnableTracing {
+		return nil
+	}
 	tr, ok := trace.FromContext(stream.Context())
 	if !ok {
 		return nil
@@ -857,41 +865,58 @@
 }
 
 func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
-	if channelz.IsOn() {
-		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
-	}
 	sh := s.opts.statsHandler
-	if sh != nil {
-		beginTime := time.Now()
-		begin := &stats.Begin{
-			BeginTime: beginTime,
+	if sh != nil || trInfo != nil || channelz.IsOn() {
+		if channelz.IsOn() {
+			s.incrCallsStarted()
 		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
+		var statsBegin *stats.Begin
+		if sh != nil {
+			beginTime := time.Now()
+			statsBegin = &stats.Begin{
 				BeginTime: beginTime,
-				EndTime:   time.Now(),
 			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
-	}
-	if trInfo != nil {
-		defer trInfo.tr.Finish()
-		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+			sh.HandleRPC(stream.Context(), statsBegin)
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		}
+		// The deferred error handling for tracing, stats handler and channelz is
+		// combined into one function to reduce stack usage -- a defer takes ~56-64
+		// bytes on the stack, so overflowing the stack will require a stack
+		// re-allocation, which is expensive.
+		//
+		// To maintain behavior similar to separate deferred statements, statements
+		// should be executed in the reverse order. That is, tracing first, stats
+		// handler second, and channelz last. Note that panics *within* defers will
+		// lead to different behavior, but that's an acceptable compromise; that
+		// would be undefined behavior territory anyway.
 		defer func() {
-			if err != nil && err != io.EOF {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
+			if trInfo != nil {
+				if err != nil && err != io.EOF {
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					trInfo.tr.SetError()
+				}
+				trInfo.tr.Finish()
+			}
+
+			if sh != nil {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(stream.Context(), end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
 			}
 		}()
 	}
@@ -1078,7 +1103,7 @@
 	// TODO: Should we be logging if writing status failed here, like above?
 	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
 	// error or allow the stats handler to see it?
-	err = t.WriteStatus(stream, status.New(codes.OK, ""))
+	err = t.WriteStatus(stream, statusOK)
 	if binlog != nil {
 		binlog.Log(&binarylog.ServerTrailer{
 			Trailer: stream.Trailer(),
@@ -1091,31 +1116,15 @@
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
 	}
 	sh := s.opts.statsHandler
+	var statsBegin *stats.Begin
 	if sh != nil {
 		beginTime := time.Now()
-		begin := &stats.Begin{
+		statsBegin = &stats.Begin{
 			BeginTime: beginTime,
 		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
-				BeginTime: beginTime,
-				EndTime:   time.Now(),
-			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
+		sh.HandleRPC(stream.Context(), statsBegin)
 	}
 	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
 	ss := &serverStream{
@@ -1130,6 +1139,41 @@
 		statsHandler:          sh,
 	}
 
+	if sh != nil || trInfo != nil || channelz.IsOn() {
+		// See comment in processUnaryRPC on defers.
+		defer func() {
+			if trInfo != nil {
+				ss.mu.Lock()
+				if err != nil && err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+				ss.trInfo.tr.Finish()
+				ss.trInfo.tr = nil
+				ss.mu.Unlock()
+			}
+
+			if sh != nil {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(stream.Context(), end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+
 	ss.binlog = binarylog.GetMethodLogger(stream.Method())
 	if ss.binlog != nil {
 		md, _ := metadata.FromIncomingContext(ctx)
@@ -1183,16 +1227,6 @@
 
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		defer func() {
-			ss.mu.Lock()
-			if err != nil && err != io.EOF {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				ss.trInfo.tr.SetError()
-			}
-			ss.trInfo.tr.Finish()
-			ss.trInfo.tr = nil
-			ss.mu.Unlock()
-		}()
 	}
 	var appErr error
 	var server interface{}
@@ -1236,7 +1270,7 @@
 		ss.trInfo.tr.LazyLog(stringer("OK"), false)
 		ss.mu.Unlock()
 	}
-	err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
+	err = t.WriteStatus(ss.s, statusOK)
 	if ss.binlog != nil {
 		ss.binlog.Log(&binarylog.ServerTrailer{
 			Trailer: ss.s.Trailer(),
@@ -1353,15 +1387,11 @@
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
+	s.quit.Fire()
 
 	defer func() {
 		s.serveWG.Wait()
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
+		s.done.Fire()
 	}()
 
 	s.channelzRemoveOnce.Do(func() {
@@ -1398,15 +1428,8 @@
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
-
-	defer func() {
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
-	}()
+	s.quit.Fire()
+	defer s.done.Fire()
 
 	s.channelzRemoveOnce.Do(func() {
 		if channelz.IsOn() {
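
The new HeaderTableSize ServerOption above is experimental and only takes effect when constructing a server. A minimal usage sketch; the 4096-byte value is an arbitrary example, not part of this change.

package example

import "google.golang.org/grpc"

func newServer() *grpc.Server {
	// Size the HTTP/2 dynamic header table to 4 KiB.
	return grpc.NewServer(grpc.HeaderTableSize(4096))
}
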
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index d0787f1..5a80a57 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -136,9 +136,9 @@
 	maxAttempts int
 
 	// Exponential backoff parameters. The initial retry attempt will occur at
-	// random(0, initialBackoffMS). In general, the nth attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
 	// random(0,
-	//   min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
 	//
 	// These fields are required and must be greater than zero.
 	initialBackoff    time.Duration
@@ -261,20 +261,17 @@
 }
 
 func init() {
-	internal.ParseServiceConfig = func(sc string) (interface{}, error) {
-		return parseServiceConfig(sc)
-	}
+	internal.ParseServiceConfigForTesting = parseServiceConfig
 }
-
-func parseServiceConfig(js string) (*ServiceConfig, error) {
+func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	if len(js) == 0 {
-		return nil, fmt.Errorf("no JSON service config provided")
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
 	}
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
 		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-		return nil, err
+		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
 		LB:                rsc.LoadBalancingPolicy,
@@ -288,7 +285,7 @@
 			if len(lbcfg) != 1 {
 				err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
 				grpclog.Warningf(err.Error())
-				return nil, err
+				return &serviceconfig.ParseResult{Err: err}
 			}
 			var name string
 			var jsonCfg json.RawMessage
@@ -303,17 +300,25 @@
 				var err error
 				sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
 				if err != nil {
-					return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+					return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
 				}
 			} else if string(jsonCfg) != "{}" {
 				grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
 			}
 			break
 		}
+		if sc.lbConfig == nil {
+			// We had a loadBalancingConfig field but did not encounter a
+			// supported policy.  The config is considered invalid in this
+			// case.
+			err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+			grpclog.Warningf(err.Error())
+			return &serviceconfig.ParseResult{Err: err}
+		}
 	}
 
 	if rsc.MethodConfig == nil {
-		return &sc, nil
+		return &serviceconfig.ParseResult{Config: &sc}
 	}
 	for _, m := range *rsc.MethodConfig {
 		if m.Name == nil {
@@ -322,7 +327,7 @@
 		d, err := parseDuration(m.Timeout)
 		if err != nil {
 			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return nil, err
+			return &serviceconfig.ParseResult{Err: err}
 		}
 
 		mc := MethodConfig{
@@ -331,7 +336,7 @@
 		}
 		if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
 			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return nil, err
+			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
 			if *m.MaxRequestMessageBytes > int64(maxInt) {
@@ -356,13 +361,13 @@
 
 	if sc.retryThrottling != nil {
 		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
-			return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
 		}
 		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
-			return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
 		}
 	}
-	return &sc, nil
+	return &serviceconfig.ParseResult{Config: &sc}
 }
 
 func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
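
parseServiceConfig above now returns a *serviceconfig.ParseResult, and a loadBalancingConfig that names only unsupported policies is rejected as invalid rather than silently ignored. A hypothetical example of the JSON shape it accepts; the echo.Echo service name and the retry values are placeholders.

package example

const exampleServiceConfig = `{
  "loadBalancingConfig": [ {"round_robin": {}} ],
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "timeout": "2s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`
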
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 53b2787..187c304 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -22,27 +22,20 @@
 // This package is EXPERIMENTAL.
 package serviceconfig
 
-import (
-	"google.golang.org/grpc/internal"
-)
-
 // Config represents an opaque data structure holding a service config.
 type Config interface {
-	isConfig()
+	isServiceConfig()
 }
 
 // LoadBalancingConfig represents an opaque data structure holding a load
-// balancer config.
+// balancing config.
 type LoadBalancingConfig interface {
 	isLoadBalancingConfig()
 }
 
-// Parse parses the JSON service config provided into an internal form or
-// returns an error if the config is invalid.
-func Parse(ServiceConfigJSON string) (Config, error) {
-	c, err := internal.ParseServiceConfig(ServiceConfigJSON)
-	if err != nil {
-		return nil, err
-	}
-	return c.(Config), err
+// ParseResult contains a service config or an error.  Exactly one must be
+// non-nil.
+type ParseResult struct {
+	Config Config
+	Err    error
 }
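
A short, hypothetical sketch of consuming the new ParseResult type defined above; applyConfig is illustrative only. Exactly one of Config or Err is non-nil, so callers branch on Err before using Config.

package example

import "google.golang.org/grpc/serviceconfig"

func applyConfig(pr *serviceconfig.ParseResult) (serviceconfig.Config, error) {
	if pr == nil {
		// No service config was provided by the resolver.
		return nil, nil
	}
	if pr.Err != nil {
		return nil, pr.Err
	}
	return pr.Config, nil
}
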
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index f3f593c..9e22c39 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -91,6 +91,8 @@
 	LocalAddr net.Addr
 	// Compression is the compression algorithm used for the RPC.
 	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -104,6 +106,9 @@
 	Client bool
 	// WireLength is the wire length of trailer.
 	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -146,6 +151,8 @@
 	LocalAddr net.Addr
 	// Compression is the compression algorithm used for the RPC.
 	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -159,6 +166,9 @@
 	Client bool
 	// WireLength is the wire length of trailer.
 	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -176,6 +186,7 @@
 	EndTime time.Time
 	// Trailer contains the trailer metadata received from the server. This
 	// field is only valid if this End is from the client side.
+	// Deprecated: use Trailer in InTrailer instead.
 	Trailer metadata.MD
 	// Error is the error the RPC ended with. It is an error generated from
 	// status.Status and can be converted back to status.Status using
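
The stats events above now carry header/trailer metadata directly. Below is a hypothetical stats.Handler sketch that reads the new fields; mdLogger is illustrative only and would be installed on a client with grpc.WithStatsHandler(mdLogger{}).

package example

import (
	"context"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/stats"
)

type mdLogger struct{}

var _ stats.Handler = mdLogger{}

func (mdLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (mdLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (mdLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (mdLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.InHeader:
		grpclog.Infof("received header metadata: %v", ev.Header)
	case *stats.InTrailer:
		// Trailer is only populated on the client side.
		grpclog.Infof("received trailer metadata: %v", ev.Trailer)
	}
}
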
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 641c45c..a1348e9 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -58,6 +58,17 @@
 	return &Status{s: (*spb.Status)(se)}
 }
 
+// Is implements future errors.Is functionality.
+// A statusError is equivalent if the code and message are identical.
+func (se *statusError) Is(target error) bool {
+	tse, ok := target.(*statusError)
+	if !ok {
+		return false
+	}
+
+	return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+}
+
 // Status represents an RPC status code, message, and details.  It is immutable
 // and should be created with New, Newf, or FromProto.
 type Status struct {
@@ -132,7 +143,7 @@
 // Status is returned with codes.Unknown and the original error message.
 func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
-		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
+		return nil, true
 	}
 	if se, ok := err.(interface {
 		GRPCStatus() *Status
@@ -206,7 +217,7 @@
 func FromContextError(err error) *Status {
 	switch err {
 	case nil:
-		return New(codes.OK, "")
+		return nil
 	case context.DeadlineExceeded:
 		return New(codes.DeadlineExceeded, err.Error())
 	case context.Canceled:
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index db14c32..bb99940 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -327,13 +327,23 @@
 	return cs, nil
 }
 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
-	cs.attempt = &csAttempt{
+// newAttemptLocked creates a new attempt with a transport.
+// If it succeeds, then it replaces clientStream's attempt with this new attempt.
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
+	newAttempt := &csAttempt{
 		cs:           cs,
 		dc:           cs.cc.dopts.dc,
 		statsHandler: sh,
 		trInfo:       trInfo,
 	}
+	defer func() {
+		if retErr != nil {
+			// This attempt is not set in the clientStream, so its finish won't
+			// be called. Call it here for stats and trace in case they are not
+			// nil.
+			newAttempt.finish(retErr)
+		}
+	}()
 
 	if err := cs.ctx.Err(); err != nil {
 		return toRPCErr(err)
@@ -345,8 +355,9 @@
 	if trInfo != nil {
 		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
 	}
-	cs.attempt.t = t
-	cs.attempt.done = done
+	newAttempt.t = t
+	newAttempt.done = done
+	cs.attempt = newAttempt
 	return nil
 }
 
@@ -395,11 +406,18 @@
 	serverHeaderBinlogged bool
 
 	mu                      sync.Mutex
-	firstAttempt            bool       // if true, transparent retry is valid
-	numRetries              int        // exclusive of transparent retry attempt(s)
-	numRetriesSincePushback int        // retries since pushback; to reset backoff
-	finished                bool       // TODO: replace with atomic cmpxchg or sync.Once?
-	attempt                 *csAttempt // the active client stream attempt
+	firstAttempt            bool // if true, transparent retry is valid
+	numRetries              int  // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int  // retries since pushback; to reset backoff
+	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
+	// attempt is the active client stream attempt.
+	// The only place where it is written is the newAttemptLocked method, and that method never writes nil.
+	// So attempt can be nil only inside the newClientStream function, when the clientStream is first created.
+	// One of the first things done after the clientStream's creation is to call newAttemptLocked, which either
+	// assigns a non-nil value to attempt or returns an error. If an error is returned from newAttemptLocked,
+	// then newClientStream calls finish on the clientStream and returns. So the finish method is the only
+	// place where we need to check whether attempt is nil.
+	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
 	committed  bool                       // active attempt committed for retry?
 	buffer     []func(a *csAttempt) error // operations to replay on retry
@@ -457,8 +475,8 @@
 	if cs.attempt.s != nil {
 		<-cs.attempt.s.Done()
 	}
-	if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
-		// First attempt, wait-for-ready, stream unprocessed: transparently retry.
+	if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+		// First attempt, stream unprocessed: transparently retry.
 		cs.firstAttempt = false
 		return nil
 	}
@@ -470,7 +488,7 @@
 	pushback := 0
 	hasPushback := false
 	if cs.attempt.s != nil {
-		if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
+		if !cs.attempt.s.TrailersOnly() {
 			return err
 		}
 
@@ -805,11 +823,11 @@
 	}
 	if cs.attempt != nil {
 		cs.attempt.finish(err)
-	}
-	// after functions all rely upon having a stream.
-	if cs.attempt.s != nil {
-		for _, o := range cs.opts {
-			o.after(cs.callInfo)
+		// after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo)
+			}
 		}
 	}
 	cs.cancel()
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index 0a57b99..07a2d26 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -41,9 +41,6 @@
 	if i := strings.Index(m, "/"); i >= 0 {
 		m = m[:i] // remove everything from second slash
 	}
-	if i := strings.LastIndex(m, "."); i >= 0 {
-		m = m[i+1:] // cut down to last dotted component
-	}
 	return m
 }
 
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 70b739f..cf19382 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.22.1"
+const Version = "1.27.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index 11037b9..0e73707 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -31,12 +31,15 @@
 if [[ "$1" = "-install" ]]; then
   # Check for module support
   if go help mod >& /dev/null; then
+    # Install the pinned versions as defined in module tools.
+    pushd ./test/tools
     go install \
       golang.org/x/lint/golint \
       golang.org/x/tools/cmd/goimports \
       honnef.co/go/tools/cmd/staticcheck \
       github.com/client9/misspell/cmd/misspell \
       github.com/golang/protobuf/protoc-gen-go
+    popd
   else
     # Ye olde `go get` incantation.
     # Note: this gets the latest version of all tools (vs. the pinned versions
@@ -67,18 +70,21 @@
 fi
 
 # - Ensure all source files contain a copyright message.
-git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output
+(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go')
 
 # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
 (! grep 'func Test[^(]' *_test.go)
 (! grep 'func Test[^(]' test/*.go)
 
+# - Do not import x/net/context.
+(! git grep -l 'x/net/context' -- "*.go")
+
 # - Do not import math/rand for real library code.  Use internal/grpcrand for
 #   thread safety.
-git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
+git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test')
 
 # - Ensure all ptypes proto packages are renamed when importing.
-git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/")
+(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go")
 
 # - Check imports that are illegal in appengine (until Go 1.11).
 # TODO: Remove when we drop Go 1.10 support
@@ -86,10 +92,12 @@
 
 # - gofmt, goimports, golint (with exceptions for generated code), go vet.
 gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output
+goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go")
 golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
 go vet -all .
 
+misspell -error .
+
 # - Check that generated proto files are up to date.
 if [[ -z "${VET_SKIP_PROTO}" ]]; then
   PATH="/home/travis/bin:${PATH}" make proto && \
@@ -105,25 +113,47 @@
 fi
 
 # - Collection of static analysis checks
-# TODO(menghanl): fix errors in transport_test.
-staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore '
-google.golang.org/grpc/balancer.go:SA1019
-google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019
-google.golang.org/grpc/balancer/xds/edsbalancer/balancergroup.go:SA1019
-google.golang.org/grpc/balancer/xds/xds.go:SA1019
-google.golang.org/grpc/balancer_conn_wrappers.go:SA1019
-google.golang.org/grpc/balancer_test.go:SA1019
-google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
-google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019
-google.golang.org/grpc/clientconn.go:S1024
-google.golang.org/grpc/clientconn_state_transition_test.go:SA1019
-google.golang.org/grpc/clientconn_test.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019
-google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019
-google.golang.org/grpc/stats/stats_test.go:SA1019
-google.golang.org/grpc/test/channelz_test.go:SA1019
-google.golang.org/grpc/test/end2end_test.go:SA1019
-google.golang.org/grpc/test/healthcheck_test.go:SA1019
-' ./...
-misspell -error .
+#
+# TODO(dfawley): don't use deprecated functions in examples or first-party
+# plugins.
+SC_OUT="$(mktemp)"
+staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
+# Error if anything other than deprecation warnings are printed.
+(! grep -v "is deprecated:.*SA1019" "${SC_OUT}")
+# Only ignore the following deprecated types/fields/functions.
+(! grep -Fv '.HandleResolvedAddrs
+.HandleSubConnStateChange
+.HeaderMap
+.NewAddress
+.NewServiceConfig
+.Metadata is deprecated: use Attributes
+.Type is deprecated: use Attributes
+.UpdateBalancerState
+balancer.Picker
+grpc.CallCustomCodec
+grpc.Code
+grpc.Compressor
+grpc.Decompressor
+grpc.MaxMsgSize
+grpc.MethodConfig
+grpc.NewGZIPCompressor
+grpc.NewGZIPDecompressor
+grpc.RPCCompressor
+grpc.RPCDecompressor
+grpc.RoundRobin
+grpc.ServiceConfig
+grpc.WithBalancer
+grpc.WithBalancerName
+grpc.WithCompressor
+grpc.WithDecompressor
+grpc.WithDialer
+grpc.WithMaxMsgSize
+grpc.WithServiceConfig
+grpc.WithTimeout
+http.CloseNotifier
+naming.Resolver
+naming.Update
+naming.Watcher
+resolver.Backend
+resolver.GRPCLB' "${SC_OUT}"
+)