Update to latest protos and device-management interface; release 2.0

Change-Id: I2d2ebf5b305d6d06b8d01c49d4d67e7ff050f5d4
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index a11e8cb..0000000
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: go
-
-matrix:
-  include:
-  - go: 1.13.x
-    env: VET=1 GO111MODULE=on
-  - go: 1.13.x
-    env: RACE=1 GO111MODULE=on
-  - go: 1.13.x
-    env: RUN386=1
-  - go: 1.13.x
-    env: GRPC_GO_RETRY=on
-  - go: 1.13.x
-    env: TESTEXTRAS=1
-  - go: 1.12.x
-    env: GO111MODULE=on
-  - go: 1.11.x
-    env: GO111MODULE=on
-  - go: 1.9.x
-    env: GAE=1
-
-go_import_path: google.golang.org/grpc
-
-before_install:
-  - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
-  - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
-
-install:
-  - try3() { eval "$*" || eval "$*" || eval "$*"; }
-  - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
-  - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
-
-script:
-  - set -e
-  - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh; fi
-  - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
-  - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
-  - make test
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 4f1567e..cd03f8c 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -57,6 +57,5 @@
   - `make vet` to catch vet errors
   - `make test` to run the tests
   - `make testrace` to run tests in race mode
-  - optional `make testappengine` to run tests with appengine
 
 - Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 093c82b..c6672c0 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -8,17 +8,18 @@
 for general contribution guidelines.
 
 ## Maintainers (in alphabetical order)
-- [canguler](https://github.com/canguler), Google LLC
+
 - [cesarghali](https://github.com/cesarghali), Google LLC
 - [dfawley](https://github.com/dfawley), Google LLC
 - [easwars](https://github.com/easwars), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
 - [menghanl](https://github.com/menghanl), Google LLC
 - [srini100](https://github.com/srini100), Google LLC
 
 ## Emeritus Maintainers (in alphabetical order)
 - [adelez](https://github.com/adelez), Google LLC
+- [canguler](https://github.com/canguler), Google LLC
 - [iamqizhao](https://github.com/iamqizhao), Google LLC
+- [jadekler](https://github.com/jadekler), Google LLC
 - [jtattermusch](https://github.com/jtattermusch), Google LLC
 - [lyuxuan](https://github.com/lyuxuan), Google LLC
 - [makmukhi](https://github.com/makmukhi), Google LLC
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 410f7d5..1f89609 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,13 +1,13 @@
 all: vet test testrace
 
-build: deps
+build:
 	go build google.golang.org/grpc/...
 
 clean:
 	go clean -i google.golang.org/grpc/...
 
 deps:
-	go get -d -v google.golang.org/grpc/...
+	GO111MODULE=on go get -d -v google.golang.org/grpc/...
 
 proto:
 	@ if ! which protoc > /dev/null; then \
@@ -16,29 +16,18 @@
 	fi
 	go generate google.golang.org/grpc/...
 
-test: testdeps
+test:
 	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
-testsubmodule: testdeps
+testsubmodule:
 	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+	cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
 
-testappengine: testappenginedeps
-	goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testappenginedeps:
-	goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-testdeps:
-	go get -d -v -t google.golang.org/grpc/...
-
-testrace: testdeps
+testrace:
 	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
-updatedeps:
-	go get -d -v -u -f google.golang.org/grpc/...
-
-updatetestdeps:
-	go get -d -v -t -u -f google.golang.org/grpc/...
+testdeps:
+	GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
 
 vet: vetdeps
 	./vet.sh
@@ -50,14 +39,8 @@
 	all \
 	build \
 	clean \
-	deps \
 	proto \
 	test \
-	testappengine \
-	testappenginedeps \
-	testdeps \
 	testrace \
-	updatedeps \
-	updatetestdeps \
 	vet \
 	vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 0000000..5301977
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 800e7bd..0e6ae69 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,64 +1,53 @@
 # gRPC-Go
 
 [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
-[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
 [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
 
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open
-source, general RPC framework that puts mobile and HTTP/2 first. For more
-information see the [gRPC Quick Start:
-Go](https://grpc.io/docs/quickstart/go.html) guide.
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
 
-Installation
-------------
+## Prerequisites
 
-To install this package, you need to install Go and setup your Go workspace on
-your computer. The simplest way to install the library is to run:
+- **[Go][]**: any one of the **three latest major** [releases][go-releases].
 
+## Installation
+
+With [Go module][] support (Go 1.11+), simply add the following import
+
+```go
+import "google.golang.org/grpc"
 ```
+
+to your code, and then `go [build|run|test]` will automatically fetch the
+necessary dependencies.
+
+Otherwise, to install the `grpc-go` package, run the following command:
+
+```console
 $ go get -u google.golang.org/grpc
 ```
 
-With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
-your source code and `go [build|run|test]` will automatically download the
-necessary dependencies ([Go modules
-ref](https://github.com/golang/go/wiki/Modules)).
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
 
-If you are trying to access grpc-go from within China, please see the
-[FAQ](#FAQ) below.
+## Learn more
 
-Prerequisites
--------------
-gRPC-Go requires Go 1.9 or later.
+- [Go gRPC docs][], which include a [quick start][] and [API
+  reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
 
-Documentation
--------------
-- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
-  descriptions.
-- Documentation on specific topics can be found in the [Documentation
-  directory](Documentation/).
-- Examples can be found in the [examples directory](examples/).
+## FAQ
 
-Performance
------------
-Performance benchmark data for grpc-go and other languages is maintained in
-[this
-dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+### I/O Timeout Errors
 
-Status
-------
-General Availability [Google Cloud Platform Launch
-Stages](https://cloud.google.com/terms/launch-stages).
-
-FAQ
----
-
-#### I/O Timeout Errors
-
-The `golang.org` domain may be blocked from some countries.  `go get` usually
+The `golang.org` domain may be blocked from some countries. `go get` usually
 produces an error like the following when this happens:
 
-```
+```console
 $ go get -u google.golang.org/grpc
 package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
 ```
@@ -69,7 +58,7 @@
 
 - Without Go module support: `git clone` the repo manually:
 
-  ```
+  ```sh
   git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
   ```
 
@@ -79,7 +68,7 @@
 - With Go module support: it is possible to use the `replace` feature of `go
   mod` to create aliases for golang.org packages.  In your project's directory:
 
-  ```
+  ```sh
   go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
   go mod tidy
   go mod vendor
@@ -87,19 +76,17 @@
   ```
 
   Again, this will need to be done for all transitive dependencies hosted on
-  golang.org as well.  Please refer to [this
-  issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
-  this concern.
+  golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
 
-#### Compiling error, undefined: grpc.SupportPackageIsVersion
+### Compiling error, undefined: grpc.SupportPackageIsVersion
 
-##### If you are using Go modules:
+#### If you are using Go modules:
 
-Please ensure your gRPC-Go version is `require`d at the appropriate version in
+Ensure your gRPC-Go version is `require`d at the appropriate version in
 the same module containing the generated `.pb.go` files.  For example,
 `SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
 
-```
+```go
 module <your module name>
 
 require (
@@ -107,23 +94,27 @@
 )
 ```
 
-##### If you are *not* using Go modules:
+#### If you are *not* using Go modules:
 
-Please update proto package, gRPC package and rebuild the proto files:
- - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- - `go get -u google.golang.org/grpc`
- - `protoc --go_out=plugins=grpc:. *.proto`
+Update the `proto` package, gRPC package, and rebuild the `.proto` files:
 
-#### How to turn on logging
-
-The default logger is controlled by the environment variables. Turn everything
-on by setting:
-
-```
-GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```sh
+go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
+go get -u google.golang.org/grpc
+protoc --go_out=plugins=grpc:. *.proto
 ```
 
-#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything on
+like this:
+
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
 
 This error means the connection the RPC is using was closed, and there are many
 possible reasons, including:
@@ -139,3 +130,12 @@
 the root cause of the connection being closed is on the server side. Turn on
 logging on __both client and server__, and see if there are any transport
 errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000..be6e108
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 68ffc62..ae13dda 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -19,52 +19,83 @@
 // Package attributes defines a generic key/value store used in various gRPC
 // components.
 //
-// All APIs in this package are EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package attributes
 
-import "fmt"
-
 // Attributes is an immutable struct for storing and retrieving generic
 // key/value pairs.  Keys must be hashable, and users should define their own
-// types for keys.
+// types for keys.  Values should not be modified after they are added to an
+// Attributes or if they were received from one.  If values implement 'Equal(o
+// interface{}) bool', it will be called by (*Attributes).Equal to determine
+// whether two values with the same key should be considered equal.
 type Attributes struct {
 	m map[interface{}]interface{}
 }
 
-// New returns a new Attributes containing all key/value pairs in kvs.  If the
-// same key appears multiple times, the last value overwrites all previous
-// values for that key.  Panics if len(kvs) is not even.
-func New(kvs ...interface{}) *Attributes {
-	if len(kvs)%2 != 0 {
-		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
-	}
-	a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
-	for i := 0; i < len(kvs)/2; i++ {
-		a.m[kvs[i*2]] = kvs[i*2+1]
-	}
-	return a
+// New returns a new Attributes containing the key/value pair.
+func New(key, value interface{}) *Attributes {
+	return &Attributes{m: map[interface{}]interface{}{key: value}}
 }
 
-// WithValues returns a new Attributes containing all key/value pairs in a and
-// kvs.  Panics if len(kvs) is not even.  If the same key appears multiple
-// times, the last value overwrites all previous values for that key.  To
-// remove an existing key, use a nil value.
-func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
-	if len(kvs)%2 != 0 {
-		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair.  If the same key appears multiple times, the
+// last value overwrites all previous values for that key.  To remove an
+// existing key, use a nil value.  value should not be modified later.
+func (a *Attributes) WithValue(key, value interface{}) *Attributes {
+	if a == nil {
+		return New(key, value)
 	}
-	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
+	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
 	for k, v := range a.m {
 		n.m[k] = v
 	}
-	for i := 0; i < len(kvs)/2; i++ {
-		n.m[kvs[i*2]] = kvs[i*2+1]
-	}
+	n.m[key] = value
 	return n
 }
 
 // Value returns the value associated with these attributes for key, or nil if
-// no value is associated with key.
+// no value is associated with key.  The returned value should not be modified.
 func (a *Attributes) Value(key interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
 	return a.m[key]
 }
+
+// Equal returns whether a and o are equivalent.  If 'Equal(o interface{})
+// bool' is implemented for a value in the attributes, it is called to
+// determine if the value matches the one stored in the other attributes.  If
+// Equal is not implemented, standard equality is used to determine if the two
+// values are equal. Note that some types (e.g. maps) aren't comparable by
+// default, so they must be wrapped in a struct, or in an alias type, with Equal
+// defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+	if a == nil && o == nil {
+		return true
+	}
+	if a == nil || o == nil {
+		return false
+	}
+	if len(a.m) != len(o.m) {
+		return false
+	}
+	for k, v := range a.m {
+		ov, ok := o.m[k]
+		if !ok {
+			// o is missing an element of a
+			return false
+		}
+		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
+			if !eq.Equal(ov) {
+				return false
+			}
+		} else if v != ov {
+			// Fall back to a standard equality check if Equal is unimplemented.
+			return false
+		}
+	}
+	return true
+}
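The rewritten attributes package exposes a single-pair New/WithValue API plus Equal in place of the old variadic New/WithValues. A minimal usage sketch follows; the exampleKey type and the string values are hypothetical and used only for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// exampleKey is a hypothetical key type; the package docs ask callers to
// define their own hashable key types rather than use bare strings.
type exampleKey struct{}

func main() {
	a := attributes.New(exampleKey{}, "v1")
	// WithValue returns a new *Attributes; a itself is immutable and unchanged.
	b := a.WithValue(exampleKey{}, "v2")

	fmt.Println(a.Value(exampleKey{})) // v1
	fmt.Println(b.Value(exampleKey{})) // v2

	// Equal compares key sets and values; the values stored under exampleKey differ.
	fmt.Println(a.Equal(b)) // false
}
```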
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index ff7c3ee..542594f 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -48,7 +48,10 @@
 // here for more details:
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ConnectParams struct {
 	// Backoff specifies the configuration options for connection backoff.
 	Backoff backoff.Config
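ConnectParams is supplied through the grpc.WithConnectParams dial option (defined elsewhere in grpc-go, not in this patch). A minimal sketch, assuming a hypothetical target address and using WithInsecure only to keep the example short:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// "localhost:50051" is a hypothetical target; backoff.DefaultConfig is the
	// stock backoff configuration from the grpc/backoff package.
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoff.DefaultConfig}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```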
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
deleted file mode 100644
index a8eb0f4..0000000
--- a/vendor/google.golang.org/grpc/balancer.go
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"net"
-	"sync"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/naming"
-	"google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
-	// Addr is the server address on which a connection will be established.
-	Addr string
-	// Metadata is the information associated with Addr, which may be used
-	// to make load balancing decision.
-	Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type BalancerConfig struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
-	DialCreds credentials.TransportCredentials
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
-	Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type BalancerGetOptions struct {
-	// BlockingWait specifies whether Get should block when there is no
-	// connected address.
-	BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type Balancer interface {
-	// Start does the initialization work to bootstrap a Balancer. For example,
-	// this function may start the name resolution and watch the updates. It will
-	// be called when dialing.
-	Start(target string, config BalancerConfig) error
-	// Up informs the Balancer that gRPC has a connection to the server at
-	// addr. It returns down which is called once the connection to addr gets
-	// lost or closed.
-	// TODO: It is not clear how to construct and take advantage of the meaningful error
-	// parameter for down. Need realistic demands to guide.
-	Up(addr Address) (down func(error))
-	// Get gets the address of a server for the RPC corresponding to ctx.
-	// i) If it returns a connected address, gRPC internals issues the RPC on the
-	// connection to this address;
-	// ii) If it returns an address on which the connection is under construction
-	// (initiated by Notify(...)) but not connected, gRPC internals
-	//  * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
-	//  Shutdown state;
-	//  or
-	//  * issues RPC on the connection otherwise.
-	// iii) If it returns an address on which the connection does not exist, gRPC
-	// internals treats it as an error and will fail the corresponding RPC.
-	//
-	// Therefore, the following is the recommended rule when writing a custom Balancer.
-	// If opts.BlockingWait is true, it should return a connected address or
-	// block if there is no connected address. It should respect the timeout or
-	// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
-	// RPCs), it should return an address it has notified via Notify(...) immediately
-	// instead of blocking.
-	//
-	// The function returns put which is called once the rpc has completed or failed.
-	// put can collect and report RPC stats to a remote load balancer.
-	//
-	// This function should only return the errors Balancer cannot recover by itself.
-	// gRPC internals will fail the RPC if an error is returned.
-	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
-	// Notify returns a channel that is used by gRPC internals to watch the addresses
-	// gRPC needs to connect. The addresses might be from a name resolver or remote
-	// load balancer. gRPC internals will compare it with the existing connected
-	// addresses. If the address Balancer notified is not in the existing connected
-	// addresses, gRPC starts to connect the address. If an address in the existing
-	// connected addresses is not in the notification list, the corresponding connection
-	// is shutdown gracefully. Otherwise, there are no operations to take. Note that
-	// the Address slice must be the full list of the Addresses which should be connected.
-	// It is NOT delta.
-	Notify() <-chan []Address
-	// Close shuts down the balancer.
-	Close() error
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
-func RoundRobin(r naming.Resolver) Balancer {
-	return &roundRobin{r: r}
-}
-
-type addrInfo struct {
-	addr      Address
-	connected bool
-}
-
-type roundRobin struct {
-	r      naming.Resolver
-	w      naming.Watcher
-	addrs  []*addrInfo // all the addresses the client should potentially connect
-	mu     sync.Mutex
-	addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
-	next   int            // index of the next address to return for Get()
-	waitCh chan struct{}  // the channel to block when there is no connected address available
-	done   bool           // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
-	updates, err := rr.w.Next()
-	if err != nil {
-		grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
-		return err
-	}
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, update := range updates {
-		addr := Address{
-			Addr:     update.Addr,
-			Metadata: update.Metadata,
-		}
-		switch update.Op {
-		case naming.Add:
-			var exist bool
-			for _, v := range rr.addrs {
-				if addr == v.addr {
-					exist = true
-					grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
-					break
-				}
-			}
-			if exist {
-				continue
-			}
-			rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
-		case naming.Delete:
-			for i, v := range rr.addrs {
-				if addr == v.addr {
-					copy(rr.addrs[i:], rr.addrs[i+1:])
-					rr.addrs = rr.addrs[:len(rr.addrs)-1]
-					break
-				}
-			}
-		default:
-			grpclog.Errorln("Unknown update.Op ", update.Op)
-		}
-	}
-	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
-	open := make([]Address, len(rr.addrs))
-	for i, v := range rr.addrs {
-		open[i] = v.addr
-	}
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	select {
-	case <-rr.addrCh:
-	default:
-	}
-	rr.addrCh <- open
-	return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	if rr.r == nil {
-		// If there is no name resolver installed, it is not needed to
-		// do name resolution. In this case, target is added into rr.addrs
-		// as the only address available and rr.addrCh stays nil.
-		rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
-		return nil
-	}
-	w, err := rr.r.Resolve(target)
-	if err != nil {
-		return err
-	}
-	rr.w = w
-	rr.addrCh = make(chan []Address, 1)
-	go func() {
-		for {
-			if err := rr.watchAddrUpdates(); err != nil {
-				return
-			}
-		}
-	}()
-	return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	var cnt int
-	for _, a := range rr.addrs {
-		if a.addr == addr {
-			if a.connected {
-				return nil
-			}
-			a.connected = true
-		}
-		if a.connected {
-			cnt++
-		}
-	}
-	// addr is only one which is connected. Notify the Get() callers who are blocking.
-	if cnt == 1 && rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	return func(err error) {
-		rr.down(addr, err)
-	}
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, a := range rr.addrs {
-		if addr == a.addr {
-			a.connected = false
-			break
-		}
-	}
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-	var ch chan struct{}
-	rr.mu.Lock()
-	if rr.done {
-		rr.mu.Unlock()
-		err = ErrClientConnClosing
-		return
-	}
-
-	if len(rr.addrs) > 0 {
-		if rr.next >= len(rr.addrs) {
-			rr.next = 0
-		}
-		next := rr.next
-		for {
-			a := rr.addrs[next]
-			next = (next + 1) % len(rr.addrs)
-			if a.connected {
-				addr = a.addr
-				rr.next = next
-				rr.mu.Unlock()
-				return
-			}
-			if next == rr.next {
-				// Has iterated all the possible address but none is connected.
-				break
-			}
-		}
-	}
-	if !opts.BlockingWait {
-		if len(rr.addrs) == 0 {
-			rr.mu.Unlock()
-			err = status.Errorf(codes.Unavailable, "there is no address available")
-			return
-		}
-		// Returns the next addr on rr.addrs for failfast RPCs.
-		addr = rr.addrs[rr.next].addr
-		rr.next++
-		rr.mu.Unlock()
-		return
-	}
-	// Wait on rr.waitCh for non-failfast RPCs.
-	if rr.waitCh == nil {
-		ch = make(chan struct{})
-		rr.waitCh = ch
-	} else {
-		ch = rr.waitCh
-	}
-	rr.mu.Unlock()
-	for {
-		select {
-		case <-ctx.Done():
-			err = ctx.Err()
-			return
-		case <-ch:
-			rr.mu.Lock()
-			if rr.done {
-				rr.mu.Unlock()
-				err = ErrClientConnClosing
-				return
-			}
-
-			if len(rr.addrs) > 0 {
-				if rr.next >= len(rr.addrs) {
-					rr.next = 0
-				}
-				next := rr.next
-				for {
-					a := rr.addrs[next]
-					next = (next + 1) % len(rr.addrs)
-					if a.connected {
-						addr = a.addr
-						rr.next = next
-						rr.mu.Unlock()
-						return
-					}
-					if next == rr.next {
-						// Has iterated all the possible address but none is connected.
-						break
-					}
-				}
-			}
-			// The newly added addr got removed by Down() again.
-			if rr.waitCh == nil {
-				ch = make(chan struct{})
-				rr.waitCh = ch
-			} else {
-				ch = rr.waitCh
-			}
-			rr.mu.Unlock()
-		}
-	}
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
-	return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return errBalancerClosed
-	}
-	rr.done = true
-	if rr.w != nil {
-		rr.w.Close()
-	}
-	if rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	if rr.addrCh != nil {
-		close(rr.addrCh)
-	}
-	return nil
-}
-
-// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
-// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
-// returns the only address Up by resetTransport().
-type pickFirst struct {
-	*roundRobin
-}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 9258858..bcc6f54 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -75,24 +75,26 @@
 	return nil
 }
 
-// SubConn represents a gRPC sub connection.
-// Each sub connection contains a list of addresses. gRPC will
-// try to connect to them (in sequence), and stop trying the
-// remainder once one connection is successful.
+// A SubConn represents a single connection to a gRPC backend service.
 //
-// The reconnect backoff will be applied on the list, not a single address.
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+// Each SubConn contains a list of addresses.
 //
-// All SubConns start in IDLE, and will not try to connect. To trigger
-// the connecting, Balancers must call Connect.
-// When the connection encounters an error, it will reconnect immediately.
-// When the connection becomes IDLE, it will not reconnect unless Connect is
-// called.
+// All SubConns start in IDLE, and will not try to connect. To trigger the
+// connecting, Balancers must call Connect.  If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
 //
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
 type SubConn interface {
 	// UpdateAddresses updates the addresses used in this SubConn.
 	// gRPC checks if currently-connected address is still in the new list.
@@ -101,6 +103,9 @@
 	// a new connection will be created.
 	//
 	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: This method is now part of the ClientConn interface and will
+	// eventually be removed from here.
 	UpdateAddresses([]resolver.Address)
 	// Connect starts the connecting for this SubConn.
 	Connect()
@@ -111,6 +116,9 @@
 	// CredsBundle is the credentials bundle that will be used in the created
 	// SubConn. If it's nil, the original creds from grpc DialOptions will be
 	// used.
+	//
+	// Deprecated: Use the Attributes field in resolver.Address to pass
+	// arbitrary data to the credential handshaker.
 	CredsBundle credentials.Bundle
 	// HealthCheckEnabled indicates whether health check service should be
 	// enabled on this SubConn
@@ -123,7 +131,7 @@
 	// determine the state of the ClientConn.
 	ConnectivityState connectivity.State
 	// Picker is used to choose connections (SubConns) for RPCs.
-	Picker V2Picker
+	Picker Picker
 }
 
 // ClientConn represents a gRPC ClientConn.
@@ -140,21 +148,19 @@
 	// RemoveSubConn removes the SubConn from ClientConn.
 	// The SubConn will be shutdown.
 	RemoveSubConn(SubConn)
-
-	// UpdateBalancerState is called by balancer to notify gRPC that some internal
-	// state in balancer has changed.
+	// UpdateAddresses updates the addresses used in the passed in SubConn.
+	// gRPC checks if the currently connected address is still in the new list.
+	// If so, the connection will be kept. Else, the connection will be
+	// gracefully closed, and a new connection will be created.
 	//
-	// gRPC will update the connectivity state of the ClientConn, and will call pick
-	// on the new picker to pick new SubConn.
-	//
-	// Deprecated: use UpdateState instead
-	UpdateBalancerState(s connectivity.State, p Picker)
+	// This will trigger a state transition for the SubConn.
+	UpdateAddresses(SubConn, []resolver.Address)
 
 	// UpdateState notifies gRPC that the balancer's internal state has
 	// changed.
 	//
-	// gRPC will update the connectivity state of the ClientConn, and will call pick
-	// on the new picker to pick new SubConns.
+	// gRPC will update the connectivity state of the ClientConn, and will call
+	// Pick on the new Picker to pick new SubConns.
 	UpdateState(State)
 
 	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
@@ -168,21 +174,32 @@
 
 // BuildOptions contains additional information for Build.
 type BuildOptions struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
+	// DialCreds is the transport credentials to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	DialCreds credentials.TransportCredentials
-	// CredsBundle is the credentials bundle that the Balancer can use.
+	// CredsBundle is the credentials bundle to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	CredsBundle credentials.Bundle
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
+	// Dialer is the custom dialer to use when communicating with a remote load
+	// balancer server. Balancer implementations which do not communicate with a
+	// remote load balancer server can ignore this field.
 	Dialer func(context.Context, string) (net.Conn, error)
-	// ChannelzParentID is the entity parent's channelz unique identification number.
+	// Authority is the server name to use as part of the authentication
+	// handshake when communicating with a remote load balancer server. Balancer
+	// implementations which do not communicate with a remote load balancer
+	// server can ignore this field.
+	Authority string
+	// ChannelzParentID is the parent ClientConn's channelz ID.
 	ChannelzParentID int64
-	// Target contains the parsed address info of the dial target. It is the same resolver.Target as
-	// passed to the resolver.
-	// See the documentation for the resolver.Target type for details about what it contains.
+	// CustomUserAgent is the custom user agent set on the parent ClientConn.
+	// The balancer should set the same custom user agent if it creates a
+	// ClientConn.
+	CustomUserAgent string
+	// Target contains the parsed address info of the dial target. It is the
+	// same resolver.Target as passed to the resolver. See the documentation for
+	// the resolver.Target type for details about what it contains.
 	Target resolver.Target
 }
 
@@ -232,56 +249,17 @@
 
 var (
 	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
-	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	// gRPC will block the RPC until a new picker is available via UpdateState().
 	ErrNoSubConnAvailable = errors.New("no SubConn is available")
 	// ErrTransientFailure indicates all SubConns are in TransientFailure.
 	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
-	ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
+	//
+	// Deprecated: return an appropriate error based on the last resolution or
+	// connection attempt instead.  The behavior is the same for any non-gRPC
+	// status error.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
 )
 
-// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
-// internal state has changed.
-//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-//
-// Deprecated: use V2Picker instead
-type Picker interface {
-	// Pick returns the SubConn to be used to send the RPC.
-	// The returned SubConn must be one returned by NewSubConn().
-	//
-	// This functions is expected to return:
-	// - a SubConn that is known to be READY;
-	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
-	//   made (for example, some SubConn is in CONNECTING mode);
-	// - other errors if no active connecting is happening (for example, all SubConn
-	//   are in TRANSIENT_FAILURE mode).
-	//
-	// If a SubConn is returned:
-	// - If it is READY, gRPC will send the RPC on it;
-	// - If it is not ready, or becomes not ready after it's returned, gRPC will
-	//   block until UpdateBalancerState() is called and will call pick on the
-	//   new picker. The done function returned from Pick(), if not nil, will be
-	//   called with nil error, no bytes sent and no bytes received.
-	//
-	// If the returned error is not nil:
-	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
-	// - If the error is ErrTransientFailure or implements IsTransientFailure()
-	//   bool, returning true:
-	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
-	//     is called to pick again;
-	//   - Otherwise, RPC will fail with unavailable error.
-	// - Else (error is other non-nil error):
-	//   - The RPC will fail with the error's status code, or Unknown if it is
-	//     not a status error.
-	//
-	// The returned done() function will be called once the rpc has finished,
-	// with the final status of that RPC.  If the SubConn returned is not a
-	// valid SubConn type, done may not be called.  done may be nil if balancer
-	// doesn't care about the RPC status.
-	Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
-}
-
 // PickResult contains information related to a connection chosen for an RPC.
 type PickResult struct {
 	// SubConn is the connection to use for this pick, if its state is Ready.
@@ -297,24 +275,19 @@
 	Done func(DoneInfo)
 }
 
-type transientFailureError struct {
-	error
-}
+// TransientFailureError returns e.  It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
 
-func (e *transientFailureError) IsTransientFailure() bool { return true }
-
-// TransientFailureError wraps err in an error implementing
-// IsTransientFailure() bool, returning true.
-func TransientFailureError(err error) error {
-	return &transientFailureError{error: err}
-}
-
-// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Picker is used by gRPC to pick a SubConn to send an RPC.
 // Balancer is expected to generate a new picker from its snapshot every time its
 // internal state has changed.
 //
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-type V2Picker interface {
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
+type Picker interface {
 	// Pick returns the connection to use for this RPC and related information.
 	//
 	// Pick should not block.  If the balancer needs to do I/O or any blocking
@@ -327,14 +300,13 @@
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
 	//   Picker is provided by the balancer (using ClientConn.UpdateState).
 	//
-	// - If the error implements IsTransientFailure() bool, returning true,
-	//   wait for ready RPCs will wait, but non-wait for ready RPCs will be
-	//   terminated with this error's Error() string and status code
-	//   Unavailable.
+	// - If the error is a status error (implemented by the grpc/status
+	//   package), gRPC will terminate the RPC with the code and message
+	//   provided.
 	//
-	// - Any other errors terminate all RPCs with the code and message
-	//   provided.  If the error is not a status error, it will be converted by
-	//   gRPC to a status error with code Unknown.
+	// - For all other errors, wait for ready RPCs will wait, but non-wait for
+	//   ready RPCs will be terminated with this error's Error() string and
+	//   status code Unavailable.
 	Pick(info PickInfo) (PickResult, error)
 }
 
@@ -343,34 +315,40 @@
 //
 // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
 //
-// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick, it may be called anytime.
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine.  There's no
+// guarantee on picker.Pick, it may be called anytime.
 type Balancer interface {
-	// HandleSubConnStateChange is called by gRPC when the connectivity state
-	// of sc has changed.
-	// Balancer is expected to aggregate all the state of SubConn and report
-	// that back to gRPC.
-	// Balancer should also generate and update Pickers when its internal state has
-	// been changed by the new state.
-	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateSubConnState will be called instead.
-	HandleSubConnStateChange(sc SubConn, state connectivity.State)
-	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
-	// balancers.
-	// Balancer can create new SubConn or remove SubConn with the addresses.
-	// An empty address slice and a non-nil error will be passed if the resolver returns
-	// non-nil error to gRPC.
-	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateClientConnState will be called instead.
-	HandleResolvedAddrs([]resolver.Address, error)
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	UpdateSubConnState(SubConn, SubConnState)
 	// Close closes the balancer. The balancer is not required to call
 	// ClientConn.RemoveSubConn for its existing SubConns.
 	Close()
 }
 
+// ExitIdler is an optional interface for balancers to implement.  If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Notice: it will be required for all balancers to implement this in a future
+// release.
+type ExitIdler interface {
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
+}
+
 // SubConnState describes the state of a SubConn.
 type SubConnState struct {
 	// ConnectivityState is the connectivity state of the SubConn.
@@ -393,34 +371,15 @@
 // problem with the provided name resolver data.
 var ErrBadResolverState = errors.New("bad resolver state")
 
-// V2Balancer is defined for documentation purposes.  If a Balancer also
-// implements V2Balancer, its UpdateClientConnState method will be called
-// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
-// instead of HandleSubConnStateChange.
-type V2Balancer interface {
-	// UpdateClientConnState is called by gRPC when the state of the ClientConn
-	// changes.  If the error returned is ErrBadResolverState, the ClientConn
-	// will begin calling ResolveNow on the active name resolver with
-	// exponential backoff until a subsequent call to UpdateClientConnState
-	// returns a nil error.  Any other errors are currently ignored.
-	UpdateClientConnState(ClientConnState) error
-	// ResolverError is called by gRPC when the name resolver reports an error.
-	ResolverError(error)
-	// UpdateSubConnState is called by gRPC when the state of a SubConn
-	// changes.
-	UpdateSubConnState(SubConn, SubConnState)
-	// Close closes the balancer. The balancer is not required to call
-	// ClientConn.RemoveSubConn for its existing SubConns.
-	Close()
-}
-
 // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
 // and returns one aggregated connectivity state.
 //
 // It's not thread safe.
 type ConnectivityStateEvaluator struct {
-	numReady      uint64 // Number of addrConns in ready state.
-	numConnecting uint64 // Number of addrConns in connecting state.
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transient failure state.
+	numIdle             uint64 // Number of addrConns in idle state.
 }
 
 // RecordTransition records state change happening in subConn and based on that
@@ -428,9 +387,11 @@
 //
 //  - If at least one SubConn in Ready, the aggregated state is Ready;
 //  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-//  - Else the aggregated state is TransientFailure.
+//  - Else if at least one SubConn is TransientFailure, the aggregated state is TransientFailure;
+//  - Else if at least one SubConn is Idle, the aggregated state is Idle;
+//  - Else there are no SubConns and the aggregated state is TransientFailure.
 //
-// Idle and Shutdown are not considered.
+// Shutdown is not considered.
 func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
 	// Update counters.
 	for idx, state := range []connectivity.State{oldState, newState} {
@@ -440,6 +401,10 @@
 			cse.numReady += updateVal
 		case connectivity.Connecting:
 			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		case connectivity.Idle:
+			cse.numIdle += updateVal
 		}
 	}
 
@@ -450,5 +415,11 @@
 	if cse.numConnecting > 0 {
 		return connectivity.Connecting
 	}
+	if cse.numTransientFailure > 0 {
+		return connectivity.TransientFailure
+	}
+	if cse.numIdle > 0 {
+		return connectivity.Idle
+	}
 	return connectivity.TransientFailure
 }
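ConnectivityStateEvaluator now also counts TRANSIENT_FAILURE and IDLE SubConns. A small sketch of the aggregation rules under an assumed two-SubConn scenario, seeding each new SubConn with a Shutdown-to-Idle transition the way baseBalancer does:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns are recorded as Shutdown -> Idle, as baseBalancer does.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)) // IDLE

	// One SubConn starts connecting: the aggregate becomes CONNECTING.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// The other fails: still CONNECTING while one SubConn keeps trying.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.TransientFailure)) // CONNECTING

	// The last one fails too: the aggregate becomes TRANSIENT_FAILURE.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.TransientFailure)) // TRANSIENT_FAILURE
}
```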
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 80559b8..a67074a 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -19,7 +19,6 @@
 package base
 
 import (
-	"context"
 	"errors"
 	"fmt"
 
@@ -29,20 +28,20 @@
 	"google.golang.org/grpc/resolver"
 )
 
+var logger = grpclog.Component("balancer")
+
 type baseBuilder struct {
-	name            string
-	pickerBuilder   PickerBuilder
-	v2PickerBuilder V2PickerBuilder
-	config          Config
+	name          string
+	pickerBuilder PickerBuilder
+	config        Config
 }
 
 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
 	bal := &baseBalancer{
-		cc:              cc,
-		pickerBuilder:   bb.pickerBuilder,
-		v2PickerBuilder: bb.v2PickerBuilder,
+		cc:            cc,
+		pickerBuilder: bb.pickerBuilder,
 
-		subConns: make(map[resolver.Address]balancer.SubConn),
+		subConns: resolver.NewAddressMap(),
 		scStates: make(map[balancer.SubConn]connectivity.State),
 		csEvltr:  &balancer.ConnectivityStateEvaluator{},
 		config:   bb.config,
@@ -50,11 +49,7 @@
 	// Initialize picker to a picker that always returns
 	// ErrNoSubConnAvailable, because when state of a SubConn changes, we
 	// may call UpdateState with this picker.
-	if bb.pickerBuilder != nil {
-		bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
-	} else {
-		bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
-	}
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
 	return bal
 }
 
@@ -62,82 +57,73 @@
 	return bb.name
 }
 
-var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
-
 type baseBalancer struct {
-	cc              balancer.ClientConn
-	pickerBuilder   PickerBuilder
-	v2PickerBuilder V2PickerBuilder
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder
 
 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
 
-	subConns map[resolver.Address]balancer.SubConn
+	subConns *resolver.AddressMap
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
-	v2Picker balancer.V2Picker
 	config   Config
 
 	resolverErr error // the last error reported by the resolver; cleared on successful resolution
 	connErr     error // the last connection error; cleared upon leaving TransientFailure
 }
 
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	panic("not implemented")
-}
-
 func (b *baseBalancer) ResolverError(err error) {
 	b.resolverErr = err
-	if len(b.subConns) == 0 {
+	if b.subConns.Len() == 0 {
 		b.state = connectivity.TransientFailure
 	}
+
 	if b.state != connectivity.TransientFailure {
 		// The picker will not change since the balancer does not currently
 		// report an error.
 		return
 	}
 	b.regeneratePicker()
-	if b.picker != nil {
-		b.cc.UpdateBalancerState(b.state, b.picker)
-	} else {
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: b.state,
-			Picker:            b.v2Picker,
-		})
-	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
 }
 
 func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
 	// TODO: handle s.ResolverState.ServiceConfig?
-	if grpclog.V(2) {
-		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+	if logger.V(2) {
+		logger.Info("base.baseBalancer: got new ClientConn state: ", s)
 	}
 	// Successful resolution; clear resolver error and ensure we return nil.
 	b.resolverErr = nil
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
-	addrsSet := make(map[resolver.Address]struct{})
+	addrsSet := resolver.NewAddressMap()
 	for _, a := range s.ResolverState.Addresses {
-		addrsSet[a] = struct{}{}
-		if _, ok := b.subConns[a]; !ok {
+		addrsSet.Set(a, nil)
+		if _, ok := b.subConns.Get(a); !ok {
 			// a is a new address (not existing in b.subConns).
 			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
 			if err != nil {
-				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
 				continue
 			}
-			b.subConns[a] = sc
+			b.subConns.Set(a, sc)
 			b.scStates[sc] = connectivity.Idle
+			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
 			sc.Connect()
 		}
 	}
-	for a, sc := range b.subConns {
+	for _, a := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(a)
+		sc := sci.(balancer.SubConn)
 		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
+		if _, ok := addrsSet.Get(a); !ok {
 			b.cc.RemoveSubConn(sc)
-			delete(b.subConns, a)
+			b.subConns.Delete(a)
 			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
+			// The entry will be deleted in UpdateSubConnState.
 		}
 	}
 	// If resolver state contains no addresses, return an error so ClientConn
@@ -171,56 +157,42 @@
 //  - built by the pickerBuilder with all READY SubConns otherwise.
 func (b *baseBalancer) regeneratePicker() {
 	if b.state == connectivity.TransientFailure {
-		if b.pickerBuilder != nil {
-			b.picker = NewErrPicker(balancer.ErrTransientFailure)
-		} else {
-			b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
-		}
+		b.picker = NewErrPicker(b.mergeErrors())
 		return
 	}
-	if b.pickerBuilder != nil {
-		readySCs := make(map[resolver.Address]balancer.SubConn)
+	readySCs := make(map[balancer.SubConn]SubConnInfo)
 
-		// Filter out all ready SCs from full subConn map.
-		for addr, sc := range b.subConns {
-			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs[addr] = sc
-			}
+	// Filter out all ready SCs from full subConn map.
+	for _, addr := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(addr)
+		sc := sci.(balancer.SubConn)
+		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+			readySCs[sc] = SubConnInfo{Address: addr}
 		}
-		b.picker = b.pickerBuilder.Build(readySCs)
-	} else {
-		readySCs := make(map[balancer.SubConn]SubConnInfo)
-
-		// Filter out all ready SCs from full subConn map.
-		for addr, sc := range b.subConns {
-			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs[sc] = SubConnInfo{Address: addr}
-			}
-		}
-		b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 	}
-}
-
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	panic("not implemented")
+	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 }
 
 func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	s := state.ConnectivityState
-	if grpclog.V(2) {
-		grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	if logger.V(2) {
+		logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
 	}
 	oldS, ok := b.scStates[sc]
 	if !ok {
-		if grpclog.V(2) {
-			grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		if logger.V(2) {
+			logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
 		}
 		return
 	}
-	if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
-		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
+	if oldS == connectivity.TransientFailure &&
+		(s == connectivity.Connecting || s == connectivity.Idle) {
+		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
 		// CONNECTING transitions to prevent the aggregated state from being
 		// always CONNECTING when many backends exist but are all down.
+		if s == connectivity.Idle {
+			sc.Connect()
+		}
 		return
 	}
 	b.scStates[sc] = s
@@ -246,12 +218,7 @@
 		b.state == connectivity.TransientFailure {
 		b.regeneratePicker()
 	}
-
-	if b.picker != nil {
-		b.cc.UpdateBalancerState(b.state, b.picker)
-	} else {
-		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
-	}
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
 }
 
 // Close is a nop because base balancer doesn't have internal state to clean up,
@@ -259,28 +226,25 @@
 func (b *baseBalancer) Close() {
 }
 
-// NewErrPicker returns a picker that always returns err on Pick().
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
 func NewErrPicker(err error) balancer.Picker {
 	return &errPicker{err: err}
 }
 
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
 type errPicker struct {
 	err error // Pick() always returns this err.
 }
 
-func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
-}
-
-// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
-func NewErrPickerV2(err error) balancer.V2Picker {
-	return &errPickerV2{err: err}
-}
-
-type errPickerV2 struct {
-	err error // Pick() always returns this err.
-}
-
-func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 	return balancer.PickResult{}, p.err
 }
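
With the V2 split gone, errPicker above implements the only remaining Picker interface, and any custom picker now uses the same Pick(PickInfo) (PickResult, error) shape. A minimal sketch under that interface; firstReadyPicker and its field are illustrative names, not part of this change:

    package examplepicker

    import "google.golang.org/grpc/balancer"

    // firstReadyPicker is an illustrative picker; a real picker builder would
    // capture the ready SubConns when its Build method is called.
    type firstReadyPicker struct {
        scs []balancer.SubConn
    }

    func (p *firstReadyPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
        if len(p.scs) == 0 {
            // Makes wait-for-ready RPCs block until a new picker is pushed.
            return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
        }
        return balancer.PickResult{SubConn: p.scs[0]}, nil
    }
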
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 4192918..e31d76e 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -37,15 +37,8 @@
 
 // PickerBuilder creates balancer.Picker.
 type PickerBuilder interface {
-	// Build takes a slice of ready SubConns, and returns a picker that will be
-	// used by gRPC to pick a SubConn.
-	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
-}
-
-// V2PickerBuilder creates balancer.V2Picker.
-type V2PickerBuilder interface {
 	// Build returns a picker that will be used by gRPC to pick a SubConn.
-	Build(info PickerBuildInfo) balancer.V2Picker
+	Build(info PickerBuildInfo) balancer.Picker
 }
 
 // PickerBuildInfo contains information needed by the picker builder to
@@ -62,32 +55,17 @@
 	Address resolver.Address // the address used to create this SubConn
 }
 
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
-	return NewBalancerBuilderWithConfig(name, pb, Config{})
-}
-
 // Config contains the config info about the base balancer builder.
 type Config struct {
 	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
 	HealthCheck bool
 }
 
-// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
 	return &baseBuilder{
 		name:          name,
 		pickerBuilder: pb,
 		config:        config,
 	}
 }
-
-// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
-	return &baseBuilder{
-		name:            name,
-		v2PickerBuilder: pb,
-		config:          config,
-	}
-}
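
The base package now exposes a single PickerBuilder interface (the former V2 Build signature) and folds NewBalancerBuilderWithConfig and NewBalancerBuilderV2 into the three-argument NewBalancerBuilder. A sketch of registering a custom balancer against the new surface; the "example_lb" name and the pickerBuilder type are illustrative:

    package examplelb

    import (
        "google.golang.org/grpc/balancer"
        "google.golang.org/grpc/balancer/base"
    )

    type pickerBuilder struct{}

    // Build receives the ready SubConns via PickerBuildInfo. A real
    // implementation would build a picker over info.ReadySCs; returning an
    // err picker only keeps the sketch short.
    func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
        return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
    }

    func init() {
        // Callers of the removed NewBalancerBuilderWithConfig / NewBalancerBuilderV2
        // move to the single three-argument constructor.
        balancer.Register(base.NewBalancerBuilder("example_lb", &pickerBuilder{}, base.Config{HealthCheck: true}))
    }
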
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 0000000..4ecfa1c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+	// BalancerAddresses contains the remote load balancer address(es).  If
+	// set, overrides any resolver-provided addresses with Type of GRPCLB.
+	BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s.  s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(key, s)
+	return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+	s, _ := state.Attributes.Value(key).(*State)
+	return s
+}
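
The new state package lets a resolver hand remote load-balancer addresses to grpclb through resolver.State attributes rather than specially typed addresses. A sketch of a resolver pushing such an update; pushUpdate and both addresses are placeholders:

    package exampleresolver

    import (
        grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
        "google.golang.org/grpc/resolver"
    )

    // pushUpdate hands backend addresses plus the remote balancer address to
    // the ClientConn; both addresses below are placeholders.
    func pushUpdate(cc resolver.ClientConn) {
        s := resolver.State{
            Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}},
        }
        s = grpclbstate.Set(s, &grpclbstate.State{
            BalancerAddresses: []resolver.Address{{Addr: "lb.example.com:443"}},
        })
        cc.UpdateState(s)
    }
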
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index d4d6455..274eb2f 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -33,9 +33,11 @@
 // Name is the name of round_robin balancer.
 const Name = "round_robin"
 
+var logger = grpclog.Component("roundrobin")
+
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }
 
 func init() {
@@ -44,12 +46,12 @@
 
 type rrPickerBuilder struct{}
 
-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+	logger.Infof("roundrobinPicker: Build called with info: %v", info)
 	if len(info.ReadySCs) == 0 {
-		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
 	}
-	var scs []balancer.SubConn
+	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
 	for sc := range info.ReadySCs {
 		scs = append(scs, sc)
 	}
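
From the application's point of view round_robin is still selected by name, typically via the service config; only the registration internals changed above. A sketch of a dial that requests it, with a placeholder target and insecure credentials used only to keep the example short:

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    func main() {
        // Placeholder target; the grpc package itself registers "round_robin".
        conn, err := grpc.Dial(
            "dns:///example.service:443",
            grpc.WithInsecure(),
            grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
        )
        if err != nil {
            log.Fatalf("dial failed: %v", err)
        }
        defer conn.Close()
    }
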
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index f8667a2..f4ea617 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -37,14 +37,20 @@
 	err   error
 }
 
+// exitIdle contains no data and is just a signal sent on the updateCh in
+// ccBalancerWrapper to instruct the balancer to exit idle.
+type exitIdle struct{}
+
 // ccBalancerWrapper is a wrapper on top of cc for balancers.
 // It implements balancer.ClientConn interface.
 type ccBalancerWrapper struct {
-	cc         *ClientConn
-	balancerMu sync.Mutex // synchronizes calls to the balancer
-	balancer   balancer.Balancer
-	scBuffer   *buffer.Unbounded
-	done       *grpcsync.Event
+	cc          *ClientConn
+	balancerMu  sync.Mutex // synchronizes calls to the balancer
+	balancer    balancer.Balancer
+	hasExitIdle bool
+	updateCh    *buffer.Unbounded
+	closed      *grpcsync.Event
+	done        *grpcsync.Event
 
 	mu       sync.Mutex
 	subConns map[*acBalancerWrapper]struct{}
@@ -53,12 +59,14 @@
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
 	ccb := &ccBalancerWrapper{
 		cc:       cc,
-		scBuffer: buffer.NewUnbounded(),
+		updateCh: buffer.NewUnbounded(),
+		closed:   grpcsync.NewEvent(),
 		done:     grpcsync.NewEvent(),
 		subConns: make(map[*acBalancerWrapper]struct{}),
 	}
 	go ccb.watcher()
 	ccb.balancer = b.Build(ccb, bopts)
+	_, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
 	return ccb
 }
 
@@ -67,39 +75,72 @@
 func (ccb *ccBalancerWrapper) watcher() {
 	for {
 		select {
-		case t := <-ccb.scBuffer.Get():
-			ccb.scBuffer.Load()
-			if ccb.done.HasFired() {
+		case t := <-ccb.updateCh.Get():
+			ccb.updateCh.Load()
+			if ccb.closed.HasFired() {
 				break
 			}
-			ccb.balancerMu.Lock()
-			su := t.(*scStateUpdate)
-			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
-			} else {
-				ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
+			switch u := t.(type) {
+			case *scStateUpdate:
+				ccb.balancerMu.Lock()
+				ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
+				ccb.balancerMu.Unlock()
+			case *acBalancerWrapper:
+				ccb.mu.Lock()
+				if ccb.subConns != nil {
+					delete(ccb.subConns, u)
+					ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
+				}
+				ccb.mu.Unlock()
+			case exitIdle:
+				if ccb.cc.GetState() == connectivity.Idle {
+					if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
+						// We already checked that the balancer implements
+						// ExitIdle before pushing the event to updateCh, but
+						// check again here as a defensive measure.
+						ccb.balancerMu.Lock()
+						ei.ExitIdle()
+						ccb.balancerMu.Unlock()
+					}
+				}
+			default:
+				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
 			}
-			ccb.balancerMu.Unlock()
-		case <-ccb.done.Done():
+		case <-ccb.closed.Done():
 		}
 
-		if ccb.done.HasFired() {
+		if ccb.closed.HasFired() {
+			ccb.balancerMu.Lock()
 			ccb.balancer.Close()
+			ccb.balancerMu.Unlock()
 			ccb.mu.Lock()
 			scs := ccb.subConns
 			ccb.subConns = nil
 			ccb.mu.Unlock()
+			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
+			ccb.done.Fire()
+			// Fire done before removing the addr conns.  We can safely unblock
+			// ccb.close and allow the removeAddrConns to happen
+			// asynchronously.
 			for acbw := range scs {
 				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 			}
-			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
 			return
 		}
 	}
 }
 
 func (ccb *ccBalancerWrapper) close() {
-	ccb.done.Fire()
+	ccb.closed.Fire()
+	<-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() bool {
+	if !ccb.hasExitIdle {
+		return false
+	}
+	ccb.updateCh.Put(exitIdle{})
+	return true
 }
 
 func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
@@ -113,7 +154,7 @@
 	if sc == nil {
 		return
 	}
-	ccb.scBuffer.Put(&scStateUpdate{
+	ccb.updateCh.Put(&scStateUpdate{
 		sc:    sc,
 		state: s,
 		err:   err,
@@ -123,19 +164,13 @@
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 	ccb.balancerMu.Lock()
 	defer ccb.balancerMu.Unlock()
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		return ub.UpdateClientConnState(*ccs)
-	}
-	ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
-	return nil
+	return ccb.balancer.UpdateClientConnState(*ccs)
 }
 
 func (ccb *ccBalancerWrapper) resolverError(err error) {
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		ccb.balancerMu.Lock()
-		ub.ResolverError(err)
-		ccb.balancerMu.Unlock()
-	}
+	ccb.balancerMu.Lock()
+	defer ccb.balancerMu.Unlock()
+	ccb.balancer.ResolverError(err)
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -160,32 +195,18 @@
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// RemoveSubConn is handled in the watcher() goroutine to avoid a deadlock
+	// during switchBalancer() if the old balancer calls RemoveSubConn in its
+	// Close().
+	ccb.updateCh.Put(sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
 	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	delete(ccb.subConns, acbw)
-	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	// Update picker before updating state.  Even though the ordering here does
-	// not matter, it can lead to multiple calls of Pick in the common start-up
-	// case where we wait for ready and then perform an RPC.  If the picker is
-	// updated later, we could call the "connecting" picker when the state is
-	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePicker(p)
-	ccb.cc.csMgr.updateState(s)
+	acbw.UpdateAddresses(addrs)
 }
 
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
@@ -199,7 +220,7 @@
 	// case where we wait for ready and then perform an RPC.  If the picker is
 	// updated later, we could call the "connecting" picker when the state is
 	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+	ccb.cc.blockingpicker.updatePicker(s.Picker)
 	ccb.cc.csMgr.updateState(s.ConnectivityState)
 }
 
@@ -222,7 +243,7 @@
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
 	if len(addrs) <= 0 {
-		acbw.ac.tearDown(errConnDrain)
+		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
 		return
 	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
@@ -237,23 +258,23 @@
 		acbw.ac.acbw = nil
 		acbw.ac.mu.Unlock()
 		acState := acbw.ac.getState()
-		acbw.ac.tearDown(errConnDrain)
+		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
 
 		if acState == connectivity.Shutdown {
 			return
 		}
 
-		ac, err := cc.newAddrConn(addrs, opts)
+		newAC, err := cc.newAddrConn(addrs, opts)
 		if err != nil {
-			channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+			channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
 			return
 		}
-		acbw.ac = ac
-		ac.mu.Lock()
-		ac.acbw = acbw
-		ac.mu.Unlock()
+		acbw.ac = newAC
+		newAC.mu.Lock()
+		newAC.acbw = acbw
+		newAC.mu.Unlock()
 		if acState != connectivity.Idle {
-			ac.connect()
+			go newAC.connect()
 		}
 	}
 }
@@ -261,7 +282,7 @@
 func (acbw *acBalancerWrapper) Connect() {
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
-	acbw.ac.connect()
+	go acbw.ac.connect()
 }
 
 func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
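
The wrapper forwards the new exitIdle signal only to balancers that implement balancer.ExitIdler; the base balancer satisfies it with a no-op because it never idles its SubConns. A sketch of what a balancer that does park idle SubConns might do; lazyBalancer and its field are illustrative:

    package examplelb

    import "google.golang.org/grpc/balancer"

    // lazyBalancer is illustrative; subConns would be maintained by its
    // UpdateClientConnState / UpdateSubConnState handling.
    type lazyBalancer struct {
        subConns []balancer.SubConn
    }

    // ExitIdle is reached through ccBalancerWrapper.exitIdle when the channel
    // is asked to leave IDLE (for example by ClientConn.Connect).
    func (b *lazyBalancer) ExitIdle() {
        for _, sc := range b.subConns {
            sc.Connect()
        }
    }

    // Compile-time check for the optional interface the wrapper looks for.
    var _ balancer.ExitIdler = (*lazyBalancer)(nil)
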
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
deleted file mode 100644
index db04b08..0000000
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"sync"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-type balancerWrapperBuilder struct {
-	b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
-		DialCreds: opts.DialCreds,
-		Dialer:    opts.Dialer,
-	})
-	_, pickfirst := bwb.b.(*pickFirst)
-	bw := &balancerWrapper{
-		balancer:   bwb.b,
-		pickfirst:  pickfirst,
-		cc:         cc,
-		targetAddr: opts.Target.Endpoint,
-		startCh:    make(chan struct{}),
-		conns:      make(map[resolver.Address]balancer.SubConn),
-		connSt:     make(map[balancer.SubConn]*scState),
-		csEvltr:    &balancer.ConnectivityStateEvaluator{},
-		state:      connectivity.Idle,
-	}
-	cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
-	go bw.lbWatcher()
-	return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
-	return "wrapper"
-}
-
-type scState struct {
-	addr Address // The v1 address type.
-	s    connectivity.State
-	down func(error)
-}
-
-type balancerWrapper struct {
-	balancer  Balancer // The v1 balancer.
-	pickfirst bool
-
-	cc         balancer.ClientConn
-	targetAddr string // Target without the scheme.
-
-	mu     sync.Mutex
-	conns  map[resolver.Address]balancer.SubConn
-	connSt map[balancer.SubConn]*scState
-	// This channel is closed when handling the first resolver result.
-	// lbWatcher blocks until this is closed, to avoid race between
-	// - NewSubConn is created, cc wants to notify balancer of state changes;
-	// - Build hasn't return, cc doesn't have access to balancer.
-	startCh chan struct{}
-
-	// To aggregate the connectivity state.
-	csEvltr *balancer.ConnectivityStateEvaluator
-	state   connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
-	<-bw.startCh
-	notifyCh := bw.balancer.Notify()
-	if notifyCh == nil {
-		// There's no resolver in the balancer. Connect directly.
-		a := resolver.Address{
-			Addr: bw.targetAddr,
-			Type: resolver.Backend,
-		}
-		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-		if err != nil {
-			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-		} else {
-			bw.mu.Lock()
-			bw.conns[a] = sc
-			bw.connSt[sc] = &scState{
-				addr: Address{Addr: bw.targetAddr},
-				s:    connectivity.Idle,
-			}
-			bw.mu.Unlock()
-			sc.Connect()
-		}
-		return
-	}
-
-	for addrs := range notifyCh {
-		grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
-		if bw.pickfirst {
-			var (
-				oldA  resolver.Address
-				oldSC balancer.SubConn
-			)
-			bw.mu.Lock()
-			for oldA, oldSC = range bw.conns {
-				break
-			}
-			bw.mu.Unlock()
-			if len(addrs) <= 0 {
-				if oldSC != nil {
-					// Teardown old sc.
-					bw.mu.Lock()
-					delete(bw.conns, oldA)
-					delete(bw.connSt, oldSC)
-					bw.mu.Unlock()
-					bw.cc.RemoveSubConn(oldSC)
-				}
-				continue
-			}
-
-			var newAddrs []resolver.Address
-			for _, a := range addrs {
-				newAddr := resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}
-				newAddrs = append(newAddrs, newAddr)
-			}
-			if oldSC == nil {
-				// Create new sc.
-				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
-				} else {
-					bw.mu.Lock()
-					// For pickfirst, there should be only one SubConn, so the
-					// address doesn't matter. All states updating (up and down)
-					// and picking should all happen on that only SubConn.
-					bw.conns[resolver.Address{}] = sc
-					bw.connSt[sc] = &scState{
-						addr: addrs[0], // Use the first address.
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			} else {
-				bw.mu.Lock()
-				bw.connSt[oldSC].addr = addrs[0]
-				bw.mu.Unlock()
-				oldSC.UpdateAddresses(newAddrs)
-			}
-		} else {
-			var (
-				add []resolver.Address // Addresses need to setup connections.
-				del []balancer.SubConn // Connections need to tear down.
-			)
-			resAddrs := make(map[resolver.Address]Address)
-			for _, a := range addrs {
-				resAddrs[resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}] = a
-			}
-			bw.mu.Lock()
-			for a := range resAddrs {
-				if _, ok := bw.conns[a]; !ok {
-					add = append(add, a)
-				}
-			}
-			for a, c := range bw.conns {
-				if _, ok := resAddrs[a]; !ok {
-					del = append(del, c)
-					delete(bw.conns, a)
-					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
-				}
-			}
-			bw.mu.Unlock()
-			for _, a := range add {
-				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-				} else {
-					bw.mu.Lock()
-					bw.conns[a] = sc
-					bw.connSt[sc] = &scState{
-						addr: resAddrs[a],
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			}
-			for _, c := range del {
-				bw.cc.RemoveSubConn(c)
-			}
-		}
-	}
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	scSt, ok := bw.connSt[sc]
-	if !ok {
-		return
-	}
-	if s == connectivity.Idle {
-		sc.Connect()
-	}
-	oldS := scSt.s
-	scSt.s = s
-	if oldS != connectivity.Ready && s == connectivity.Ready {
-		scSt.down = bw.balancer.Up(scSt.addr)
-	} else if oldS == connectivity.Ready && s != connectivity.Ready {
-		if scSt.down != nil {
-			scSt.down(errConnClosing)
-		}
-	}
-	sa := bw.csEvltr.RecordTransition(oldS, s)
-	if bw.state != sa {
-		bw.state = sa
-	}
-	bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
-	if s == connectivity.Shutdown {
-		// Remove state for this sc.
-		delete(bw.connSt, sc)
-	}
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	// There should be a resolver inside the balancer.
-	// All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
-	failfast := true // Default failfast is true.
-	if ss, ok := rpcInfoFromContext(info.Ctx); ok {
-		failfast = ss.failfast
-	}
-	a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
-	if err != nil {
-		return balancer.PickResult{}, toRPCErr(err)
-	}
-	if p != nil {
-		result.Done = func(balancer.DoneInfo) { p() }
-		defer func() {
-			if err != nil {
-				p()
-			}
-		}()
-	}
-
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	if bw.pickfirst {
-		// Get the first sc in conns.
-		for _, result.SubConn = range bw.conns {
-			return result, nil
-		}
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-	var ok1 bool
-	result.SubConn, ok1 = bw.conns[resolver.Address{
-		Addr:       a.Addr,
-		Type:       resolver.Backend,
-		ServerName: "",
-		Metadata:   a.Metadata,
-	}]
-	s, ok2 := bw.connSt[result.SubConn]
-	if !ok1 || !ok2 {
-		// This can only happen due to a race where Get() returned an address
-		// that was subsequently removed by Notify.  In this case we should
-		// retry always.
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-	switch s.s {
-	case connectivity.Ready, connectivity.Idle:
-		return result, nil
-	case connectivity.Shutdown, connectivity.TransientFailure:
-		// If the returned sc has been shut down or is in transient failure,
-		// return error, and this RPC will fail or wait for another picker (if
-		// non-failfast).
-		return balancer.PickResult{}, balancer.ErrTransientFailure
-	default:
-		// For other states (connecting or unknown), the v1 balancer would
-		// traditionally wait until ready and then issue the RPC.  Returning
-		// ErrNoSubConnAvailable will be a slight improvement in that it will
-		// allow the balancer to choose another address in case others are
-		// connected.
-		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-	}
-}
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index f393bb6..ed75290 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -1,24 +1,49 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
+// versions:
+// 	protoc-gen-go v1.25.0
+// 	protoc        v3.14.0
+// source: grpc/binlog/v1/binarylog.proto
 
-package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+package grpc_binarylog_v1
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import duration "github.com/golang/protobuf/ptypes/duration"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+import (
+	proto "github.com/golang/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
 
 // Enumerates the type of event
 // Note the terminology is different from the RPC semantics
@@ -54,32 +79,55 @@
 	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
 )
 
-var GrpcLogEntry_EventType_name = map[int32]string{
-	0: "EVENT_TYPE_UNKNOWN",
-	1: "EVENT_TYPE_CLIENT_HEADER",
-	2: "EVENT_TYPE_SERVER_HEADER",
-	3: "EVENT_TYPE_CLIENT_MESSAGE",
-	4: "EVENT_TYPE_SERVER_MESSAGE",
-	5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
-	6: "EVENT_TYPE_SERVER_TRAILER",
-	7: "EVENT_TYPE_CANCEL",
-}
-var GrpcLogEntry_EventType_value = map[string]int32{
-	"EVENT_TYPE_UNKNOWN":           0,
-	"EVENT_TYPE_CLIENT_HEADER":     1,
-	"EVENT_TYPE_SERVER_HEADER":     2,
-	"EVENT_TYPE_CLIENT_MESSAGE":    3,
-	"EVENT_TYPE_SERVER_MESSAGE":    4,
-	"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
-	"EVENT_TYPE_SERVER_TRAILER":    6,
-	"EVENT_TYPE_CANCEL":            7,
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+	GrpcLogEntry_EventType_name = map[int32]string{
+		0: "EVENT_TYPE_UNKNOWN",
+		1: "EVENT_TYPE_CLIENT_HEADER",
+		2: "EVENT_TYPE_SERVER_HEADER",
+		3: "EVENT_TYPE_CLIENT_MESSAGE",
+		4: "EVENT_TYPE_SERVER_MESSAGE",
+		5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+		6: "EVENT_TYPE_SERVER_TRAILER",
+		7: "EVENT_TYPE_CANCEL",
+	}
+	GrpcLogEntry_EventType_value = map[string]int32{
+		"EVENT_TYPE_UNKNOWN":           0,
+		"EVENT_TYPE_CLIENT_HEADER":     1,
+		"EVENT_TYPE_SERVER_HEADER":     2,
+		"EVENT_TYPE_CLIENT_MESSAGE":    3,
+		"EVENT_TYPE_SERVER_MESSAGE":    4,
+		"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+		"EVENT_TYPE_SERVER_TRAILER":    6,
+		"EVENT_TYPE_CANCEL":            7,
+	}
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+	p := new(GrpcLogEntry_EventType)
+	*p = x
+	return p
 }
 
 func (x GrpcLogEntry_EventType) String() string {
-	return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
 func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
 }
 
 // Enumerates the entity that generates the log entry
@@ -91,22 +139,45 @@
 	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
 )
 
-var GrpcLogEntry_Logger_name = map[int32]string{
-	0: "LOGGER_UNKNOWN",
-	1: "LOGGER_CLIENT",
-	2: "LOGGER_SERVER",
-}
-var GrpcLogEntry_Logger_value = map[string]int32{
-	"LOGGER_UNKNOWN": 0,
-	"LOGGER_CLIENT":  1,
-	"LOGGER_SERVER":  2,
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+	GrpcLogEntry_Logger_name = map[int32]string{
+		0: "LOGGER_UNKNOWN",
+		1: "LOGGER_CLIENT",
+		2: "LOGGER_SERVER",
+	}
+	GrpcLogEntry_Logger_value = map[string]int32{
+		"LOGGER_UNKNOWN": 0,
+		"LOGGER_CLIENT":  1,
+		"LOGGER_SERVER":  2,
+	}
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+	p := new(GrpcLogEntry_Logger)
+	*p = x
+	return p
 }
 
 func (x GrpcLogEntry_Logger) String() string {
-	return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
 func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
 }
 
 type Address_Type int32
@@ -122,30 +193,57 @@
 	Address_TYPE_UNIX Address_Type = 3
 )
 
-var Address_Type_name = map[int32]string{
-	0: "TYPE_UNKNOWN",
-	1: "TYPE_IPV4",
-	2: "TYPE_IPV6",
-	3: "TYPE_UNIX",
-}
-var Address_Type_value = map[string]int32{
-	"TYPE_UNKNOWN": 0,
-	"TYPE_IPV4":    1,
-	"TYPE_IPV6":    2,
-	"TYPE_UNIX":    3,
+// Enum value maps for Address_Type.
+var (
+	Address_Type_name = map[int32]string{
+		0: "TYPE_UNKNOWN",
+		1: "TYPE_IPV4",
+		2: "TYPE_IPV6",
+		3: "TYPE_UNIX",
+	}
+	Address_Type_value = map[string]int32{
+		"TYPE_UNKNOWN": 0,
+		"TYPE_IPV4":    1,
+		"TYPE_IPV6":    2,
+		"TYPE_UNIX":    3,
+	}
+)
+
+func (x Address_Type) Enum() *Address_Type {
+	p := new(Address_Type)
+	*p = x
+	return p
 }
 
 func (x Address_Type) String() string {
-	return proto.EnumName(Address_Type_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
 func (Address_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
 }
 
 // Log entry we store in binary logs
 type GrpcLogEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// The timestamp of the binary log message
-	Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
 	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
 	// from an unset value.
 	// Each call may have several log entries, they will all have the same call_id.
@@ -158,11 +256,11 @@
 	// durability or ordering is not guaranteed.
 	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
 	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
-	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
+	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
 	// The logger uses one of the following fields to record the payload,
 	// according to the type of the log entry.
 	//
-	// Types that are valid to be assigned to Payload:
+	// Types that are assignable to Payload:
 	//	*GrpcLogEntry_ClientHeader
 	//	*GrpcLogEntry_ServerHeader
 	//	*GrpcLogEntry_Message
@@ -175,71 +273,125 @@
 	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
 	// the case of trailers-only. On server side, peer is always
 	// logged on EVENT_TYPE_CLIENT_HEADER.
-	Peer                 *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
 }
 
-func (m *GrpcLogEntry) Reset()         { *m = GrpcLogEntry{} }
-func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
-func (*GrpcLogEntry) ProtoMessage()    {}
+func (x *GrpcLogEntry) Reset() {
+	*x = GrpcLogEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GrpcLogEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
 func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
-}
-func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
-}
-func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
-}
-func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
-}
-func (m *GrpcLogEntry) XXX_Size() int {
-	return xxx_messageInfo_GrpcLogEntry.Size(m)
-}
-func (m *GrpcLogEntry) XXX_DiscardUnknown() {
-	xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
 }
 
-var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
-
-func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
-	if m != nil {
-		return m.Timestamp
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
 	}
 	return nil
 }
 
-func (m *GrpcLogEntry) GetCallId() uint64 {
-	if m != nil {
-		return m.CallId
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
 	}
 	return 0
 }
 
-func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
-	if m != nil {
-		return m.SequenceIdWithinCall
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
 	}
 	return 0
 }
 
-func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
-	if m != nil {
-		return m.Type
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
 	}
 	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
 }
 
-func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
-	if m != nil {
-		return m.Logger
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
 	}
 	return GrpcLogEntry_LOGGER_UNKNOWN
 }
 
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
 type isGrpcLogEntry_Payload interface {
 	isGrpcLogEntry_Payload()
 }
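
Although regenerated, the message keeps the same exported accessors, so code that consumes binary-log entries continues to read the oneof payload through the Get helpers. A sketch that summarizes an already-decoded entry; the describe helper is illustrative and obtaining the entry is out of scope here:

    package binlogdump

    import (
        "fmt"

        binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
    )

    // describe prints a one-line summary of a decoded log entry.
    func describe(e *binlogpb.GrpcLogEntry) {
        switch e.GetType() {
        case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER:
            fmt.Printf("call %d: %s on %s\n", e.GetCallId(), e.GetClientHeader().GetMethodName(), e.GetClientHeader().GetAuthority())
        case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER:
            fmt.Printf("call %d: status %d\n", e.GetCallId(), e.GetTrailer().GetStatusCode())
        default:
            fmt.Printf("call %d: %v\n", e.GetCallId(), e.GetType())
        }
    }
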
@@ -253,6 +405,7 @@
 }
 
 type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
 	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
 }
 
@@ -268,168 +421,11 @@
 
 func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
 
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
-		return x.ClientHeader
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
-		return x.ServerHeader
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetMessage() *Message {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
-		return x.Message
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetTrailer() *Trailer {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
-		return x.Trailer
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetPayloadTruncated() bool {
-	if m != nil {
-		return m.PayloadTruncated
-	}
-	return false
-}
-
-func (m *GrpcLogEntry) GetPeer() *Address {
-	if m != nil {
-		return m.Peer
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
-		(*GrpcLogEntry_ClientHeader)(nil),
-		(*GrpcLogEntry_ServerHeader)(nil),
-		(*GrpcLogEntry_Message)(nil),
-		(*GrpcLogEntry_Trailer)(nil),
-	}
-}
-
-func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*GrpcLogEntry)
-	// payload
-	switch x := m.Payload.(type) {
-	case *GrpcLogEntry_ClientHeader:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ClientHeader); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_ServerHeader:
-		b.EncodeVarint(7<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ServerHeader); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_Message:
-		b.EncodeVarint(8<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Message); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_Trailer:
-		b.EncodeVarint(9<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Trailer); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*GrpcLogEntry)
-	switch tag {
-	case 6: // payload.client_header
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ClientHeader)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_ClientHeader{msg}
-		return true, err
-	case 7: // payload.server_header
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ServerHeader)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_ServerHeader{msg}
-		return true, err
-	case 8: // payload.message
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Message)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_Message{msg}
-		return true, err
-	case 9: // payload.trailer
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Trailer)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_Trailer{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*GrpcLogEntry)
-	// payload
-	switch x := m.Payload.(type) {
-	case *GrpcLogEntry_ClientHeader:
-		s := proto.Size(x.ClientHeader)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_ServerHeader:
-		s := proto.Size(x.ServerHeader)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_Message:
-		s := proto.Size(x.Message)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_Trailer:
-		s := proto.Size(x.Trailer)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
 type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// This contains only the metadata from the application.
 	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
 	// The name of the RPC method, which looks something like:
@@ -443,104 +439,122 @@
 	// <host> or <host>:<port> .
 	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
 	// the RPC timeout
-	Timeout              *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
+	Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
 }
 
-func (m *ClientHeader) Reset()         { *m = ClientHeader{} }
-func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
-func (*ClientHeader) ProtoMessage()    {}
+func (x *ClientHeader) Reset() {
+	*x = ClientHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ClientHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
 func (*ClientHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
-}
-func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
-}
-func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
-}
-func (dst *ClientHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ClientHeader.Merge(dst, src)
-}
-func (m *ClientHeader) XXX_Size() int {
-	return xxx_messageInfo_ClientHeader.Size(m)
-}
-func (m *ClientHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
 }
 
-var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
-
-func (m *ClientHeader) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *ClientHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
-func (m *ClientHeader) GetMethodName() string {
-	if m != nil {
-		return m.MethodName
+func (x *ClientHeader) GetMethodName() string {
+	if x != nil {
+		return x.MethodName
 	}
 	return ""
 }
 
-func (m *ClientHeader) GetAuthority() string {
-	if m != nil {
-		return m.Authority
+func (x *ClientHeader) GetAuthority() string {
+	if x != nil {
+		return x.Authority
 	}
 	return ""
 }
 
-func (m *ClientHeader) GetTimeout() *duration.Duration {
-	if m != nil {
-		return m.Timeout
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+	if x != nil {
+		return x.Timeout
 	}
 	return nil
 }
 
 type ServerHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// This contains only the metadata from the application.
-	Metadata             *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
 }
 
-func (m *ServerHeader) Reset()         { *m = ServerHeader{} }
-func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
-func (*ServerHeader) ProtoMessage()    {}
+func (x *ServerHeader) Reset() {
+	*x = ServerHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ServerHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
 func (*ServerHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
-}
-func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
-}
-func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
-}
-func (dst *ServerHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerHeader.Merge(dst, src)
-}
-func (m *ServerHeader) XXX_Size() int {
-	return xxx_messageInfo_ServerHeader.Size(m)
-}
-func (m *ServerHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServerHeader.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
 }
 
-var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
-
-func (m *ServerHeader) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *ServerHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
 type Trailer struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// This contains only the metadata from the application.
 	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
 	// The gRPC status code.
@@ -550,110 +564,124 @@
 	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
 	// The value of the 'grpc-status-details-bin' metadata key. If
 	// present, this is always an encoded 'google.rpc.Status' message.
-	StatusDetails        []byte   `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
 }
 
-func (m *Trailer) Reset()         { *m = Trailer{} }
-func (m *Trailer) String() string { return proto.CompactTextString(m) }
-func (*Trailer) ProtoMessage()    {}
+func (x *Trailer) Reset() {
+	*x = Trailer{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Trailer) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
 func (*Trailer) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
-}
-func (m *Trailer) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Trailer.Unmarshal(m, b)
-}
-func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
-}
-func (dst *Trailer) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Trailer.Merge(dst, src)
-}
-func (m *Trailer) XXX_Size() int {
-	return xxx_messageInfo_Trailer.Size(m)
-}
-func (m *Trailer) XXX_DiscardUnknown() {
-	xxx_messageInfo_Trailer.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
 }
 
-var xxx_messageInfo_Trailer proto.InternalMessageInfo
-
-func (m *Trailer) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *Trailer) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
-func (m *Trailer) GetStatusCode() uint32 {
-	if m != nil {
-		return m.StatusCode
+func (x *Trailer) GetStatusCode() uint32 {
+	if x != nil {
+		return x.StatusCode
 	}
 	return 0
 }
 
-func (m *Trailer) GetStatusMessage() string {
-	if m != nil {
-		return m.StatusMessage
+func (x *Trailer) GetStatusMessage() string {
+	if x != nil {
+		return x.StatusMessage
 	}
 	return ""
 }
 
-func (m *Trailer) GetStatusDetails() []byte {
-	if m != nil {
-		return m.StatusDetails
+func (x *Trailer) GetStatusDetails() []byte {
+	if x != nil {
+		return x.StatusDetails
 	}
 	return nil
 }
 
 // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
 type Message struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Length of the message. It may not be the same as the length of the
 	// data field, as the logging payload can be truncated or omitted.
 	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
 	// May be truncated or omitted.
-	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
 }
 
-func (m *Message) Reset()         { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage()    {}
+func (x *Message) Reset() {
+	*x = Message{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Message) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
 func (*Message) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Message.Unmarshal(m, b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-}
-func (dst *Message) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Message.Merge(dst, src)
-}
-func (m *Message) XXX_Size() int {
-	return xxx_messageInfo_Message.Size(m)
-}
-func (m *Message) XXX_DiscardUnknown() {
-	xxx_messageInfo_Message.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
 }
 
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-func (m *Message) GetLength() uint32 {
-	if m != nil {
-		return m.Length
+func (x *Message) GetLength() uint32 {
+	if x != nil {
+		return x.Length
 	}
 	return 0
 }
 
-func (m *Message) GetData() []byte {
-	if m != nil {
-		return m.Data
+func (x *Message) GetData() []byte {
+	if x != nil {
+		return x.Data
 	}
 	return nil
 }
@@ -680,221 +708,480 @@
 // header is just a normal metadata key.
 // The pair will not count towards the size limit.
 type Metadata struct {
-	Entry                []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
 }
 
-func (m *Metadata) Reset()         { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage()    {}
+func (x *Metadata) Reset() {
+	*x = Metadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
 func (*Metadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Metadata.Unmarshal(m, b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-}
-func (dst *Metadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metadata.Merge(dst, src)
-}
-func (m *Metadata) XXX_Size() int {
-	return xxx_messageInfo_Metadata.Size(m)
-}
-func (m *Metadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metadata.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
 }
 
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetEntry() []*MetadataEntry {
-	if m != nil {
-		return m.Entry
+func (x *Metadata) GetEntry() []*MetadataEntry {
+	if x != nil {
+		return x.Entry
 	}
 	return nil
 }
 
 // A metadata key value pair
 type MetadataEntry struct {
-	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
 }
 
-func (m *MetadataEntry) Reset()         { *m = MetadataEntry{} }
-func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
-func (*MetadataEntry) ProtoMessage()    {}
+func (x *MetadataEntry) Reset() {
+	*x = MetadataEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetadataEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
 func (*MetadataEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
-}
-func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
-}
-func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
-}
-func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetadataEntry.Merge(dst, src)
-}
-func (m *MetadataEntry) XXX_Size() int {
-	return xxx_messageInfo_MetadataEntry.Size(m)
-}
-func (m *MetadataEntry) XXX_DiscardUnknown() {
-	xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
 }
 
-var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
-
-func (m *MetadataEntry) GetKey() string {
-	if m != nil {
-		return m.Key
+func (x *MetadataEntry) GetKey() string {
+	if x != nil {
+		return x.Key
 	}
 	return ""
 }
 
-func (m *MetadataEntry) GetValue() []byte {
-	if m != nil {
-		return m.Value
+func (x *MetadataEntry) GetValue() []byte {
+	if x != nil {
+		return x.Value
 	}
 	return nil
 }
 
 // Address information
 type Address struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
 	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
 	// only for TYPE_IPV4 and TYPE_IPV6
-	IpPort               uint32   `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
 }
 
-func (m *Address) Reset()         { *m = Address{} }
-func (m *Address) String() string { return proto.CompactTextString(m) }
-func (*Address) ProtoMessage()    {}
+func (x *Address) Reset() {
+	*x = Address{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Address) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
 func (*Address) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
-}
-func (m *Address) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Address.Unmarshal(m, b)
-}
-func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Address.Marshal(b, m, deterministic)
-}
-func (dst *Address) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Address.Merge(dst, src)
-}
-func (m *Address) XXX_Size() int {
-	return xxx_messageInfo_Address.Size(m)
-}
-func (m *Address) XXX_DiscardUnknown() {
-	xxx_messageInfo_Address.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
 }
 
-var xxx_messageInfo_Address proto.InternalMessageInfo
-
-func (m *Address) GetType() Address_Type {
-	if m != nil {
-		return m.Type
+func (x *Address) GetType() Address_Type {
+	if x != nil {
+		return x.Type
 	}
 	return Address_TYPE_UNKNOWN
 }
 
-func (m *Address) GetAddress() string {
-	if m != nil {
-		return m.Address
+func (x *Address) GetAddress() string {
+	if x != nil {
+		return x.Address
 	}
 	return ""
 }
 
-func (m *Address) GetIpPort() uint32 {
-	if m != nil {
-		return m.IpPort
+func (x *Address) GetIpPort() uint32 {
+	if x != nil {
+		return x.IpPort
 	}
 	return 0
 }
 
-func init() {
-	proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
-	proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
-	proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
-	proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
-	proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
-	proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
-	proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
-	proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
-	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
-	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
-	proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
+	0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+	0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+	0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
+	0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
+	0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
+	0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
+	0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
+	0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
+	0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
+	0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+	0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
+	0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+	0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
+	0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
+	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
+	0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
+	0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
+	0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
+	0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
+	0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
+	0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
+	0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
+	0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
+	0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
+	0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
+	0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
+	0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
+	0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
+	0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
+	0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
+	0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
+	0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+	0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
+	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
+	0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
+	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+	0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
+	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
+	0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+	0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
+	0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
+	0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
+	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
+	0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+	0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
+	0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+	0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
+	0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
+	0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
+	0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
+	0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
+	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
+	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
+	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
+	0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
+	0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
+	0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
 }
 
-func init() {
-	proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
+var (
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+	file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+	})
+	return file_grpc_binlog_v1_binarylog_proto_rawDescData
 }
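The generated accessor above follows the protoc-gen-go v2 pattern of compressing the raw file descriptor lazily and caching the result behind a sync.Once. A minimal, self-contained sketch of that compress-once pattern, using only the standard library; the identifiers below are illustrative, not the generated names:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDesc     = []byte("example descriptor bytes")
	rawDescOnce sync.Once
	rawDescData = rawDesc
)

// rawDescGZIP gzips the raw descriptor exactly once and hands back the cached
// compressed form on every later call.
func rawDescGZIP() []byte {
	rawDescOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(rawDescData) // errors ignored for brevity in this sketch
		zw.Close()
		rawDescData = buf.Bytes()
	})
	return rawDescData
}

func main() {
	fmt.Println(len(rawDescGZIP()), len(rawDescGZIP())) // the second call reuses the cache
}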
 
-var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
-	// 900 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
-	0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
-	0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
-	0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
-	0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
-	0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
-	0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
-	0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
-	0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
-	0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
-	0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
-	0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
-	0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
-	0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
-	0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
-	0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
-	0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
-	0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
-	0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
-	0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
-	0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
-	0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
-	0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
-	0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
-	0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
-	0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
-	0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
-	0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
-	0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
-	0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
-	0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
-	0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
-	0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
-	0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
-	0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
-	0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
-	0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
-	0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
-	0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
-	0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
-	0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
-	0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
-	0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
-	0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
-	0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
-	0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
-	0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
-	0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
-	0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
-	0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
-	0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
-	0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
-	0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
-	0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
-	0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
-	0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
-	0xd4, 0x07, 0x00, 0x00,
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type
+	(*GrpcLogEntry)(nil),          // 3: grpc.binarylog.v1.GrpcLogEntry
+	(*ClientHeader)(nil),          // 4: grpc.binarylog.v1.ClientHeader
+	(*ServerHeader)(nil),          // 5: grpc.binarylog.v1.ServerHeader
+	(*Trailer)(nil),               // 6: grpc.binarylog.v1.Trailer
+	(*Message)(nil),               // 7: grpc.binarylog.v1.Message
+	(*Metadata)(nil),              // 8: grpc.binarylog.v1.Metadata
+	(*MetadataEntry)(nil),         // 9: grpc.binarylog.v1.MetadataEntry
+	(*Address)(nil),               // 10: grpc.binarylog.v1.Address
+	(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+	(*durationpb.Duration)(nil),   // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+	11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+	0,  // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+	1,  // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+	4,  // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+	5,  // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+	7,  // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+	6,  // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+	10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+	8,  // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+	8,  // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	8,  // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+	9,  // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+	2,  // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+	14, // [14:14] is the sub-list for method output_type
+	14, // [14:14] is the sub-list for method input_type
+	14, // [14:14] is the sub-list for extension type_name
+	14, // [14:14] is the sub-list for extension extendee
+	0,  // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+	if File_grpc_binlog_v1_binarylog_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*GrpcLogEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ClientHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ServerHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Trailer); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Message); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetadataEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Address); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+		(*GrpcLogEntry_ClientHeader)(nil),
+		(*GrpcLogEntry_ServerHeader)(nil),
+		(*GrpcLogEntry_Message)(nil),
+		(*GrpcLogEntry_Trailer)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_grpc_binlog_v1_binarylog_proto_goTypes,
+		DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+		EnumInfos:         file_grpc_binlog_v1_binarylog_proto_enumTypes,
+		MessageInfos:      file_grpc_binlog_v1_binarylog_proto_msgTypes,
+	}.Build()
+	File_grpc_binlog_v1_binarylog_proto = out.File
+	file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
+	file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+	file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 0740693..28f09dc 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -23,7 +23,7 @@
 	"errors"
 	"fmt"
 	"math"
-	"net"
+	"net/url"
 	"reflect"
 	"strings"
 	"sync"
@@ -38,7 +38,7 @@
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
-	"google.golang.org/grpc/internal/grpcutil"
+	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
@@ -48,6 +48,7 @@
 	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
 	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver.
 	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+	_ "google.golang.org/grpc/internal/resolver/unix"        // To register unix resolver.
 )
 
 const (
@@ -68,8 +69,6 @@
 	errConnDrain = errors.New("grpc: the connection is drained")
 	// errConnClosing indicates that the connection is closing.
 	errConnClosing = errors.New("grpc: the connection is closing")
-	// errBalancerClosed indicates that the balancer is closed.
-	errBalancerClosed = errors.New("grpc: balancer is closed")
 	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
 	// service config.
 	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -84,13 +83,13 @@
 	// errTransportCredsAndBundle indicates that creds bundle is used together
 	// with other individual Transport Credentials.
 	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
-	// errTransportCredentialsMissing indicates that users want to transmit security
-	// information (e.g., OAuth2 token) which requires secure connection on an insecure
-	// connection.
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires secure
+	// connection on an insecure connection.
 	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
-	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
-	// and grpc.WithInsecure() are both called for a connection.
-	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
 )
 
 const (
@@ -106,6 +105,17 @@
 	return DialContext(context.Background(), target, opts...)
 }
 
+type defaultConfigSelector struct {
+	sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
 // DialContext creates a client connection to the given target. By default, it's
 // a non-blocking dial (the function won't wait for connections to be
 // established, and connecting happens in the background). To make it a blocking
@@ -133,6 +143,7 @@
 		firstResolveEvent: grpcsync.NewEvent(),
 	}
 	cc.retryThrottler.Store((*retryThrottler)(nil))
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
 	cc.ctx, cc.cancel = context.WithCancel(context.Background())
 
 	for _, opt := range opts {
@@ -151,32 +162,35 @@
 	if channelz.IsOn() {
 		if cc.dopts.channelzParentID != 0 {
 			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
-			channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
+			channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
 				Desc:     "Channel Created",
-				Severity: channelz.CtINFO,
+				Severity: channelz.CtInfo,
 				Parent: &channelz.TraceEventDesc{
 					Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
-					Severity: channelz.CtINFO,
+					Severity: channelz.CtInfo,
 				},
 			})
 		} else {
 			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
-			channelz.Info(cc.channelzID, "Channel Created")
+			channelz.Info(logger, cc.channelzID, "Channel Created")
 		}
 		cc.csMgr.channelzID = cc.channelzID
 	}
 
-	if !cc.dopts.insecure {
-		if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
-			return nil, errNoTransportSecurity
-		}
-		if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
-			return nil, errTransportCredsAndBundle
-		}
-	} else {
-		if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
-			return nil, errCredentialsConflict
-		}
+	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+		return nil, errNoTransportSecurity
+	}
+	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+		return nil, errTransportCredsAndBundle
+	}
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return nil, errNoTransportCredsInBundle
+	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
 		for _, cd := range cc.dopts.copts.PerRPCCredentials {
 			if cd.RequireTransportSecurity() {
 				return nil, errTransportCredentialsMissing
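With the checks added above, a channel must always carry either transport credentials or a credentials bundle; plaintext is now expressed by credentials whose Info().SecurityProtocol reports "insecure" rather than by an insecure-mode flag. A hedged usage sketch with explicit TLS credentials (the target and server name are made up):

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Explicit TLS credentials satisfy the TransportCredentials requirement.
	creds := credentials.NewTLS(&tls.Config{ServerName: "backend.example.com"})
	conn, err := grpc.Dial("backend.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}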
@@ -193,16 +207,6 @@
 	}
 	cc.mkp = cc.dopts.copts.KeepaliveParams
 
-	if cc.dopts.copts.Dialer == nil {
-		cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
-			network, addr := parseDialTarget(addr)
-			return (&net.Dialer{}).DialContext(ctx, network, addr)
-		}
-		if cc.dopts.withProxy {
-			cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer)
-		}
-	}
-
 	if cc.dopts.copts.UserAgent != "" {
 		cc.dopts.copts.UserAgent += " " + grpcUA
 	} else {
@@ -217,7 +221,14 @@
 	defer func() {
 		select {
 		case <-ctx.Done():
-			conn, err = nil, ctx.Err()
+			switch {
+			case ctx.Err() == err:
+				conn = nil
+			case err == nil || !cc.dopts.returnLastError:
+				conn, err = nil, ctx.Err()
+			default:
+				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+			}
 		default:
 		}
 	}()
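The deferred block above decides whether a failed blocking dial reports the bare context error or also wraps the last connection error. A hedged sketch of the caller-visible difference, assuming the WithReturnConnectionError dial option (which sets returnLastError and implies a blocking dial); the address is made up and nothing is expected to listen on it:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// The blocking dial keeps failing until the context expires.
	_, err := grpc.DialContext(ctx, "localhost:1",
		grpc.WithInsecure(), // credential choice is incidental to this sketch
		grpc.WithReturnConnectionError(),
	)
	// With returnLastError set, err carries the last connection error as well
	// as the context error instead of a bare context.DeadlineExceeded.
	log.Printf("dial error: %v", err)
}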
@@ -229,6 +240,7 @@
 		case sc, ok := <-cc.dopts.scChan:
 			if ok {
 				cc.sc = &sc
+				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
 				scSet = true
 			}
 		default:
@@ -239,34 +251,15 @@
 	}
 
 	// Determine the resolver to use.
-	cc.parsedTarget = grpcutil.ParseTarget(cc.target)
-	channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
-	resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
-	if resolverBuilder == nil {
-		// If resolver builder is still nil, the parsed target's scheme is
-		// not registered. Fallback to default resolver and set Endpoint to
-		// the original target.
-		channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
-		cc.parsedTarget = resolver.Target{
-			Scheme:   resolver.GetDefaultScheme(),
-			Endpoint: target,
-		}
-		resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
-		if resolverBuilder == nil {
-			return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
-		}
+	resolverBuilder, err := cc.parseTargetAndFindResolver()
+	if err != nil {
+		return nil, err
 	}
-
-	creds := cc.dopts.copts.TransportCredentials
-	if creds != nil && creds.Info().ServerName != "" {
-		cc.authority = creds.Info().ServerName
-	} else if cc.dopts.insecure && cc.dopts.authority != "" {
-		cc.authority = cc.dopts.authority
-	} else {
-		// Use endpoint from "scheme://authority/endpoint" as the default
-		// authority for ClientConn.
-		cc.authority = cc.parsedTarget.Endpoint
+	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
+	if err != nil {
+		return nil, err
 	}
+	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
 
 	if cc.dopts.scChan != nil && !scSet {
 		// Blocking wait for the initial service config.
@@ -274,6 +267,7 @@
 		case sc, ok := <-cc.dopts.scChan:
 			if ok {
 				cc.sc = &sc
+				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
 			}
 		case <-ctx.Done():
 			return nil, ctx.Err()
@@ -291,6 +285,8 @@
 		DialCreds:        credsClone,
 		CredsBundle:      cc.dopts.copts.CredsBundle,
 		Dialer:           cc.dopts.copts.Dialer,
+		Authority:        cc.authority,
+		CustomUserAgent:  cc.dopts.copts.UserAgent,
 		ChannelzParentID: cc.channelzID,
 		Target:           cc.parsedTarget,
 	}
@@ -307,11 +303,12 @@
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
 		for {
+			cc.Connect()
 			s := cc.GetState()
 			if s == connectivity.Ready {
 				break
 			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
-				if err = cc.blockingpicker.connectionError(); err != nil {
+				if err = cc.connectionError(); err != nil {
 					terr, ok := err.(interface {
 						Temporary() bool
 					})
@@ -322,6 +319,9 @@
 			}
 			if !cc.WaitForStateChange(ctx, s) {
 				// ctx got timeout or canceled.
+				if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+					return nil, err
+				}
 				return nil, ctx.Err()
 			}
 		}
@@ -414,7 +414,7 @@
 		return
 	}
 	csm.state = state
-	channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state)
+	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.
 		close(csm.notifyChan)
@@ -476,6 +476,8 @@
 	balancerBuildOpts balancer.BuildOptions
 	blockingpicker    *pickerWrapper
 
+	safeConfigSelector iresolver.SafeConfigSelector
+
 	mu              sync.RWMutex
 	resolverWrapper *ccResolverWrapper
 	sc              *ServiceConfig
@@ -490,11 +492,18 @@
 
 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
+
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
 }
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in the former case and false in the latter.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
 	ch := cc.csMgr.getNotifyChan()
 	if cc.csMgr.getState() != sourceState {
@@ -509,11 +518,34 @@
 }
 
 // GetState returns the connectivity.State of ClientConn.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
 func (cc *ClientConn) GetState() connectivity.State {
 	return cc.csMgr.getState()
 }
 
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle.  Does not wait for the connection attempts to begin
+// before returning.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
+		return
+	}
+	for ac := range cc.conns {
+		go ac.connect()
+	}
+}
+
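Connect is the new experimental entry point for kicking an idle channel without a blocking dial. A hedged sketch of warming up a channel with Connect plus the existing GetState/WaitForStateChange APIs (the address and timeout are made up):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // credential choice is incidental to this sketch
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn.Connect() // ask idle subchannels to start connecting
	for state := conn.GetState(); state != connectivity.Ready; state = conn.GetState() {
		if !conn.WaitForStateChange(ctx, state) {
			log.Fatalf("channel not ready: %v", ctx.Err())
		}
	}
	log.Println("channel is ready")
}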
 func (cc *ClientConn) scWatcher() {
 	for {
 		select {
@@ -525,6 +557,7 @@
 			// TODO: load balance policy runtime change is ignored.
 			// We may revisit this decision in the future.
 			cc.sc = &sc
+			cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
 			cc.mu.Unlock()
 		case <-cc.ctx.Done():
 			return
@@ -563,13 +596,13 @@
 
 func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
 	if cc.sc != nil {
-		cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+		cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
 		return
 	}
 	if cc.dopts.defaultServiceConfig != nil {
-		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
 	} else {
-		cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
 	}
 }
 
@@ -600,13 +633,24 @@
 	}
 
 	var ret error
-	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+	if cc.dopts.disableServiceConfig {
+		channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+	} else if s.ServiceConfig == nil {
 		cc.maybeApplyDefaultServiceConfig(s.Addresses)
 		// TODO: do we need to apply a failing LB policy if there is no
 		// default, per the error handling design?
 	} else {
 		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
-			cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+			configSelector := iresolver.GetConfigSelector(s)
+			if configSelector != nil {
+				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+					channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
+				}
+			} else {
+				configSelector = &defaultConfigSelector{sc}
+			}
+			cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
 		} else {
 			ret = balancer.ErrBadResolverState
 			if cc.balancerWrapper == nil {
@@ -616,6 +660,7 @@
 				} else {
 					err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
 				}
+				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
 				cc.blockingpicker.updatePicker(base.NewErrPicker(err))
 				cc.csMgr.updateState(connectivity.TransientFailure)
 				cc.mu.Unlock()
@@ -664,22 +709,27 @@
 		return
 	}
 
-	channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name)
+	channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
 	if cc.dopts.balancerBuilder != nil {
-		channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
+		channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
 		return
 	}
 	if cc.balancerWrapper != nil {
+		// Don't hold cc.mu while closing the balancers. The balancers may call
+		// methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
+		// would cause a deadlock in that case.
+		cc.mu.Unlock()
 		cc.balancerWrapper.close()
+		cc.mu.Lock()
 	}
 
 	builder := balancer.Get(name)
 	if builder == nil {
-		channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
-		channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
+		channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
+		channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
 		builder = newPickfirstBuilder()
 	} else {
-		channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name)
+		channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
 	}
 
 	cc.curBalancerName = builder.Name()
@@ -720,12 +770,12 @@
 	}
 	if channelz.IsOn() {
 		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
-		channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
+		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
 			Desc:     "Subchannel Created",
-			Severity: channelz.CtINFO,
+			Severity: channelz.CtInfo,
 			Parent: &channelz.TraceEventDesc{
 				Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
-				Severity: channelz.CtINFO,
+				Severity: channelz.CtInfo,
 			},
 		})
 	}
@@ -759,7 +809,11 @@
 }
 
 // Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (cc *ClientConn) Target() string {
 	return cc.target
 }
@@ -795,8 +849,7 @@
 	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
-	// Start a goroutine connecting to the server asynchronously.
-	go ac.resetTransport()
+	ac.resetTransport()
 	return nil
 }
 
@@ -818,7 +871,7 @@
 func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 	ac.mu.Lock()
 	defer ac.mu.Unlock()
-	channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
 	if ac.state == connectivity.Shutdown ||
 		ac.state == connectivity.TransientFailure ||
 		ac.state == connectivity.Idle {
@@ -833,12 +886,13 @@
 	// ac.state is Ready, try to find the connected address.
 	var curAddrFound bool
 	for _, a := range addrs {
+		a.ServerName = ac.cc.getServerName(a)
 		if reflect.DeepEqual(ac.curAddr, a) {
 			curAddrFound = true
 			break
 		}
 	}
-	channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
 	if curAddrFound {
 		ac.addrs = addrs
 	}
@@ -846,26 +900,53 @@
 	return curAddrFound
 }
 
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+	if cc.dopts.authority != "" {
+		return cc.dopts.authority
+	}
+	if addr.ServerName != "" {
+		return addr.ServerName
+	}
+	return cc.authority
+}
+
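The precedence documented above can be exercised from the public API: an authority set via WithAuthority wins over any per-address ServerName a resolver supplies. A hedged sketch (addresses and names are made up):

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds := credentials.NewTLS(&tls.Config{})
	conn, err := grpc.Dial("10.0.0.5:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithAuthority("backend.example.com"), // wins over resolver-supplied ServerName per the rule above
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}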
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+	if sc == nil {
+		return MethodConfig{}
+	}
+	if m, ok := sc.Methods[method]; ok {
+		return m
+	}
+	i := strings.LastIndex(method, "/")
+	if m, ok := sc.Methods[method[:i+1]]; ok {
+		return m
+	}
+	return sc.Methods[""]
+}
+
 // GetMethodConfig gets the method config of the input method.
 // If there's an exact match for input method (i.e. /service/method), we return
 // the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the default config
-// under the service (i.e /service/). If there is a default MethodConfig for
-// the service, we return it.
+// If there isn't an exact match for the input method, we look for the service's default
+// config under the service (i.e. /service/) and then for the default for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
 // Otherwise, we return an empty MethodConfig.
 func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	// TODO: Avoid the locking here.
 	cc.mu.RLock()
 	defer cc.mu.RUnlock()
-	if cc.sc == nil {
-		return MethodConfig{}
-	}
-	m, ok := cc.sc.Methods[method]
-	if !ok {
-		i := strings.LastIndex(method, "/")
-		m = cc.sc.Methods[method[:i+1]]
-	}
-	return m
+	return getMethodConfig(cc.sc, method)
 }
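The lookup order implemented here (exact /service/method, then the /service/ default, then the empty-string catch-all) mirrors the name entries of a service config. A hedged sketch using WithDefaultServiceConfig; the service, methods, and values are made up, and the empty name object is assumed to be accepted as the all-services default in this version:

package main

import (
	"log"

	"google.golang.org/grpc"
)

const serviceConfig = `{
  "methodConfig": [
    {"name": [{"service": "echo.Echo", "method": "UnaryEcho"}], "waitForReady": true},
    {"name": [{"service": "echo.Echo"}], "timeout": "2s"},
    {"name": [{}], "timeout": "5s"}
  ]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(), // credential choice is incidental to this sketch
		grpc.WithDefaultServiceConfig(serviceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// "/echo.Echo/UnaryEcho" resolves to the exact entry, any other echo.Echo
	// method to the per-service default, and other services to the catch-all.
	// The default config is applied on the first resolver update, so a real
	// program would not read it immediately after Dial as done here.
	mc := conn.GetMethodConfig("/echo.Echo/UnaryEcho")
	if mc.WaitForReady != nil {
		log.Printf("wait-for-ready for UnaryEcho: %v", *mc.WaitForReady)
	}
}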
 
 func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@@ -888,12 +969,15 @@
 	return t, done, nil
 }
 
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
 	if sc == nil {
 		// should never reach here.
 		return
 	}
 	cc.sc = sc
+	if configSelector != nil {
+		cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+	}
 
 	if cc.sc.retryThrottling != nil {
 		newThrottler := &retryThrottler{
@@ -957,7 +1041,10 @@
 // However, if a previously unavailable network becomes available, this may be
 // used to trigger an immediate reconnect.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (cc *ClientConn) ResetConnectBackoff() {
 	cc.mu.Lock()
 	conns := cc.conns
@@ -988,12 +1075,12 @@
 
 	cc.blockingpicker.close()
 
-	if rWrapper != nil {
-		rWrapper.close()
-	}
 	if bWrapper != nil {
 		bWrapper.close()
 	}
+	if rWrapper != nil {
+		rWrapper.close()
+	}
 
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
@@ -1001,15 +1088,15 @@
 	if channelz.IsOn() {
 		ted := &channelz.TraceEventDesc{
 			Desc:     "Channel Deleted",
-			Severity: channelz.CtINFO,
+			Severity: channelz.CtInfo,
 		}
 		if cc.dopts.channelzParentID != 0 {
 			ted.Parent = &channelz.TraceEventDesc{
 				Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
-				Severity: channelz.CtINFO,
+				Severity: channelz.CtInfo,
 			}
 		}
-		channelz.AddTraceEvent(cc.channelzID, 0, ted)
+		channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
 		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
 		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(cc.channelzID)
@@ -1053,7 +1140,7 @@
 		return
 	}
 	ac.state = s
-	channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s)
+	channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
 	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
 }
 
@@ -1072,112 +1159,86 @@
 }
 
 func (ac *addrConn) resetTransport() {
-	for i := 0; ; i++ {
-		if i > 0 {
-			ac.cc.resolveNow(resolver.ResolveNowOptions{})
-		}
-
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			return
-		}
-
-		addrs := ac.addrs
-		backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
-		// This will be the duration that dial gets to finish.
-		dialDuration := minConnectTimeout
-		if ac.dopts.minConnectTimeout != nil {
-			dialDuration = ac.dopts.minConnectTimeout()
-		}
-
-		if dialDuration < backoffFor {
-			// Give dial more time as we keep failing to connect.
-			dialDuration = backoffFor
-		}
-		// We can potentially spend all the time trying the first address, and
-		// if the server accepts the connection and then hangs, the following
-		// addresses will never be tried.
-		//
-		// The spec doesn't mention what should be done for multiple addresses.
-		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
-		connectDeadline := time.Now().Add(dialDuration)
-
-		ac.updateConnectivityState(connectivity.Connecting, nil)
-		ac.transport = nil
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
 		ac.mu.Unlock()
-
-		newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
-		if err != nil {
-			// After exhausting all addresses, the addrConn enters
-			// TRANSIENT_FAILURE.
-			ac.mu.Lock()
-			if ac.state == connectivity.Shutdown {
-				ac.mu.Unlock()
-				return
-			}
-			ac.updateConnectivityState(connectivity.TransientFailure, err)
-
-			// Backoff.
-			b := ac.resetBackoff
-			ac.mu.Unlock()
-
-			timer := time.NewTimer(backoffFor)
-			select {
-			case <-timer.C:
-				ac.mu.Lock()
-				ac.backoffIdx++
-				ac.mu.Unlock()
-			case <-b:
-				timer.Stop()
-			case <-ac.ctx.Done():
-				timer.Stop()
-				return
-			}
-			continue
-		}
-
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			newTr.Close()
-			return
-		}
-		ac.curAddr = addr
-		ac.transport = newTr
-		ac.backoffIdx = 0
-
-		hctx, hcancel := context.WithCancel(ac.ctx)
-		ac.startHealthCheck(hctx)
-		ac.mu.Unlock()
-
-		// Block until the created transport is down. And when this happens,
-		// we restart from the top of the addr list.
-		<-reconnect.Done()
-		hcancel()
-		// restart connecting - the top of the loop will set state to
-		// CONNECTING.  This is against the current connectivity semantics doc,
-		// however it allows for graceful behavior for RPCs not yet dispatched
-		// - unfortunate timing would otherwise lead to the RPC failing even
-		// though the TRANSIENT_FAILURE state (called for by the doc) would be
-		// instantaneous.
-		//
-		// Ideally we should transition to Idle here and block until there is
-		// RPC activity that leads to the balancer requesting a reconnect of
-		// the associated SubConn.
+		return
 	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return
+		}
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
 }
 
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at the
-// first successful one. It returns the transport, the address and a Event in
-// the successful case. The Event fires when the returned transport disconnects.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address could be
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
 	var firstConnErr error
 	for _, addr := range addrs {
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
 			ac.mu.Unlock()
-			return nil, resolver.Address{}, nil, errConnClosing
+			return errConnClosing
 		}
 
 		ac.cc.mu.RLock()
@@ -1190,77 +1251,63 @@
 		}
 		ac.mu.Unlock()
 
-		channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
 
-		newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+		err := ac.createTransport(addr, copts, connectDeadline)
 		if err == nil {
-			return newTr, addr, reconnect, nil
+			return nil
 		}
 		if firstConnErr == nil {
 			firstConnErr = err
 		}
-		ac.cc.blockingpicker.updateConnectionError(err)
+		ac.cc.updateConnectionError(err)
 	}
 
 	// Couldn't connect to any address.
-	return nil, resolver.Address{}, nil, firstConnErr
+	return firstConnErr
 }
 
-// createTransport creates a connection to addr. It returns the transport and a
-// Event in the successful case. The Event fires when the returned transport
-// disconnects.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
-	prefaceReceived := make(chan struct{})
-	onCloseCalled := make(chan struct{})
-	reconnect := grpcsync.NewEvent()
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+	// TODO: Delete prefaceReceived and move the logic to wait for it into the
+	// transport.
+	prefaceReceived := grpcsync.NewEvent()
+	connClosed := grpcsync.NewEvent()
 
-	authority := ac.cc.authority
-	// addr.ServerName takes precedent over ClientConn authority, if present.
-	if addr.ServerName != "" {
-		authority = addr.ServerName
-	}
-
-	target := transport.TargetInfo{
-		Addr:      addr.Addr,
-		Metadata:  addr.Metadata,
-		Authority: authority,
-	}
-
-	once := sync.Once{}
-	onGoAway := func(r transport.GoAwayReason) {
-		ac.mu.Lock()
-		ac.adjustParams(r)
-		once.Do(func() {
-			if ac.state == connectivity.Ready {
-				// Prevent this SubConn from being used for new RPCs by setting its
-				// state to Connecting.
-				//
-				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting, nil)
-			}
-		})
-		ac.mu.Unlock()
-		reconnect.Fire()
-	}
+	addr.ServerName = ac.cc.getServerName(addr)
+	hctx, hcancel := context.WithCancel(ac.ctx)
+	hcStarted := false // protected by ac.mu
 
 	onClose := func() {
 		ac.mu.Lock()
-		once.Do(func() {
-			if ac.state == connectivity.Ready {
-				// Prevent this SubConn from being used for new RPCs by setting its
-				// state to Connecting.
-				//
-				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting, nil)
-			}
-		})
-		ac.mu.Unlock()
-		close(onCloseCalled)
-		reconnect.Fire()
+		defer ac.mu.Unlock()
+		defer connClosed.Fire()
+		if !hcStarted || hctx.Err() != nil {
+			// We didn't start the health check or set the state to READY, so
+			// no need to do anything else here.
+			//
+			// OR, we have already cancelled the health check context, meaning
+			// we have already called onClose once for this transport.  In this
+			// case it would be dangerous to clear the transport and update the
+			// state, since there may be a new transport in this addrConn.
+			return
+		}
+		hcancel()
+		ac.transport = nil
+		// Refresh the name resolver
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, nil)
+		}
 	}
 
-	onPrefaceReceipt := func() {
-		close(prefaceReceived)
+	onGoAway := func(r transport.GoAwayReason) {
+		ac.mu.Lock()
+		ac.adjustParams(r)
+		ac.mu.Unlock()
+		onClose()
 	}
 
 	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
@@ -1269,27 +1316,67 @@
 		copts.ChannelzParentID = ac.channelzID
 	}
 
-	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
 	if err != nil {
 		// newTr is either nil, or closed.
-		channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
-		return nil, nil, err
+		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
+		return err
 	}
 
 	select {
-	case <-time.After(time.Until(connectDeadline)):
+	case <-connectCtx.Done():
 		// We didn't get the preface in time.
-		newTr.Close()
-		channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
-		return nil, nil, errors.New("timed out waiting for server handshake")
-	case <-prefaceReceived:
+		// The error we pass to Close() is immaterial since there are no open
+		// streams at this point, so no trailers with error details will be sent
+		// out. We just need to pass a non-nil error.
+		newTr.Close(transport.ErrConnClosing)
+		if connectCtx.Err() == context.DeadlineExceeded {
+			err := errors.New("failed to receive server preface within timeout")
+			channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
+			return err
+		}
+		return nil
+	case <-prefaceReceived.Done():
 		// We got the preface - huzzah! things are good.
-	case <-onCloseCalled:
-		// The transport has already closed - noop.
-		return nil, nil, errors.New("connection closed")
-		// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		if connClosed.HasFired() {
+			// onClose called first; go idle but do nothing else.
+			if ac.state != connectivity.Shutdown {
+				ac.updateConnectivityState(connectivity.Idle, nil)
+			}
+			return nil
+		}
+		if ac.state == connectivity.Shutdown {
+			// This can happen if the subConn was removed while in `Connecting`
+			// state. tearDown() would have set the state to `Shutdown`, but
+			// would not have closed the transport since ac.transport would not
+			// have been set at that point.
+			//
+			// We run this in a goroutine because newTr.Close() calls onClose()
+			// inline, which requires locking ac.mu.
+			//
+			// The error we pass to Close() is immaterial since there are no open
+			// streams at this point, so no trailers with error details will be sent
+			// out. We just need to pass a non-nil error.
+			go newTr.Close(transport.ErrConnClosing)
+			return nil
+		}
+		ac.curAddr = addr
+		ac.transport = newTr
+		hcStarted = true
+		ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+		return nil
+	case <-connClosed.Done():
+		// The transport has already closed.  If we received the preface, too,
+		// this is not an error.
+		select {
+		case <-prefaceReceived.Done():
+			return nil
+		default:
+			return errors.New("connection closed before server preface received")
+		}
 	}
-	return newTr, reconnect, nil
 }
 
 // startHealthCheck starts the health checking stream (RPC) to watch the health
@@ -1297,7 +1384,7 @@
 //
 // LB channel health checking is enabled when all requirements below are met:
 // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
-// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
 // 3. a service config with non-empty healthCheckConfig field is provided
 // 4. the load balancer requests it
 //
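A minimal sketch of satisfying requirements 2 and 3 above on the client side, assuming a hypothetical local server that exposes the standard gRPC health service; the blank import wires up the health check function, and the round_robin policy covers requirement 4:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        _ "google.golang.org/grpc/health" // registers the client-side health check function
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            // round_robin requests health checking; pick_first does not.
            grpc.WithDefaultServiceConfig(`{
              "loadBalancingConfig": [{"round_robin": {}}],
              "healthCheckConfig": {"serviceName": ""}
            }`),
        )
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }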
@@ -1327,7 +1414,7 @@
 		// The health package is not imported to set health check function.
 		//
 		// TODO: add a link to the health check doc in the error message.
-		channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
+		channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
 		return
 	}
 
@@ -1357,9 +1444,9 @@
 		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
 		if err != nil {
 			if status.Code(err) == codes.Unimplemented {
-				channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
 			} else {
-				channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
+				channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
 			}
 		}
 	}()
@@ -1373,33 +1460,20 @@
 	ac.mu.Unlock()
 }
 
-// getReadyTransport returns the transport if ac's state is READY.
-// Otherwise it returns nil, false.
-// If ac's state is IDLE, it will trigger ac to connect.
-func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 	ac.mu.Lock()
-	if ac.state == connectivity.Ready && ac.transport != nil {
-		t := ac.transport
-		ac.mu.Unlock()
-		return t, true
+	defer ac.mu.Unlock()
+	if ac.state == connectivity.Ready {
+		return ac.transport
 	}
-	var idle bool
-	if ac.state == connectivity.Idle {
-		idle = true
-	}
-	ac.mu.Unlock()
-	// Trigger idle ac to connect.
-	if idle {
-		ac.connect()
-	}
-	return nil, false
+	return nil
 }
 
 // tearDown starts to tear down the addrConn.
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many addrConn's in a
-// tight loop.
-// tearDown doesn't remove ac from ac.cc.conns.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
 func (ac *addrConn) tearDown(err error) {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
@@ -1424,12 +1498,12 @@
 		ac.mu.Lock()
 	}
 	if channelz.IsOn() {
-		channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
+		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
 			Desc:     "Subchannel Deleted",
-			Severity: channelz.CtINFO,
+			Severity: channelz.CtInfo,
 			Parent: &channelz.TraceEventDesc{
 				Desc:     fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
-				Severity: channelz.CtINFO,
+				Severity: channelz.CtInfo,
 			},
 		})
 		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
@@ -1532,3 +1606,126 @@
 	}
 	return resolver.Get(scheme)
 }
+
+func (cc *ClientConn) updateConnectionError(err error) {
+	cc.lceMu.Lock()
+	cc.lastConnectionError = err
+	cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+	cc.lceMu.Lock()
+	defer cc.lceMu.Unlock()
+	return cc.lastConnectionError
+}
+
+func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
+	channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
+
+	var rb resolver.Builder
+	parsedTarget, err := parseTarget(cc.target)
+	if err != nil {
+		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
+	} else {
+		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+		rb = cc.getResolver(parsedTarget.Scheme)
+		if rb != nil {
+			cc.parsedTarget = parsedTarget
+			return rb, nil
+		}
+	}
+
+	// We are here because the user's dial target did not contain a scheme or
+	// specified an unregistered scheme. We should fall back to the default
+	// scheme, except when a custom dialer is specified, in which case we should
+	// always use the passthrough scheme.
+	defScheme := resolver.GetDefaultScheme()
+	channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme)
+	canonicalTarget := defScheme + ":///" + cc.target
+
+	parsedTarget, err = parseTarget(canonicalTarget)
+	if err != nil {
+		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
+		return nil, err
+	}
+	channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+	rb = cc.getResolver(parsedTarget.Scheme)
+	if rb == nil {
+		return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
+	}
+	cc.parsedTarget = parsedTarget
+	return rb, nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing scheme, authority and endpoint. Query
+// params are stripped from the endpoint.
+func parseTarget(target string) (resolver.Target, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return resolver.Target{}, err
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket". Since we
+	// own the "unix" resolver, we can work around it in the unix resolver by using
+	// the `URL` field instead of the `Endpoint` field.
+	endpoint := u.Path
+	if endpoint == "" {
+		endpoint = u.Opaque
+	}
+	endpoint = strings.TrimPrefix(endpoint, "/")
+	return resolver.Target{
+		Scheme:    u.Scheme,
+		Authority: u.Host,
+		Endpoint:  endpoint,
+		URL:       *u,
+	}, nil
+}
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+	// Historically, we had two options for users to specify the serverName or
+	// authority for a channel. One was through the transport credentials
+	// (either in its constructor, or through the OverrideServerName() method).
+	// The other option (for cases where WithInsecure() dial option was used)
+	// was to use the WithAuthority() dial option.
+	//
+	// A few things have changed since:
+	// - `insecure` package with an implementation of the `TransportCredentials`
+	//   interface for the insecure case
+	// - WithAuthority() dial option support for secure credentials
+	authorityFromCreds := ""
+	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+		authorityFromCreds = creds.Info().ServerName
+	}
+	authorityFromDialOption := dopts.authority
+	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+		return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	switch {
+	case authorityFromDialOption != "":
+		return authorityFromDialOption, nil
+	case authorityFromCreds != "":
+		return authorityFromCreds, nil
+	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
+		// TODO: remove when the unix resolver implements optional interface to
+		// return channel authority.
+		return "localhost", nil
+	case strings.HasPrefix(endpoint, ":"):
+		return "localhost" + endpoint, nil
+	default:
+		// TODO: Define an optional interface on the resolver builder to return
+		// the channel authority given the user's dial target. For resolvers
+		// which don't implement this interface, we will use the endpoint from
+		// "scheme://authority/endpoint" as the default authority.
+		return endpoint, nil
+	}
+}
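The precedence described above can be exercised from the caller's side with the WithAuthority dial option; a minimal sketch, assuming hypothetical backend and override names, where the dial-option authority wins over the endpoint derived from the target:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // The dial-option authority takes precedence over the endpoint-derived
        // default and is used as the :authority header on every stream.
        conn, err := grpc.Dial("dns:///backend.internal:443",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithAuthority("frontend.example.com"),
        )
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }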
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 0273883..11b1061 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -33,6 +33,9 @@
 	OK Code = 0
 
 	// Canceled indicates the operation was canceled (typically by the caller).
+	//
+	// The gRPC framework will generate this error code when cancellation
+	// is requested.
 	Canceled Code = 1
 
 	// Unknown error. An example of where this error may be returned is
@@ -40,12 +43,17 @@
 	// an error-space that is not known in this address space. Also
 	// errors raised by APIs that do not return enough error information
 	// may be converted to this error.
+	//
+	// The gRPC framework will generate this error code in the above two
+	// mentioned cases.
 	Unknown Code = 2
 
 	// InvalidArgument indicates client specified an invalid argument.
 	// Note that this differs from FailedPrecondition. It indicates arguments
 	// that are problematic regardless of the state of the system
 	// (e.g., a malformed file name).
+	//
+	// This error code will not be generated by the gRPC framework.
 	InvalidArgument Code = 3
 
 	// DeadlineExceeded means operation expired before completion.
@@ -53,14 +61,21 @@
 	// returned even if the operation has completed successfully. For
 	// example, a successful response from a server could have been delayed
 	// long enough for the deadline to expire.
+	//
+	// The gRPC framework will generate this error code when the deadline is
+	// exceeded.
 	DeadlineExceeded Code = 4
 
 	// NotFound means some requested entity (e.g., file or directory) was
 	// not found.
+	//
+	// This error code will not be generated by the gRPC framework.
 	NotFound Code = 5
 
 	// AlreadyExists means an attempt to create an entity failed because one
 	// already exists.
+	//
+	// This error code will not be generated by the gRPC framework.
 	AlreadyExists Code = 6
 
 	// PermissionDenied indicates the caller does not have permission to
@@ -69,10 +84,17 @@
 	// instead for those errors). It must not be
 	// used if the caller cannot be identified (use Unauthenticated
 	// instead for those errors).
+	//
+	// This error code will not be generated by the gRPC core framework,
+	// but expect authentication middleware to use it.
 	PermissionDenied Code = 7
 
 	// ResourceExhausted indicates some resource has been exhausted, perhaps
 	// a per-user quota, or perhaps the entire file system is out of space.
+	//
+	// This error code will be generated by the gRPC framework in
+	// out-of-memory and server overload situations, or when a message is
+	// larger than the configured maximum size.
 	ResourceExhausted Code = 8
 
 	// FailedPrecondition indicates operation was rejected because the
@@ -94,6 +116,8 @@
 	//      REST Get/Update/Delete on a resource and the resource on the
 	//      server does not match the condition. E.g., conflicting
 	//      read-modify-write on the same resource.
+	//
+	// This error code will not be generated by the gRPC framework.
 	FailedPrecondition Code = 9
 
 	// Aborted indicates the operation was aborted, typically due to a
@@ -102,6 +126,8 @@
 	//
 	// See litmus test above for deciding between FailedPrecondition,
 	// Aborted, and Unavailable.
+	//
+	// This error code will not be generated by the gRPC framework.
 	Aborted Code = 10
 
 	// OutOfRange means operation was attempted past the valid range.
@@ -119,15 +145,26 @@
 	// error) when it applies so that callers who are iterating through
 	// a space can easily look for an OutOfRange error to detect when
 	// they are done.
+	//
+	// This error code will not be generated by the gRPC framework.
 	OutOfRange Code = 11
 
 	// Unimplemented indicates operation is not implemented or not
 	// supported/enabled in this service.
+	//
+	// This error code will be generated by the gRPC framework. Most
+	// commonly, you will see this error code when a method implementation
+	// is missing on the server. It can also be generated for unknown
+	// compression algorithms or a disagreement as to whether an RPC should
+	// be streaming.
 	Unimplemented Code = 12
 
 	// Internal errors. Means some invariants expected by underlying
 	// system has been broken. If you see one of these errors,
 	// something is very broken.
+	//
+	// This error code will be generated by the gRPC framework in several
+	// internal error conditions.
 	Internal Code = 13
 
 	// Unavailable indicates the service is currently unavailable.
@@ -137,13 +174,22 @@
 	//
 	// See litmus test above for deciding between FailedPrecondition,
 	// Aborted, and Unavailable.
+	//
+	// This error code will be generated by the gRPC framework during
+	// abrupt shutdown of a server process or network connection.
 	Unavailable Code = 14
 
 	// DataLoss indicates unrecoverable data loss or corruption.
+	//
+	// This error code will not be generated by the gRPC framework.
 	DataLoss Code = 15
 
 	// Unauthenticated indicates the request does not have valid
 	// authentication credentials for the operation.
+	//
+	// The gRPC framework will generate this error code when the
+	// authentication metadata is invalid or a Credentials callback fails,
+	// but also expect authentication middleware to generate it.
 	Unauthenticated Code = 16
 
 	_maxCode = 17
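The annotations above distinguish codes the framework itself generates from codes only applications produce; on the client side either kind is surfaced the same way, through the status package. A minimal sketch, assuming err is an error returned by some RPC call:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // classify reports how a caller might react to an RPC error.
    func classify(err error) string {
        switch status.Code(err) {
        case codes.OK:
            return "success"
        case codes.Unavailable, codes.DeadlineExceeded:
            return "transient; safe to retry with backoff"
        case codes.Unimplemented:
            return "method or feature missing on the server"
        default:
            return "application-level failure"
        }
    }

    func main() {
        err := status.Error(codes.Unavailable, "connection refused")
        fmt.Println(classify(err))
    }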
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 34ec36f..4a89926 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -18,15 +18,14 @@
 
 // Package connectivity defines connectivity semantics.
 // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-// All APIs in this package are experimental.
 package connectivity
 
 import (
-	"context"
-
 	"google.golang.org/grpc/grpclog"
 )
 
+var logger = grpclog.Component("core")
+
 // State indicates the state of connectivity.
 // It can be the state of a ClientConn or SubConn.
 type State int
@@ -44,8 +43,8 @@
 	case Shutdown:
 		return "SHUTDOWN"
 	default:
-		grpclog.Errorf("unknown connectivity state: %d", s)
-		return "Invalid-State"
+		logger.Errorf("unknown connectivity state: %d", s)
+		return "INVALID_STATE"
 	}
 }
 
@@ -62,12 +61,34 @@
 	Shutdown
 )
 
-// Reporter reports the connectivity states.
-type Reporter interface {
-	// CurrentState returns the current state of the reporter.
-	CurrentState() State
-	// WaitForStateChange blocks until the reporter's state is different from the given state,
-	// and returns true.
-	// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
-	WaitForStateChange(context.Context, State) bool
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+	// ServingModeStarting indicates that the server is starting up.
+	ServingModeStarting ServingMode = iota
+	// ServingModeServing indicates that the server contains all required
+	// configuration and is serving RPCs.
+	ServingModeServing
+	// ServingModeNotServing indicates that the server is not accepting new
+	// connections. Existing connections will be closed gracefully, allowing
+	// in-progress RPCs to complete. A server enters this mode when it does not
+	// contain the required configuration to serve RPCs.
+	ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+	switch s {
+	case ServingModeStarting:
+		return "STARTING"
+	case ServingModeServing:
+		return "SERVING"
+	case ServingModeNotServing:
+		return "NOT_SERVING"
+	default:
+		logger.Errorf("unknown serving mode: %d", s)
+		return "INVALID_MODE"
+	}
 }
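Client code typically consumes these connectivity states through the ClientConn itself rather than the removed Reporter interface; a minimal sketch using the (experimental) GetState and WaitForStateChange methods, assuming a local placeholder address:

    package main

    import (
        "context"
        "fmt"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        state := conn.GetState()
        fmt.Println("initial state:", state) // typically IDLE for a non-blocking Dial
        if conn.WaitForStateChange(ctx, state) {
            fmt.Println("state changed to:", conn.GetState())
        } else {
            fmt.Println("no state change before the deadline")
        }
    }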
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index e438fda..96ff187 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -29,7 +29,8 @@
 	"net"
 
 	"github.com/golang/protobuf/proto"
-	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/attributes"
+	icredentials "google.golang.org/grpc/internal/credentials"
 )
 
 // PerRPCCredentials defines the common interface for the credentials which need to
@@ -57,9 +58,11 @@
 type SecurityLevel int
 
 const (
-	// NoSecurity indicates a connection is insecure.
+	// InvalidSecurityLevel indicates an invalid security level.
 	// The zero SecurityLevel value is invalid for backward compatibility.
-	NoSecurity SecurityLevel = iota + 1
+	InvalidSecurityLevel SecurityLevel = iota
+	// NoSecurity indicates a connection is insecure.
+	NoSecurity
 	// IntegrityOnly indicates a connection only provides integrity protection.
 	IntegrityOnly
 	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
@@ -89,7 +92,7 @@
 }
 
 // GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
-func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
 	return c
 }
 
@@ -124,15 +127,23 @@
 // TransportCredentials defines the common interface for all the live gRPC wire
 // protocols and supported transport security protocols (e.g., TLS, SSL).
 type TransportCredentials interface {
-	// ClientHandshake does the authentication handshake specified by the corresponding
-	// authentication protocol on rawConn for clients. It returns the authenticated
-	// connection and the corresponding auth information about the connection.
-	// The auth information should embed CommonAuthInfo to return additional information about
-	// the credentials. Implementations must use the provided context to implement timely cancellation.
-	// gRPC will try to reconnect if the error returned is a temporary error
-	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
-	// If the returned error is a wrapper error, implementations should make sure that
+	// ClientHandshake does the authentication handshake specified by the
+	// corresponding authentication protocol on rawConn for clients. It returns
+	// the authenticated connection and the corresponding auth information
+	// about the connection.  The auth information should embed CommonAuthInfo
+	// to return additional information about the credentials. Implementations
+	// must use the provided context to implement timely cancellation.  gRPC
+	// will try to reconnect if the error returned is a temporary error
+	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).  If the
+	// returned error is a wrapper error, implementations should make sure that
 	// the error implements Temporary() to have the correct retry behaviors.
+	// Additionally, ClientHandshakeInfo data will be available via the context
+	// passed to this call.
+	//
+	// The second argument to this method is the `:authority` header value used
+	// while creating new streams on this connection after authentication
+	// succeeds. Implementations must use this as the server name during the
+	// authentication handshake.
 	//
 	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
 	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
@@ -147,9 +158,13 @@
 	Info() ProtocolInfo
 	// Clone makes a copy of this TransportCredentials.
 	Clone() TransportCredentials
-	// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
-	// gRPC internals also use it to override the virtual hosting name if it is set.
-	// It must be called before dialing. Currently, this is only used by grpclb.
+	// OverrideServerName specifies the value used for the following:
+	// - verifying the hostname on the returned certificates
+	// - as SNI in the client's handshake to support virtual hosting
+	// - as the value for `:authority` header at stream creation time
+	//
+	// Deprecated: use grpc.WithAuthority instead. Will be supported
+	// throughout 1.x.
 	OverrideServerName(string) error
 }
 
@@ -163,8 +178,18 @@
 //
 // This API is experimental.
 type Bundle interface {
+	// TransportCredentials returns the transport credentials from the Bundle.
+	//
+	// Implementations must return non-nil transport credentials. If transport
+	// security is not needed by the Bundle, implementations may choose to
+	// return insecure.NewCredentials().
 	TransportCredentials() TransportCredentials
+
+	// PerRPCCredentials returns the per-RPC credentials from the Bundle.
+	//
+	// May be nil if per-RPC credentials are not needed.
 	PerRPCCredentials() PerRPCCredentials
+
 	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
 	// existing Bundle may cause races.
 	//
@@ -182,15 +207,33 @@
 	AuthInfo AuthInfo
 }
 
-// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
-type requestInfoKey struct{}
-
 // RequestInfoFromContext extracts the RequestInfo from the context if it exists.
 //
 // This API is experimental.
 func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
-	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
-	return
+	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
+	return ri, ok
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by the gRPC, resolver, balancer etc.
+	Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
 }
 
 // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
@@ -198,17 +241,16 @@
 // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
 //
 // This API is experimental.
-func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
 	type internalInfo interface {
-		GetCommonAuthInfo() *CommonAuthInfo
+		GetCommonAuthInfo() CommonAuthInfo
 	}
-	ri, _ := RequestInfoFromContext(ctx)
-	if ri.AuthInfo == nil {
-		return errors.New("unable to obtain SecurityLevel from context")
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
 	}
-	if ci, ok := ri.AuthInfo.(internalInfo); ok {
+	if ci, ok := ai.(internalInfo); ok {
 		// CommonAuthInfo.SecurityLevel has an invalid value.
-		if ci.GetCommonAuthInfo().SecurityLevel == 0 {
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
 			return nil
 		}
 		if ci.GetCommonAuthInfo().SecurityLevel < level {
@@ -219,12 +261,6 @@
 	return nil
 }
 
-func init() {
-	internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
-		return context.WithValue(ctx, requestInfoKey{}, ri)
-	}
-}
-
 // ChannelzSecurityInfo defines the interface that security protocols should implement
 // in order to provide security info to channelz.
 //
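With the signature change above, a server-side interceptor can enforce a minimum security level directly from the peer's AuthInfo instead of the request context. A minimal sketch, assuming whatever services are later registered on the server:

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/peer"
        "google.golang.org/grpc/status"
    )

    // requirePrivacy rejects RPCs whose transport does not provide privacy and integrity.
    func requirePrivacy(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        p, ok := peer.FromContext(ctx)
        if !ok || p.AuthInfo == nil {
            return nil, status.Error(codes.Unauthenticated, "no auth info available")
        }
        if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
            return nil, status.Error(codes.PermissionDenied, err.Error())
        }
        return handler(ctx, req)
    }

    func main() {
        _ = grpc.NewServer(grpc.ChainUnaryInterceptor(requirePrivacy))
    }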
diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go
deleted file mode 100644
index ccbf35b..0000000
--- a/vendor/google.golang.org/grpc/credentials/go12.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build go1.12
-
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import "crypto/tls"
-
-// This init function adds cipher suite constants only defined in Go 1.12.
-func init() {
-	cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
-	cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
-	cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
-}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000..4fbed12
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials which disable transport security.
+//
+// Note that using these credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
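A minimal sketch of the intended replacement for WithInsecure(), using the new package; the server address is an arbitrary placeholder:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }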
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 86e956b..784822d 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -25,8 +25,9 @@
 	"fmt"
 	"io/ioutil"
 	"net"
+	"net/url"
 
-	"google.golang.org/grpc/credentials/internal"
+	credinternal "google.golang.org/grpc/internal/credentials"
 )
 
 // TLSInfo contains the auth information for a TLS authenticated connection.
@@ -34,6 +35,8 @@
 type TLSInfo struct {
 	State tls.ConnectionState
 	CommonAuthInfo
+	// This API is experimental.
+	SPIFFEID *url.URL
 }
 
 // AuthType returns the type of TLSInfo as a string.
@@ -69,7 +72,7 @@
 
 func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
 	// use local cfg to avoid clobbering ServerName if using multiple endpoints
-	cfg := cloneTLSConfig(c.config)
+	cfg := credinternal.CloneTLSConfig(c.config)
 	if cfg.ServerName == "" {
 		serverName, _, err := net.SplitHostPort(authority)
 		if err != nil {
@@ -94,7 +97,17 @@
 		conn.Close()
 		return nil, nil, ctx.Err()
 	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
 }
 
 func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -103,7 +116,17 @@
 		conn.Close()
 		return nil, nil, err
 	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
 }
 
 func (c *tlsCreds) Clone() TransportCredentials {
@@ -115,23 +138,10 @@
 	return nil
 }
 
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
-	for _, p := range ps {
-		if p == alpnProtoStrH2 {
-			return ps
-		}
-	}
-	ret := make([]string, 0, len(ps)+1)
-	ret = append(ret, ps...)
-	return append(ret, alpnProtoStrH2)
-}
-
 // NewTLS uses c to construct a TransportCredentials based on TLS.
 func NewTLS(c *tls.Config) TransportCredentials {
-	tc := &tlsCreds{cloneTLSConfig(c)}
-	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
+	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
+	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
 	return tc
 }
 
@@ -185,7 +195,10 @@
 // TLSChannelzSecurityValue defines the struct that TLS protocol should return
 // from GetSecurityValue(), containing security info like cipher and certificate used.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type TLSChannelzSecurityValue struct {
 	ChannelzSecurityValue
 	StandardName      string
@@ -217,19 +230,7 @@
 	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
 	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
 	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-
-	return cfg.Clone()
+	tls.TLS_AES_128_GCM_SHA256:                  "TLS_AES_128_GCM_SHA256",
+	tls.TLS_AES_256_GCM_SHA384:                  "TLS_AES_256_GCM_SHA384",
+	tls.TLS_CHACHA20_POLY1305_SHA256:            "TLS_CHACHA20_POLY1305_SHA256",
 }
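The SPIFFE ID plumbing above rides on the same NewTLS construction path callers already use; a minimal sketch of client-side TLS credentials, assuming a hypothetical CA bundle path and server name:

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "io/ioutil"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        // Hypothetical CA bundle path.
        pem, err := ioutil.ReadFile("/etc/ssl/certs/ca.pem")
        if err != nil {
            panic(err)
        }
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(pem) {
            panic("failed to parse CA bundle")
        }

        creds := credentials.NewTLS(&tls.Config{RootCAs: pool, MinVersion: tls.VersionTLS12})
        conn, err := grpc.Dial("backend.example.com:443", grpc.WithTransportCredentials(creds))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }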
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 35bde10..c4bf09f 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -27,10 +27,9 @@
 	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/internal"
 	internalbackoff "google.golang.org/grpc/internal/backoff"
-	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
@@ -46,18 +45,17 @@
 	chainUnaryInts  []UnaryClientInterceptor
 	chainStreamInts []StreamClientInterceptor
 
-	cp          Compressor
-	dc          Decompressor
-	bs          internalbackoff.Strategy
-	block       bool
-	insecure    bool
-	timeout     time.Duration
-	scChan      <-chan ServiceConfig
-	authority   string
-	copts       transport.ConnectOptions
-	callOptions []CallOption
-	// This is used by v1 balancer dial option WithBalancer to support v1
-	// balancer, and also by WithBalancerName dial option.
+	cp              Compressor
+	dc              Decompressor
+	bs              internalbackoff.Strategy
+	block           bool
+	returnLastError bool
+	timeout         time.Duration
+	scChan          <-chan ServiceConfig
+	authority       string
+	copts           transport.ConnectOptions
+	callOptions     []CallOption
+	// This is used by WithBalancerName dial option.
 	balancerBuilder             balancer.Builder
 	channelzParentID            int64
 	disableServiceConfig        bool
@@ -67,12 +65,7 @@
 	minConnectTimeout           func() time.Duration
 	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
-	// This is used by ccResolverWrapper to backoff between successive calls to
-	// resolver.ResolveNow(). The user will have no need to configure this, but
-	// we need to be able to configure this in tests.
-	resolveNowBackoff func(int) time.Duration
-	resolvers         []resolver.Builder
-	withProxy         bool
+	resolvers                   []resolver.Builder
 }
 
 // DialOption configures how we set up the connection.
@@ -83,7 +76,10 @@
 // EmptyDialOption does not alter the dial configuration. It can be embedded in
 // another structure to build custom dial options.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type EmptyDialOption struct{}
 
 func (EmptyDialOption) apply(*dialOptions) {}
@@ -199,19 +195,6 @@
 	})
 }
 
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.  Will be removed in a future 1.x release.
-func WithBalancer(b Balancer) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = &balancerWrapperBuilder{
-			b: b,
-		}
-	})
-}
-
 // WithBalancerName sets the balancer that the ClientConn will be initialized
 // with. Balancer registered with balancerName will be used. This function
 // panics if no balancer was registered by balancerName.
@@ -244,15 +227,14 @@
 	})
 }
 
-// WithConnectParams configures the dialer to use the provided ConnectParams.
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
 //
 // The backoff configuration specified as part of the ConnectParams overrides
 // all defaults specified in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
 // using the backoff.DefaultConfig as a base, in cases where you want to
 // override only a subset of the backoff configuration.
-//
-// This API is EXPERIMENTAL.
 func WithConnectParams(p ConnectParams) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.bs = internalbackoff.Exponential{Config: p.Backoff}
@@ -290,7 +272,7 @@
 	})
 }
 
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting the server happens in background.
 func WithBlock() DialOption {
@@ -299,22 +281,47 @@
 	})
 }
 
+// WithReturnConnectionError returns a DialOption which makes grpc.Dial return an
+// error that includes both the last connection error that occurred and the
+// context.DeadlineExceeded error when the connection cannot be established.
+// Implies WithBlock().
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithReturnConnectionError() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+		o.returnLastError = true
+	})
+}
+
 // WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause grpc.Dial() to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
+// Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.insecure = true
+		o.copts.TransportCredentials = insecure.NewCredentials()
 	})
 }
 
 // WithNoProxy returns a DialOption which disables the use of proxies for this
 // ClientConn. This is ignored if WithDialer or WithContextDialer are used.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithNoProxy() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.withProxy = false
+		o.copts.UseProxy = false
 	})
 }
 
@@ -339,7 +346,10 @@
 // the ClientConn.WithCreds. This should not be used together with
 // WithTransportCredentials.
 //
-// This API is experimental.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithCredentialsBundle(b credentials.Bundle) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.CredsBundle = b
@@ -404,7 +414,10 @@
 // FailOnNonTempDialError only affects the initial dial, and does not do
 // anything useful unless you are also using WithBlock().
 //
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func FailOnNonTempDialError(f bool) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.FailOnNonTempDialError = f
@@ -423,7 +436,7 @@
 // for the client transport.
 func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
 	if kp.Time < internal.KeepaliveMinPingTime {
-		grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
 		kp.Time = internal.KeepaliveMinPingTime
 	}
 	return newFuncDialOption(func(o *dialOptions) {
@@ -459,7 +472,7 @@
 }
 
 // WithChainStreamInterceptor returns a DialOption that specifies the chained
-// interceptor for unary RPCs. The first interceptor will be the outer most,
+// interceptor for streaming RPCs. The first interceptor will be the outer most,
 // while the last interceptor will be the inner most wrapper around the real call.
 // All interceptors added by this method will be chained, and the interceptor
 // defined by WithStreamInterceptor will always be prepended to the chain.
@@ -470,8 +483,7 @@
 }
 
 // WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
+// :authority pseudo-header and as the server name in authentication handshake.
 func WithAuthority(a string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.authority = a
@@ -482,7 +494,10 @@
 // current ClientConn's parent. This function is used in nested channel creation
 // (e.g. grpclb dial).
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithChannelzParentID(id int64) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.channelzParentID = id
@@ -504,11 +519,16 @@
 // WithDefaultServiceConfig returns a DialOption that configures the default
 // service config, which will be used in cases where:
 //
-// 1. WithDisableServiceConfig is also used.
-// 2. Resolver does not return a service config or if the resolver returns an
-//    invalid service config.
+// 1. WithDisableServiceConfig is also used, or
 //
-// This API is EXPERIMENTAL.
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
 func WithDefaultServiceConfig(s string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.defaultServiceConfigRawJSON = &s
@@ -520,11 +540,8 @@
 // will happen automatically if no data is written to the wire or if the RPC is
 // unprocessed by the remote server.
 //
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future.  Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
+// Retry support is currently enabled by default, but may be disabled by
+// setting the environment variable "GRPC_GO_RETRY" to "off".
 func WithDisableRetry() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableRetry = true
@@ -542,7 +559,10 @@
 // WithDisableHealthCheck disables the LB channel health checking for all
 // SubConns of this ClientConn.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithDisableHealthCheck() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableHealthCheck = true
@@ -561,14 +581,12 @@
 
 func defaultDialOptions() dialOptions {
 	return dialOptions{
-		disableRetry:    !envconfig.Retry,
 		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
 			WriteBufferSize: defaultWriteBufSize,
 			ReadBufferSize:  defaultReadBufSize,
+			UseProxy:        true,
 		},
-		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
-		withProxy:         true,
 	}
 }
 
@@ -583,22 +601,15 @@
 	})
 }
 
-// withResolveNowBackoff specifies the function that clientconn uses to backoff
-// between successive calls to resolver.ResolveNow().
-//
-// For testing purpose only.
-func withResolveNowBackoff(f func(int) time.Duration) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.resolveNowBackoff = f
-	})
-}
-
 // WithResolvers allows a list of resolver implementations to be registered
 // locally with the ClientConn without needing to be globally registered via
 // resolver.Register.  They will be matched against the scheme used for the
 // current Dial only, and will take precedence over the global registry.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithResolvers(rs ...resolver.Builder) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.resolvers = append(o.resolvers, rs...)
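
As an illustrative aside (not part of the diff), a minimal client sketch using the options whose documentation changes above; the target address, authority, and service-config JSON are placeholders:

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    func main() {
        // Used only when the resolver returns no (or an invalid) service config,
        // or when WithDisableServiceConfig is set.
        svcCfg := `{"loadBalancingConfig": [{"round_robin":{}}]}`

        conn, err := grpc.Dial(
            "localhost:50051", // placeholder target
            grpc.WithInsecure(),
            grpc.WithDefaultServiceConfig(svcCfg),
            // Per the updated comment, the authority is now also used as the
            // server name in the authentication handshake.
            grpc.WithAuthority("my-service.example.com"),
        )
        if err != nil {
            log.Fatalf("dial failed: %v", err)
        }
        defer conn.Close()
    }
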
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 187adbb..0022859 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,6 +16,8 @@
  *
  */
 
+//go:generate ./regenerate.sh
+
 /*
 Package grpc implements an RPC system called gRPC.
 
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 195e844..6d84f74 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -19,7 +19,10 @@
 // Package encoding defines the interface for the compressor and codec, and
 // functions to register and retrieve compressors and codecs.
 //
-// This package is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package encoding
 
 import (
@@ -46,10 +49,15 @@
 	// coding header.  The result must be static; the result cannot change
 	// between calls.
 	Name() string
-	// EXPERIMENTAL: if a Compressor implements
+	// If a Compressor implements
 	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
 	// to determine the size of the buffer allocated for the result of decompression.
 	// Return -1 to indicate unknown size.
+	//
+	// Experimental
+	//
+	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+	// later release.
 }
 
 var registeredCompressor = make(map[string]Compressor)
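
As an illustrative aside (not part of the diff), a minimal sketch of a Compressor that also provides the optional DecompressedSize method described in the updated comment; the wrapper type and registered name are placeholders:

    package compressorsketch

    import (
        "compress/gzip"
        "io"

        "google.golang.org/grpc/encoding"
    )

    type sketchCompressor struct{}

    func (sketchCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
        return gzip.NewWriter(w), nil
    }

    func (sketchCompressor) Decompress(r io.Reader) (io.Reader, error) {
        return gzip.NewReader(r)
    }

    func (sketchCompressor) Name() string { return "gzip-sketch" }

    // Optional method: returning -1 tells gRPC the decompressed size is unknown,
    // so it cannot pre-allocate the result buffer.
    func (sketchCompressor) DecompressedSize(compressedBytes []byte) int { return -1 }

    func init() {
        encoding.RegisterCompressor(sketchCompressor{})
    }
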
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66b97a6..3009b35 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -21,8 +21,7 @@
 package proto
 
 import (
-	"math"
-	"sync"
+	"fmt"
 
 	"github.com/golang/protobuf/proto"
 	"google.golang.org/grpc/encoding"
@@ -38,73 +37,22 @@
 // codec is a Codec implementation with protobuf. It is the default codec for gRPC.
 type codec struct{}
 
-type cachedProtoBuffer struct {
-	lastMarshaledSize uint32
-	proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
-	if val > math.MaxInt32 {
-		return uint32(math.MaxInt32)
-	}
-	return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
-	protoMsg := v.(proto.Message)
-	newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
-	cb.SetBuf(newSlice)
-	cb.Reset()
-	if err := cb.Marshal(protoMsg); err != nil {
-		return nil, err
-	}
-	out := cb.Bytes()
-	cb.lastMarshaledSize = capToMaxInt32(len(out))
-	return out, nil
-}
-
 func (codec) Marshal(v interface{}) ([]byte, error) {
-	if pm, ok := v.(proto.Marshaler); ok {
-		// object can marshal itself, no need for buffer
-		return pm.Marshal()
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
 	}
-
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	out, err := marshal(v, cb)
-
-	// put back buffer and lose the ref to the slice
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return out, err
+	return proto.Marshal(vv)
 }
 
 func (codec) Unmarshal(data []byte, v interface{}) error {
-	protoMsg := v.(proto.Message)
-	protoMsg.Reset()
-
-	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
-		// object can unmarshal itself, no need for buffer
-		return pu.Unmarshal(data)
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
 	}
-
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	cb.SetBuf(data)
-	err := cb.Unmarshal(protoMsg)
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return err
+	return proto.Unmarshal(data, vv)
 }
 
 func (codec) Name() string {
 	return Name
 }
-
-var protoBufferPool = &sync.Pool{
-	New: func() interface{} {
-		return &cachedProtoBuffer{
-			Buffer:            proto.Buffer{},
-			lastMarshaledSize: 16,
-		}
-	},
-}
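
As an illustrative aside (not part of the diff), the rewritten codec simply delegates to proto.Marshal/proto.Unmarshal and now returns an error, rather than panicking on a failed type assertion, when handed a value that is not a proto.Message:

    package protocodecsketch

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
        _ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec
    )

    func show() {
        codec := encoding.GetCodec("proto")
        if _, err := codec.Marshal("not a proto message"); err != nil {
            // "failed to marshal, message is string, want proto.Message"
            fmt.Println(err)
        }
    }
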
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index ecef1ab..fcffdce 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -1,16 +1,19 @@
 module google.golang.org/grpc
 
-go 1.11
+go 1.14
 
 require (
-	github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
-	github.com/envoyproxy/go-control-plane v0.9.4
+	github.com/cespare/xxhash/v2 v2.1.1
+	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4
+	github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1
+	github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
-	github.com/golang/mock v1.1.1
-	github.com/golang/protobuf v1.3.3
-	github.com/google/go-cmp v0.2.0
-	golang.org/x/net v0.0.0-20190311183353-d8887717615a
-	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
-	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
-	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
+	github.com/golang/protobuf v1.4.3
+	github.com/google/go-cmp v0.5.0
+	github.com/google/uuid v1.1.2
+	golang.org/x/net v0.0.0-20200822124328-c89045814202
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+	google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
+	google.golang.org/protobuf v1.25.0
 )
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
index 0bf9f07..8b542e0 100644
--- a/vendor/google.golang.org/grpc/go.sum
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -1,64 +1,129 @@
-cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 0000000..8358dd6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+	name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+	grpclog.InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+	grpclog.WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+	grpclog.ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
+	grpclog.FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...interface{}) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...interface{}) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...interface{}) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...interface{}) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...interface{}) {
+	c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...interface{}) {
+	c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...interface{}) {
+	c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...interface{}) {
+	c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...interface{}) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...interface{}) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...interface{}) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...interface{}) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+	return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with the name already exists, nothing will be created and it will be
+// returned. SetLoggerV2 will panic if it is called with a logger created by
+// Component.
+func Component(componentName string) DepthLoggerV2 {
+	if cData, ok := cache[componentName]; ok {
+		return cData
+	}
+	c := &componentData{componentName}
+	cache[componentName] = c
+	return c
+}
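
As an illustrative aside (not part of the diff), a minimal sketch of the new Component API; the component name is a placeholder:

    package loggersketch

    import "google.golang.org/grpc/grpclog"

    // Component returns a cached DepthLoggerV2 that prefixes every message
    // with "[my-component]".
    var logger = grpclog.Component("my-component")

    func doWork() {
        logger.Infof("starting %d workers", 4)
        if logger.V(2) {
            logger.Info("extra detail only at verbosity 2 or higher")
        }
    }
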
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index 23612b7..7c1f664 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,11 +19,14 @@
 package grpclog
 
 import (
+	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"os"
 	"strconv"
+	"strings"
 
 	"google.golang.org/grpc/internal/grpclog"
 )
@@ -67,6 +70,9 @@
 // SetLoggerV2 sets logger that is used in grpc to a V2 logger.
 // Not mutex-protected, should be called before any gRPC functions.
 func SetLoggerV2(l LoggerV2) {
+	if _, ok := l.(*componentData); ok {
+		panic("cannot use component logger as grpclog logger")
+	}
 	grpclog.Logger = l
 	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
 }
@@ -92,8 +98,9 @@
 
 // loggerT is the default logger used by grpclog.
 type loggerT struct {
-	m []*log.Logger
-	v int
+	m          []*log.Logger
+	v          int
+	jsonFormat bool
 }
 
 // NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -102,19 +109,32 @@
 // Warning logs will be written to warningW and infoW.
 // Info logs will be written to infoW.
 func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
-	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
 }
 
 // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
 // verbosity level.
 func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
+}
+
+type loggerV2Config struct {
+	verbose    int
+	jsonFormat bool
+}
+
+func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
 	var m []*log.Logger
-	m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
-	m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
+	flag := log.LstdFlags
+	if c.jsonFormat {
+		flag = 0
+	}
+	m = append(m, log.New(infoW, "", flag))
+	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
 	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
-	m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
-	m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
-	return &loggerT{m: m, v: v}
+	m = append(m, log.New(ew, "", flag))
+	m = append(m, log.New(ew, "", flag))
+	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
 }
 
 // newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -139,58 +159,79 @@
 	if vl, err := strconv.Atoi(vLevel); err == nil {
 		v = vl
 	}
-	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+
+	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
+		verbose:    v,
+		jsonFormat: jsonFormat,
+	})
+}
+
+func (g *loggerT) output(severity int, s string) {
+	sevStr := severityName[severity]
+	if !g.jsonFormat {
+		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+		return
+	}
+	// TODO: we can also include the logging component, but that needs more
+	// (API) changes.
+	b, _ := json.Marshal(map[string]string{
+		"severity": sevStr,
+		"message":  s,
+	})
+	g.m[severity].Output(2, string(b))
 }
 
 func (g *loggerT) Info(args ...interface{}) {
-	g.m[infoLog].Print(args...)
+	g.output(infoLog, fmt.Sprint(args...))
 }
 
 func (g *loggerT) Infoln(args ...interface{}) {
-	g.m[infoLog].Println(args...)
+	g.output(infoLog, fmt.Sprintln(args...))
 }
 
 func (g *loggerT) Infof(format string, args ...interface{}) {
-	g.m[infoLog].Printf(format, args...)
+	g.output(infoLog, fmt.Sprintf(format, args...))
 }
 
 func (g *loggerT) Warning(args ...interface{}) {
-	g.m[warningLog].Print(args...)
+	g.output(warningLog, fmt.Sprint(args...))
 }
 
 func (g *loggerT) Warningln(args ...interface{}) {
-	g.m[warningLog].Println(args...)
+	g.output(warningLog, fmt.Sprintln(args...))
 }
 
 func (g *loggerT) Warningf(format string, args ...interface{}) {
-	g.m[warningLog].Printf(format, args...)
+	g.output(warningLog, fmt.Sprintf(format, args...))
 }
 
 func (g *loggerT) Error(args ...interface{}) {
-	g.m[errorLog].Print(args...)
+	g.output(errorLog, fmt.Sprint(args...))
 }
 
 func (g *loggerT) Errorln(args ...interface{}) {
-	g.m[errorLog].Println(args...)
+	g.output(errorLog, fmt.Sprintln(args...))
 }
 
 func (g *loggerT) Errorf(format string, args ...interface{}) {
-	g.m[errorLog].Printf(format, args...)
+	g.output(errorLog, fmt.Sprintf(format, args...))
 }
 
 func (g *loggerT) Fatal(args ...interface{}) {
-	g.m[fatalLog].Fatal(args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+	g.output(fatalLog, fmt.Sprint(args...))
+	os.Exit(1)
 }
 
 func (g *loggerT) Fatalln(args ...interface{}) {
-	g.m[fatalLog].Fatalln(args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+	g.output(fatalLog, fmt.Sprintln(args...))
+	os.Exit(1)
 }
 
 func (g *loggerT) Fatalf(format string, args ...interface{}) {
-	g.m[fatalLog].Fatalf(format, args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+	g.output(fatalLog, fmt.Sprintf(format, args...))
+	os.Exit(1)
 }
 
 func (g *loggerT) V(l int) bool {
@@ -201,14 +242,18 @@
 // DepthLoggerV2, the below functions will be called with the appropriate stack
 // depth set for trivial functions the logger may ignore.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type DepthLoggerV2 interface {
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	LoggerV2
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
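
As an illustrative aside (not part of the diff), two consequences of the changes above: the default logger now emits one JSON object per line when GRPC_GO_LOG_FORMATTER=json is set in the environment, and SetLoggerV2 now panics if handed a logger obtained from grpclog.Component. A minimal sketch of installing a custom logger:

    package main

    import (
        "io/ioutil"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Discard INFO and WARNING, keep ERROR and FATAL on stderr, verbosity 0.
        l := grpclog.NewLoggerV2WithVerbosity(ioutil.Discard, ioutil.Discard, os.Stderr, 0)
        grpclog.SetLoggerV2(l) // must run before any other gRPC call

        // grpclog.SetLoggerV2(grpclog.Component("x")) would panic, per the check
        // added above.
    }
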
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100644
index 7c7bcad..0000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 8b73500..668e0ad 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -25,17 +25,41 @@
 // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
 type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
 
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
-// and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
 type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
 
 // Streamer is called by StreamClientInterceptor to create a ClientStream.
 type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
 
-// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
+// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
 type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
 
 // UnaryServerInfo consists of various information about a unary RPC on
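
As an illustrative aside (not part of the diff), a minimal unary client interceptor matching the expanded comment above; the target address is a placeholder:

    package main

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
    )

    func timing(ctx context.Context, method string, req, reply interface{},
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        start := time.Now()
        // The interceptor is responsible for calling invoker to complete the RPC.
        err := invoker(ctx, method, req, reply, cc, opts...)
        log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
        return err
    }

    func main() {
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithInsecure(),
            grpc.WithUnaryInterceptor(timing),
        )
        if err != nil {
            log.Fatalf("dial failed: %v", err)
        }
        defer conn.Close()
    }
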
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 8b10516..5cc3aed 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -25,6 +25,7 @@
 	"os"
 
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
 )
 
 // Logger is the global binary logger. It can be used to get binary logger for
@@ -39,6 +40,8 @@
 // It is used to get a methodLogger for each individual method.
 var binLogger Logger
 
+var grpclogLogger = grpclog.Component("binarylog")
+
 // SetLogger sets the binary logger.
 //
 // Only call this at init time.
@@ -146,9 +149,9 @@
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
 func (l *logger) getMethodLogger(methodName string) *MethodLogger {
-	s, m, err := parseMethodName(methodName)
+	s, m, err := grpcutil.ParseMethod(methodName)
 	if err != nil {
-		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
 		return nil
 	}
 	if ml, ok := l.methods[s+"/"+m]; ok {
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index be30d0e..d8f4e76 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -24,8 +24,6 @@
 	"regexp"
 	"strconv"
 	"strings"
-
-	"google.golang.org/grpc/grpclog"
 )
 
 // NewLoggerFromConfigString reads the string and build a logger. It can be used
@@ -52,7 +50,7 @@
 	methods := strings.Split(s, ",")
 	for _, method := range methods {
 		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
-			grpclog.Warningf("failed to parse binary log config: %v", err)
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
 			return nil
 		}
 	}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 160f6e8..0cdb418 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -27,7 +27,6 @@
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 )
@@ -66,7 +65,7 @@
 		callID:          idGen.next(),
 		idWithinCallGen: &callIDGenerator{},
 
-		sink: defaultSink, // TODO(blog): make it plugable.
+		sink: DefaultSink, // TODO(blog): make it pluggable.
 	}
 }
 
@@ -219,12 +218,12 @@
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
 	ret := &pb.GrpcLogEntry{
 		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@@ -259,12 +258,12 @@
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
 	ret := &pb.GrpcLogEntry{
 		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@@ -315,7 +314,7 @@
 func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
 	st, ok := status.FromError(c.Err)
 	if !ok {
-		grpclog.Info("binarylogging: error in trailer is not a status error")
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
 	}
 	var (
 		detailsBytes []byte
@@ -325,7 +324,7 @@
 	if stProto != nil && len(stProto.Details) != 0 {
 		detailsBytes, err = proto.Marshal(stProto)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
 		}
 	}
 	ret := &pb.GrpcLogEntry{
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100644
index 113d40c..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
-  rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index a2e7c34..c2fdd58 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -21,32 +21,23 @@
 import (
 	"bufio"
 	"encoding/binary"
-	"fmt"
 	"io"
-	"io/ioutil"
 	"sync"
 	"time"
 
 	"github.com/golang/protobuf/proto"
 	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
 )
 
 var (
-	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+	// DefaultSink is the sink where the logs will be written to. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
 )
 
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
-	if defaultSink != nil {
-		defaultSink.Close()
-	}
-	defaultSink = s
-}
-
 // Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
 type Sink interface {
 	// Write will be called to write the log entry into the sink.
 	//
@@ -67,7 +58,7 @@
 // message is prefixed with a 4 byte big endian unsigned integer as the length.
 //
 // No buffering is done; Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
 	return &writerSink{out: w}
 }
 
@@ -78,7 +69,8 @@
 func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
 	b, err := proto.Marshal(e)
 	if err != nil {
-		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
 	}
 	hdr := make([]byte, 4)
 	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@
 
 func (ws *writerSink) Close() error { return nil }
 
-type bufWriteCloserSink struct {
-	mu     sync.Mutex
-	closer io.Closer
-	out    *writerSink   // out is built on buf.
-	buf    *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
 
-	writeStartOnce sync.Once
-	writeTicker    *time.Ticker
+	writeTicker *time.Ticker
+	done        chan struct{}
 }
 
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
-	// Start the write loop when Write is called.
-	fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
 	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
+		fs.startFlushGoroutine()
+		fs.flusherStarted = true
+	}
 	if err := fs.out.Write(e); err != nil {
-		fs.mu.Unlock()
 		return err
 	}
-	fs.mu.Unlock()
 	return nil
 }
 
@@ -119,44 +114,57 @@
 	bufFlushDuration = 60 * time.Second
 )
 
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
 	fs.writeTicker = time.NewTicker(bufFlushDuration)
 	go func() {
-		for range fs.writeTicker.C {
+		for {
+			select {
+			case <-fs.done:
+				return
+			case <-fs.writeTicker.C:
+			}
 			fs.mu.Lock()
-			fs.buf.Flush()
+			if err := fs.buf.Flush(); err != nil {
+				grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+			}
 			fs.mu.Unlock()
 		}
 	}()
 }
 
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
 	if fs.writeTicker != nil {
 		fs.writeTicker.Stop()
 	}
-	fs.mu.Lock()
-	fs.buf.Flush()
-	fs.closer.Close()
-	fs.out.Close()
-	fs.mu.Unlock()
+	close(fs.done)
+	if err := fs.buf.Flush(); err != nil {
+		grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+	}
+	if err := fs.closer.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+	}
+	if err := fs.out.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the Sink: %v", err)
+	}
 	return nil
 }
 
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
 	bufW := bufio.NewWriter(o)
-	return &bufWriteCloserSink{
+	return &bufferedSink{
 		closer: o,
 		out:    newWriterSink(bufW),
 		buf:    bufW,
+		done:   make(chan struct{}),
 	}
 }
-
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
-	tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
-	if err != nil {
-		return nil, fmt.Errorf("failed to create temp file: %v", err)
-	}
-	return newBufWriteCloserSink(tempFile), nil
-}
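
As an illustrative aside (not part of the diff), how the now-exported buffered sink can be wired to a file; since this is an internal package, the pattern only applies inside the grpc module (e.g. to the public binarylog package that the comments above say re-exports it), and the file path is a placeholder:

    package somegrpcinternalpkg

    import (
        "os"

        "google.golang.org/grpc/internal/binarylog"
    )

    func useFileSink() error {
        f, err := os.Create("/tmp/grpc_binarylog.bin") // placeholder path
        if err != nil {
            return err
        }
        // Entries are buffered and flushed every 60 seconds; Close flushes and
        // then closes the underlying file.
        binarylog.DefaultSink = binarylog.NewBufferedSink(f)
        return nil
    }
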
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc780..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
-	"errors"
-	"strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
-	if !strings.HasPrefix(methodName, "/") {
-		return "", "", errors.New("invalid method name: should start with /")
-	}
-	methodName = methodName[1:]
-
-	pos := strings.LastIndex(methodName, "/")
-	if pos < 0 {
-		return "", "", errors.New("invalid method name: suffix /method is missing")
-	}
-	return methodName[:pos], methodName[pos+1:], nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index e4252e5..cd18075 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -30,7 +30,7 @@
 	"sync/atomic"
 	"time"
 
-	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/grpclog"
 )
 
 const (
@@ -204,9 +204,9 @@
 		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
 	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid, ref)
+		db.get().addChannel(id, cn, true, pid)
 	} else {
-		db.get().addChannel(id, cn, false, pid, ref)
+		db.get().addChannel(id, cn, false, pid)
 	}
 	return id
 }
@@ -216,7 +216,7 @@
 // by pid). It returns the unique channelz tracking id assigned to this subchannel.
 func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
+		logger.Error("a SubChannel's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
@@ -228,7 +228,7 @@
 		pid:     pid,
 		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid, ref)
+	db.get().addSubChannel(id, sc, pid)
 	return id
 }
 
@@ -253,12 +253,12 @@
 // this listen socket.
 func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
+		logger.Error("a ListenSocket's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
 	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid, ref)
+	db.get().addListenSocket(id, ls, pid)
 	return id
 }
 
@@ -268,16 +268,16 @@
 // this normal socket.
 func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
+		logger.Error("a NormalSocket's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
 	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid, ref)
+	db.get().addNormalSocket(id, ns, pid)
 	return id
 }
 
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
 func RemoveEntry(id int64) {
 	db.get().removeEntry(id)
@@ -294,17 +294,15 @@
 }
 
 // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
 	for d := desc; d != nil; d = d.Parent {
 		switch d.Severity {
-		case CtUNKNOWN:
-			grpclog.InfoDepth(depth+1, d.Desc)
-		case CtINFO:
-			grpclog.InfoDepth(depth+1, d.Desc)
+		case CtUnknown, CtInfo:
+			l.InfoDepth(depth+1, d.Desc)
 		case CtWarning:
-			grpclog.WarningDepth(depth+1, d.Desc)
+			l.WarningDepth(depth+1, d.Desc)
 		case CtError:
-			grpclog.ErrorDepth(depth+1, d.Desc)
+			l.ErrorDepth(depth+1, d.Desc)
 		}
 	}
 	if getMaxTraceEntry() == 0 {
@@ -335,7 +333,7 @@
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
 	c.mu.Lock()
 	cn.cm = c
 	cn.trace.cm = c
@@ -348,7 +346,7 @@
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
 	c.mu.Lock()
 	sc.cm = c
 	sc.trace.cm = c
@@ -357,7 +355,7 @@
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
 	c.mu.Lock()
 	ls.cm = c
 	c.listenSockets[id] = ls
@@ -365,7 +363,7 @@
 	c.mu.Unlock()
 }
 
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
 	c.mu.Lock()
 	ns.cm = c
 	c.normalSockets[id] = ns
@@ -632,7 +630,7 @@
 	if count == 0 {
 		end = true
 	}
-	var s []*SocketMetric
+	s := make([]*SocketMetric, 0, len(sks))
 	for _, ns := range sks {
 		sm := &SocketMetric{}
 		sm.SocketData = ns.s.ChannelzMetric()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
index 59c7bed..b0013f9 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -21,80 +21,82 @@
 import (
 	"fmt"
 
-	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/grpclog"
 )
 
-// Info logs through grpclog.Info and adds a trace event if channelz is on.
-func Info(id int64, args ...interface{}) {
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     fmt.Sprint(args...),
-			Severity: CtINFO,
+			Severity: CtInfo,
 		})
 	} else {
-		grpclog.InfoDepth(1, args...)
+		l.InfoDepth(1, args...)
 	}
 }
 
-// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
-func Infof(id int64, format string, args ...interface{}) {
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     msg,
-			Severity: CtINFO,
+			Severity: CtInfo,
 		})
 	} else {
-		grpclog.InfoDepth(1, msg)
+		l.InfoDepth(1, msg)
 	}
 }
 
-// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
-func Warning(id int64, args ...interface{}) {
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     fmt.Sprint(args...),
 			Severity: CtWarning,
 		})
 	} else {
-		grpclog.WarningDepth(1, args...)
+		l.WarningDepth(1, args...)
 	}
 }
 
-// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
-func Warningf(id int64, format string, args ...interface{}) {
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     msg,
 			Severity: CtWarning,
 		})
 	} else {
-		grpclog.WarningDepth(1, msg)
+		l.WarningDepth(1, msg)
 	}
 }
 
-// Error logs through grpclog.Error and adds a trace event if channelz is on.
-func Error(id int64, args ...interface{}) {
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     fmt.Sprint(args...),
 			Severity: CtError,
 		})
 	} else {
-		grpclog.ErrorDepth(1, args...)
+		l.ErrorDepth(1, args...)
 	}
 }
 
-// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
-func Errorf(id int64, format string, args ...interface{}) {
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	if IsOn() {
-		AddTraceEvent(id, 1, &TraceEventDesc{
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
 			Desc:     msg,
 			Severity: CtError,
 		})
 	} else {
-		grpclog.ErrorDepth(1, msg)
+		l.ErrorDepth(1, msg)
 	}
 }
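
As an illustrative aside (not part of the diff), how a call site inside the grpc module passes its component logger to the reworked channelz helpers; this is an internal package, so the sketch applies only within google.golang.org/grpc, and the component name and message are placeholders:

    package somegrpcinternalpkg

    import (
        "google.golang.org/grpc/grpclog"
        "google.golang.org/grpc/internal/channelz"
    )

    var logger = grpclog.Component("core") // placeholder component name

    func noteLBChange(channelzID int64) {
        // Logs through the component logger and, when channelz is on, also
        // records a trace event against channelzID.
        channelz.Infof(logger, channelzID, "Channel switches to new LB policy %q", "round_robin")
    }
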
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
index 17c2274..3c595d1 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -26,7 +26,6 @@
 
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
 )
 
 // entry represents a node in the channelz database.
@@ -60,17 +59,17 @@
 	// the addrConn will create a new transport. And when registering the new transport in
 	// channelz, its parent addrConn could have already been torn down and deleted
 	// from channelz tracking, and thus reach the code here.
-	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
 }
 
 func (d *dummyEntry) deleteChild(id int64) {
 	// It is possible for a normal program to reach here under race condition.
 	// Refer to the example described in addChild().
-	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
 }
 
 func (d *dummyEntry) triggerDelete() {
-	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
 }
 
 func (*dummyEntry) deleteSelfIfReady() {
@@ -215,7 +214,7 @@
 	case *channel:
 		c.nestedChans[id] = v.refName
 	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
 	}
 }
 
@@ -326,7 +325,7 @@
 	if v, ok := e.(*normalSocket); ok {
 		sc.sockets[id] = v.refName
 	} else {
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
 	}
 }
 
@@ -493,11 +492,11 @@
 }
 
 func (ls *listenSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
 }
 
 func (ls *listenSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
 }
 
 func (ls *listenSocket) triggerDelete() {
@@ -506,7 +505,7 @@
 }
 
 func (ls *listenSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
 }
 
 func (ls *listenSocket) getParentID() int64 {
@@ -522,11 +521,11 @@
 }
 
 func (ns *normalSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
 }
 
 func (ns *normalSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
 }
 
 func (ns *normalSocket) triggerDelete() {
@@ -535,7 +534,7 @@
 }
 
 func (ns *normalSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+	logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
 }
 
 func (ns *normalSocket) getParentID() int64 {
@@ -594,7 +593,7 @@
 	case *listenSocket:
 		s.listenSockets[id] = v.refName
 	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
 	}
 }
 
@@ -673,10 +672,10 @@
 type Severity int
 
 const (
-	// CtUNKNOWN indicates unknown severity of a trace event.
-	CtUNKNOWN Severity = iota
-	// CtINFO indicates info level severity of a trace event.
-	CtINFO
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
 	// CtWarning indicates warning level severity of a trace event.
 	CtWarning
 	// CtError indicates error level severity of a trace event.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
index 692dd61..1b1c4cc 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
index 79edbef..8b06eed 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
@@ -22,8 +23,6 @@
 
 import (
 	"sync"
-
-	"google.golang.org/grpc/grpclog"
 )
 
 var once sync.Once
@@ -39,6 +38,6 @@
 // Windows OS doesn't support Socket Option
 func (s *SocketOptionData) Getsockopt(fd uintptr) {
 	once.Do(func() {
-		grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
 	})
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
index fdf409d..8d194e4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -1,5 +1,3 @@
-// +build linux,!appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
index 8864a08..837ddc4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000..32c9b59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"context"
+)
+
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
+// NewRequestInfoContext creates a context with ri.
+func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+	return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// RequestInfoFromContext extracts the RequestInfo from ctx.
+func RequestInfoFromContext(ctx context.Context) interface{} {
+	return ctx.Value(requestInfoKey{})
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+	return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
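
The new internal credentials package stores RequestInfo and ClientHandshakeInfo as interface{} values keyed by unexported struct types, which avoids an import cycle with the public credentials package. A rough sketch of the round trip follows, assuming the caller knows the concrete type to assert back to; again, internal packages are only importable from within the grpc module.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	// Values are stored as interface{} to avoid a dependency cycle; the
	// caller (grpc itself) asserts back to the concrete type it stored.
	ri := credentials.RequestInfo{Method: "/pkg.Service/Method"} // hypothetical method name
	ctx := icredentials.NewRequestInfoContext(context.Background(), ri)

	if got, ok := icredentials.RequestInfoFromContext(ctx).(credentials.RequestInfo); ok {
		fmt.Println("method from context:", got.Method)
	}
}
```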
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000..25ade62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+	if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+		return nil
+	}
+	return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+	if cert == nil || cert.URIs == nil {
+		return nil
+	}
+	var spiffeID *url.URL
+	for _, uri := range cert.URIs {
+		if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+			continue
+		}
+		// From this point, we assume the uri is intended for a SPIFFE ID.
+		if len(uri.String()) > 2048 {
+			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+			return nil
+		}
+		if len(uri.Host) == 0 || len(uri.Path) == 0 {
+			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+			return nil
+		}
+		if len(uri.Host) > 255 {
+			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+			return nil
+		}
+		// A valid SPIFFE certificate can only have exactly one URI SAN field.
+		if len(cert.URIs) > 1 {
+			logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+			return nil
+		}
+		spiffeID = uri
+	}
+	return spiffeID
+}
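
A hedged illustration of the SPIFFE ID validation rules implemented above: exactly one well-formed spiffe:// URI SAN parses to an ID, while a second URI SAN invalidates the certificate and yields nil. The trust domain and workload paths below are made up.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"net/url"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	// A certificate with exactly one spiffe:// URI SAN parses to a SPIFFE ID.
	u, _ := url.Parse("spiffe://example.org/workload/server")
	cert := &x509.Certificate{URIs: []*url.URL{u}}
	if id := icredentials.SPIFFEIDFromCert(cert); id != nil {
		fmt.Println("trust domain:", id.Host, "workload:", id.Path)
	}

	// Multiple URI SANs make the certificate invalid as a SPIFFE SVID, so the
	// parser logs a warning and returns nil.
	u2, _ := url.Parse("spiffe://example.org/another")
	cert.URIs = append(cert.URIs, u2)
	fmt.Println("multiple SANs ->", icredentials.SPIFFEIDFromCert(cert))
}
```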
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
similarity index 94%
rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
index 2f4472b..2919632 100644
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
@@ -18,8 +16,7 @@
  *
  */
 
-// Package internal contains credentials-internal code.
-package internal
+package credentials
 
 import (
 	"net"
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000..f792fd2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
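
A small sketch exercising the two helpers above; the expected outputs in the comments follow directly from the code as written.

```go
package main

import (
	"crypto/tls"
	"fmt"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	// h2 is appended only if it is not already advertised.
	fmt.Println(icredentials.AppendH2ToNextProtos(nil))                  // [h2]
	fmt.Println(icredentials.AppendH2ToNextProtos([]string{"h2"}))       // [h2]
	fmt.Println(icredentials.AppendH2ToNextProtos([]string{"http/1.1"})) // [http/1.1 h2]

	// CloneTLSConfig tolerates a nil config and otherwise defers to Clone.
	var cfg *tls.Config
	cloned := icredentials.CloneTLSConfig(cfg)
	cloned.NextProtos = icredentials.AppendH2ToNextProtos(cloned.NextProtos)
	fmt.Println(cloned.NextProtos) // [h2]
}
```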
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index ae6c897..6f02725 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -26,13 +26,10 @@
 
 const (
 	prefix          = "GRPC_GO_"
-	retryStr        = prefix + "RETRY"
 	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
 )
 
 var (
-	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
-	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
 	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
-	TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
+	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
 )
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000..9bad03c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+	"os"
+	"strings"
+)
+
+const (
+	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+	// Do not use this and read from env directly. Its value is read and kept in
+	// variable BootstrapFileName.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+	// content. Do not use this and read from env directly. Its value is read
+	// and kept in variable BootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+
+	ringHashSupportEnv           = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
+	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
+	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
+	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+	rlsInXDSEnv                  = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
+
+	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+	// XDSRingHash indicates whether ring hash support is enabled, which can be
+	// disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+	XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
+	// XDSClientSideSecurity is used to control processing of security
+	// configuration on the client-side.
+	//
+	// Note that there is no env var protection for the server-side because we
+	// have a brand new API on the server-side and users explicitly need to use
+	// the new API to get security integration on the server.
+	XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
+	// and DNS cluster is enabled, which can be enabled by setting the
+	// environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
+	// "true".
+	XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+
+	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+	// which can be disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
+
+	// XDSFederation indicates whether federation support is enabled.
+	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
+
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS Cluster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
+)
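
These flags are captured once at package-init time, so they reflect only what was in the process environment at startup. The sketch below simply dumps the effective values; the defaults noted in the comments follow from the expressions above.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/envconfig"
)

func main() {
	// All values are read once from the environment when the package is
	// initialized, so they must be set before the program starts.
	fmt.Println("bootstrap file:      ", envconfig.XDSBootstrapFileName)   // from GRPC_XDS_BOOTSTRAP
	fmt.Println("ring hash:           ", envconfig.XDSRingHash)            // default true (opt-out)
	fmt.Println("client-side security:", envconfig.XDSClientSideSecurity)  // default true (opt-out)
	fmt.Println("aggregate/DNS LB:    ", envconfig.XDSAggregateAndDNS)     // default false (opt-in)
	fmt.Println("RLS cluster spec.:   ", envconfig.XDSRLS)                 // default false (opt-in)
}
```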
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
index 8c8e19f..30a3b42 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
@@ -19,6 +19,10 @@
 // Package grpclog (internal) defines depth logging for grpc.
 package grpclog
 
+import (
+	"os"
+)
+
 // Logger is the logger used for the non-depth log functions.
 var Logger LoggerV2
 
@@ -30,7 +34,7 @@
 	if DepthLogger != nil {
 		DepthLogger.InfoDepth(depth, args...)
 	} else {
-		Logger.Info(args...)
+		Logger.Infoln(args...)
 	}
 }
 
@@ -39,7 +43,7 @@
 	if DepthLogger != nil {
 		DepthLogger.WarningDepth(depth, args...)
 	} else {
-		Logger.Warning(args...)
+		Logger.Warningln(args...)
 	}
 }
 
@@ -48,7 +52,7 @@
 	if DepthLogger != nil {
 		DepthLogger.ErrorDepth(depth, args...)
 	} else {
-		Logger.Error(args...)
+		Logger.Errorln(args...)
 	}
 }
 
@@ -57,8 +61,9 @@
 	if DepthLogger != nil {
 		DepthLogger.FatalDepth(depth, args...)
 	} else {
-		Logger.Fatal(args...)
+		Logger.Fatalln(args...)
 	}
+	os.Exit(1)
 }
 
 // LoggerV2 does underlying logging work for grpclog.
@@ -105,14 +110,17 @@
 // This is a copy of the DepthLoggerV2 defined in the external grpclog package.
 // It is defined here to avoid a circular dependency.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type DepthLoggerV2 interface {
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
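
A toy DepthLoggerV2 implementation, shown only to illustrate what the depth parameter encodes: how many call frames to skip when reporting the call site. Real gRPC components use the loggers returned by the public grpclog.Component helper; the type and output format here are made up.

```go
package main

import (
	"fmt"
	"runtime"

	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// depthLogger is a toy DepthLoggerV2 that prefixes each line with the file and
// line of the original call site, which is what the depth parameter selects.
type depthLogger struct{}

func (depthLogger) log(sev string, depth int, args ...interface{}) {
	// +2 skips this helper and the XxxDepth wrapper, so depth 0 reports the
	// direct caller of InfoDepth/WarningDepth/etc.
	_, file, line, _ := runtime.Caller(depth + 2)
	fmt.Printf("%s %s:%d %s\n", sev, file, line, fmt.Sprint(args...))
}

func (d depthLogger) InfoDepth(depth int, args ...interface{})    { d.log("INFO", depth, args...) }
func (d depthLogger) WarningDepth(depth int, args ...interface{}) { d.log("WARNING", depth, args...) }
func (d depthLogger) ErrorDepth(depth int, args ...interface{})   { d.log("ERROR", depth, args...) }
func (d depthLogger) FatalDepth(depth int, args ...interface{})   { d.log("FATAL", depth, args...) }

// Compile-time check that the toy logger satisfies the internal interface.
var _ internalgrpclog.DepthLoggerV2 = depthLogger{}

func main() {
	depthLogger{}.InfoDepth(0, "hello from the example")
}
```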
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
index f6e0dc1..82af70e 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -18,10 +18,15 @@
 
 package grpclog
 
+import (
+	"fmt"
+)
+
 // PrefixLogger does logging with a prefix.
 //
 // Logging method on a nil logs without any prefix.
 type PrefixLogger struct {
+	logger DepthLoggerV2
 	prefix string
 }
 
@@ -30,34 +35,47 @@
 	if pl != nil {
 		// Handle nil, so the tests can pass in a nil logger.
 		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
 	}
-	Logger.Infof(format, args...)
+	InfoDepth(1, fmt.Sprintf(format, args...))
 }
 
 // Warningf does warning logging.
 func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
 	if pl != nil {
 		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
 	}
-	Logger.Warningf(format, args...)
+	WarningDepth(1, fmt.Sprintf(format, args...))
 }
 
 // Errorf does error logging.
 func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
 	if pl != nil {
 		format = pl.prefix + format
+		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+		return
 	}
-	Logger.Errorf(format, args...)
+	ErrorDepth(1, fmt.Sprintf(format, args...))
 }
 
 // Debugf does info logging at verbose level 2.
 func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
-	if Logger.V(2) {
-		pl.Infof(format, args...)
+	if !Logger.V(2) {
+		return
 	}
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	InfoDepth(1, fmt.Sprintf(format, args...))
 }
 
 // NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(prefix string) *PrefixLogger {
-	return &PrefixLogger{prefix: prefix}
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
 }
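
PrefixLogger now requires a DepthLoggerV2 up front instead of falling back to the global Logger. A short sketch of the intended pattern, assuming a logger obtained from the public grpclog.Component helper (which provides the required *Depth methods); the component name and prefix are made up.

```go
package main

import (
	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

func main() {
	// A component logger from the public grpclog package supplies the
	// InfoDepth/WarningDepth/... methods the internal DepthLoggerV2 needs.
	logger := grpclog.Component("example") // hypothetical component name

	// Every line logged through pl carries the prefix, and the depth of 1 used
	// inside PrefixLogger keeps the reported call site at the caller.
	pl := internalgrpclog.NewPrefixLogger(logger, "[entity 42] ")
	pl.Infof("state changed to %v", "READY")
	pl.Warningf("retrying in %d ms", 250)
}
```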
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
index 200b115..740f83c 100644
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -31,26 +31,37 @@
 	mu sync.Mutex
 )
 
+// Int implements rand.Int on the grpcrand global source.
+func Int() int {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Int()
+}
+
 // Int63n implements rand.Int63n on the grpcrand global source.
 func Int63n(n int64) int64 {
 	mu.Lock()
-	res := r.Int63n(n)
-	mu.Unlock()
-	return res
+	defer mu.Unlock()
+	return r.Int63n(n)
 }
 
 // Intn implements rand.Intn on the grpcrand global source.
 func Intn(n int) int {
 	mu.Lock()
-	res := r.Intn(n)
-	mu.Unlock()
-	return res
+	defer mu.Unlock()
+	return r.Intn(n)
 }
 
 // Float64 implements rand.Float64 on the grpcrand global source.
 func Float64() float64 {
 	mu.Lock()
-	res := r.Float64()
-	mu.Unlock()
-	return res
+	defer mu.Unlock()
+	return r.Float64()
+}
+
+// Uint64 implements rand.Uint64 on the grpcrand global source.
+func Uint64() uint64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Uint64()
 }
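
The helpers serialize access to a single package-level *rand.Rand, so they are safe for concurrent callers, unlike a bare rand.Rand. A small, hypothetical use is jittering a retry interval:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

func main() {
	// Jitter a base interval by up to 100%; safe to call from many goroutines.
	base := 500 * time.Millisecond
	jittered := base + time.Duration(grpcrand.Int63n(int64(base)))
	fmt.Println("sleeping for", jittered)

	fmt.Println("random uint64:", grpcrand.Uint64())
}
```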
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000..b25b0ba
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strconv"
+	"time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance to overflow.
+func div(d, r time.Duration) int64 {
+	if d%r > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+	// TODO: This is simplistic and not bandwidth efficient. Improve it.
+	if t <= 0 {
+		return "0n"
+	}
+	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "n"
+	}
+	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "u"
+	}
+	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "m"
+	}
+	if d := div(t, time.Second); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "S"
+	}
+	if d := div(t, time.Minute); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "M"
+	}
+	// Note that maxTimeoutValue * time.Hour > MaxInt64.
+	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
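
EncodeDuration produces the grpc-timeout header value: at most eight digits plus a unit suffix, with the division rounded up so the encoded timeout never undershoots the deadline. A few illustrative inputs and the outputs the logic above should produce:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// The coarsest unit that keeps the value within 8 digits is chosen.
	fmt.Println(grpcutil.EncodeDuration(0))                      // "0n"
	fmt.Println(grpcutil.EncodeDuration(1500 * time.Nanosecond)) // "1500n"
	fmt.Println(grpcutil.EncodeDuration(250 * time.Millisecond)) // "250000u"
	fmt.Println(grpcutil.EncodeDuration(200 * time.Second))      // "200000m"
	fmt.Println(grpcutil.EncodeDuration(48 * time.Hour))         // "172800S"
}
```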
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
similarity index 72%
rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
index d4346e9..e2f948e 100644
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -1,8 +1,6 @@
-// +build appengine
-
 /*
  *
- * Copyright 2018 gRPC authors.
+ * Copyright 2021 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,13 +16,5 @@
  *
  */
 
-package internal
-
-import (
-	"net"
-)
-
-// WrapSyscallConn returns newConn on appengine.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
-	return newConn
-}
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000..6f22bd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+	return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists.  The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000..4e74750
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"errors"
+	"strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+//
+func ParseMethod(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
+
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type.  The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
+}
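
A brief sketch of the method-name and content-type helpers above; the expected results in the comments follow directly from the parsing rules.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// Full method names on the wire look like "/package.Service/Method".
	service, method, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(service, method, err) // helloworld.Greeter SayHello <nil>

	// Content-subtype handling for the "content-type" header.
	fmt.Println(grpcutil.ContentSubtype("application/grpc"))       // "" true
	fmt.Println(grpcutil.ContentSubtype("application/grpc+proto")) // "proto" true
	fmt.Println(grpcutil.ContentSubtype("text/html"))              // "" false
	fmt.Println(grpcutil.ContentType("proto"))                     // "application/grpc+proto"
}
```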
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go
index 8783a8c..7a092b2 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -1,8 +1,6 @@
-// +build go1.13
-
 /*
  *
- * Copyright 2019 gRPC authors.
+ * Copyright 2021 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,16 +16,16 @@
  *
  */
 
-package dns
+package grpcutil
 
-import "net"
+import "regexp"
 
-func init() {
-	filterError = func(err error) error {
-		if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
-			// The name does not exist; not an error.
-			return nil
-		}
-		return err
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
 	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
 }
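
FullMatchWithRegex turns Go's leftmost-match semantics into a whole-string check by requesting the longest match and comparing its length against the input. A short illustration of how it differs from a plain MatchString:

```go
package main

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	re := regexp.MustCompile("gr.c")

	// MatchString accepts any substring match...
	fmt.Println(re.MatchString("xdsgrpcxds")) // true
	// ...while the helper requires the match to cover the entire input.
	fmt.Println(grpcutil.FullMatchWithRegex(re, "xdsgrpcxds")) // false
	fmt.Println(grpcutil.FullMatchWithRegex(re, "grpc"))       // true

	// Empty input is delegated to MatchString, so patterns that can match the
	// empty string still return true.
	fmt.Println(grpcutil.FullMatchWithRegex(regexp.MustCompile("a*"), "")) // true
}
```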
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go
deleted file mode 100644
index 80b33cd..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcutil provides a bunch of utility functions to be used across the
-// gRPC codebase.
-package grpcutil
-
-import (
-	"strings"
-
-	"google.golang.org/grpc/resolver"
-)
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
-	spl := strings.SplitN(s, sep, 2)
-	if len(spl) < 2 {
-		return "", "", false
-	}
-	return spl[0], spl[1], true
-}
-
-// ParseTarget splits target into a resolver.Target struct containing scheme,
-// authority and endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func ParseTarget(target string) (ret resolver.Target) {
-	var ok bool
-	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	return ret
-}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c6fbe8b..1b596bf 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -25,6 +25,7 @@
 	"time"
 
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 var (
@@ -37,12 +38,32 @@
 	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
-	// NewRequestInfoContext creates a new context based on the argument context attaching
-	// the passed in RequestInfo to the new context.
-	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
 	// ParseServiceConfigForTesting is for creating a fake
 	// ClientConn for resolver testing only
 	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+	// This function compares the config without rawJSON stripped, in case the
+	// there's difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
+	// DrainServerTransports initiates a graceful close of existing connections
+	// on a gRPC server that were accepted on the provided listener address. An
+	// xDS-enabled server invokes this method on a grpc.Server when a particular
+	// listener moves to "not-serving" mode.
+	DrainServerTransports interface{} // func(*grpc.Server, string)
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000..b8733db
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o interface{}) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(mdValue)
+	return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+	return addr
+}
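
A sketch of attaching per-address metadata, roughly as a resolver or balancer inside the grpc module would, and reading it back; the backend address and header name are made up.

```go
package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	// Metadata set on an address is carried by every RPC sent on a SubConn
	// created for that address.
	addr := resolver.Address{Addr: "10.0.0.1:443"} // hypothetical backend
	addr = imetadata.Set(addr, metadata.Pairs("x-backend-pool", "primary"))

	md := imetadata.Get(addr)
	fmt.Println(md.Get("x-backend-pool")) // [primary]
}
```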
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000..c7a18a9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+	// Selects the configuration for the RPC, or terminates it using the error.
+	// This error will be converted by the gRPC library to a status error with
+	// code UNKNOWN if it is not returned as a status error.
+	SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+	// Context is the user's context for the RPC and contains headers and
+	// application timeout.  It is passed for interception purposes and for
+	// efficiency reasons.  SelectConfig should not be blocking.
+	Context context.Context
+	Method  string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+	// The context to use for the remainder of the RPC; can pass info to LB
+	// policy or affect timeout or metadata.
+	Context      context.Context
+	MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+	OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible)
+	Interceptor  ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+	// NewStream produces a ClientStream for an RPC which may optionally use
+	// the provided function to produce a stream for delegation.  Note:
+	// RPCInfo.Context should not be used (will be nil).
+	//
+	// done is invoked when the RPC is finished using its connection, or could
+	// not be assigned a connection.  RPC operations may still occur on
+	// ClientStream after done is called, since the interceptor is invoked by
+	// application-layer operations.  done must never be nil when called.
+	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+	// AllowRPC checks if an incoming RPC is allowed to proceed based on
+	// information about the connection the RPC was received on, and the HTTP
+	// headers. This information will be piped into the context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+	state.Attributes = state.Attributes.WithValue(csKey, cs)
+	return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+	cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+	return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+	mu sync.RWMutex
+	cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+	scs.mu.Lock()
+	defer scs.mu.Unlock()
+	scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+	scs.mu.RLock()
+	defer scs.mu.RUnlock()
+	return scs.cs.SelectConfig(r)
+}
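
A toy ConfigSelector wired through SetConfigSelector/GetConfigSelector and a SafeConfigSelector, roughly as a resolver and the ClientConn would use them inside the grpc module. The method name is hypothetical and the selector does nothing beyond passing the RPC's context through.

```go
package main

import (
	"context"
	"fmt"

	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/resolver"
)

// methodEchoSelector is a toy ConfigSelector that records which method was
// asked about and otherwise uses the RPC's own context unchanged.
type methodEchoSelector struct{}

func (methodEchoSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	fmt.Println("selecting config for", ri.Method)
	return &iresolver.RPCConfig{Context: ri.Context}, nil
}

func main() {
	// A resolver attaches the selector to the state it pushes to the
	// ClientConn; the ClientConn later retrieves it per RPC.
	state := iresolver.SetConfigSelector(resolver.State{}, methodEchoSelector{})
	cs := iresolver.GetConfigSelector(state)

	// SafeConfigSelector allows swapping selectors while RPCs are in flight.
	var scs iresolver.SafeConfigSelector
	scs.UpdateConfigSelector(cs)

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{
		Context: context.Background(),
		Method:  "/pkg.Service/Method",
	})
	fmt.Println(cfg != nil, err) // true <nil>
}
```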
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index c368db6..75301c5 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -32,7 +32,9 @@
 	"sync"
 	"time"
 
+	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
@@ -43,6 +45,15 @@
 // addresses from SRV records.  Must not be changed after init time.
 var EnableSRVLookups = false
 
+var logger = grpclog.Component("dns")
+
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+	newTimer           = time.NewTimer
+	newTimerDNSResRate = time.NewTimer
+)
+
 func init() {
 	resolver.Register(NewBuilder())
 }
@@ -140,7 +151,6 @@
 
 	d.wg.Add(1)
 	go d.watcher()
-	d.ResolveNow(resolver.ResolveNowOptions{})
 	return d, nil
 }
 
@@ -198,28 +208,38 @@
 
 func (d *dnsResolver) watcher() {
 	defer d.wg.Done()
+	backoffIndex := 1
 	for {
-		select {
-		case <-d.ctx.Done():
-			return
-		case <-d.rn:
-		}
-
 		state, err := d.lookup()
 		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
 			d.cc.ReportError(err)
 		} else {
-			d.cc.UpdateState(*state)
+			err = d.cc.UpdateState(*state)
 		}
 
-		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
-		// will be queued in d.rn.
-		t := time.NewTimer(minDNSResRate)
+		var timer *time.Timer
+		if err == nil {
+			// Resolution succeeded; wait for the next ResolveNow request. However, also
+			// wait at least 30 seconds (minDNSResRate) to prevent constantly re-resolving.
+			backoffIndex = 1
+			timer = newTimerDNSResRate(minDNSResRate)
+			select {
+			case <-d.ctx.Done():
+				timer.Stop()
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll with backoff on an error found by the DNS resolver or received from the ClientConn.
+			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
 		select {
-		case <-t.C:
 		case <-d.ctx.Done():
-			t.Stop()
+			timer.Stop()
 			return
+		case <-timer.C:
 		}
 	}
 }
@@ -251,27 +271,22 @@
 				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 			}
 			addr := ip + ":" + strconv.Itoa(int(s.Port))
-			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
 		}
 	}
 	return newAddrs, nil
 }
 
-var filterError = func(err error) error {
+func handleDNSError(err error, lookupType string) error {
 	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
 		// Timeouts and temporary errors should be communicated to gRPC to
 		// attempt another DNS query (with backoff).  Other errors should be
 		// suppressed (they may represent the absence of a TXT record).
 		return nil
 	}
-	return err
-}
-
-func handleDNSError(err error, lookupType string) error {
-	err = filterError(err)
 	if err != nil {
 		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
-		grpclog.Infoln(err)
+		logger.Info(err)
 	}
 	return err
 }
@@ -294,7 +309,7 @@
 
 	// TXT record must have "grpc_config=" attribute in order to be used as service config.
 	if !strings.HasPrefix(res, txtAttribute) {
-		grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
 		// This is not an error; it is the equivalent of not having a service config.
 		return nil
 	}
@@ -303,12 +318,12 @@
 }
 
 func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
-	var newAddrs []resolver.Address
 	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
 	if err != nil {
 		err = handleDNSError(err, "A")
 		return nil, err
 	}
+	newAddrs := make([]resolver.Address, 0, len(addrs))
 	for _, a := range addrs {
 		ip, ok := formatIP(a)
 		if !ok {
@@ -326,13 +341,15 @@
 	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
 		return nil, hostErr
 	}
-	state := &resolver.State{
-		Addresses: append(addrs, srv...),
+
+	state := resolver.State{Addresses: addrs}
+	if len(srv) > 0 {
+		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
 	}
 	if !d.disableServiceConfig {
 		state.ServiceConfig = d.lookupTXT()
 	}
-	return state, nil
+	return &state, nil
 }
 
 // formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -418,12 +435,12 @@
 	var rcs []rawChoice
 	err := json.Unmarshal([]byte(js), &rcs)
 	if err != nil {
-		grpclog.Warningf("dns: error parsing service config json: %v", err)
+		logger.Warningf("dns: error parsing service config json: %v", err)
 		return ""
 	}
 	cliHostname, err := os.Hostname()
 	if err != nil {
-		grpclog.Warningf("dns: error getting client hostname: %v", err)
+		logger.Warningf("dns: error getting client hostname: %v", err)
 		return ""
 	}
 	var sc string
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000..20852e5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+	scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.Authority != "" {
+		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
+	}
+
+	// gRPC was parsing the dial target manually before PR #4817, and we
+	// switched to using url.Parse() in that PR. To avoid breaking existing
+	// resolver implementations we ended up stripping the leading "/" from the
+	// endpoint. This obviously does not work for the "unix" scheme. Hence we
+	// end up using the parsed URL instead.
+	endpoint := target.URL.Path
+	if endpoint == "" {
+		endpoint = target.URL.Opaque
+	}
+	addr := resolver.Address{Addr: endpoint}
+	if b.scheme == unixAbstractScheme {
+		// prepend "\x00" to address for unix-abstract
+		addr.Addr = "\x00" + addr.Addr
+	}
+	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+	return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+	return b.scheme
+}
+
+type nopResolver struct {
+}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+	resolver.Register(&builder{scheme: unixScheme})
+	resolver.Register(&builder{scheme: unixAbstractScheme})
+}
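
With these builders registered, unix-domain targets can be dialed through the public API. A hedged sketch follows; the socket path is hypothetical, and grpc.WithInsecure is used only to keep the example self-contained.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// "unix:///absolute/path" uses a filesystem socket; "unix-abstract:name"
	// uses the Linux abstract namespace (the resolver prepends the NUL byte).
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "unix:///tmp/example.sock", // hypothetical socket path
		grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```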
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..badbdbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+	Name   string
+	Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+	if bc.Config == nil {
+		// If config is nil, return empty config `{}`.
+		return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+	}
+	c, err := json.Marshal(bc.Config)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+// - If the config for the first supported policy is invalid, the whole service
+//   config is invalid.
+// - If the list doesn't contain any supported policy, the whole service config
+//   is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+	var ir intermediateBalancerConfig
+	err := json.Unmarshal(b, &ir)
+	if err != nil {
+		return err
+	}
+
+	var names []string
+	for i, lbcfg := range ir {
+		if len(lbcfg) != 1 {
+			return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+		}
+
+		var (
+			name    string
+			jsonCfg json.RawMessage
+		)
+		// Get the key:value pair from the map. We have already made sure that
+		// the map contains a single entry.
+		for name, jsonCfg = range lbcfg {
+		}
+
+		names = append(names, name)
+		builder := balancer.Get(name)
+		if builder == nil {
+			// If the balancer is not registered, move on to the next config.
+			// This is not an error.
+			continue
+		}
+		bc.Name = name
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			if string(jsonCfg) != "{}" {
+				logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+			}
+			// Stop here, even though the builder doesn't support parsing the config.
+			return nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+		}
+		bc.Config = cfg
+		return nil
+	}
+	// This is reached when the for loop iterates over all entries, but didn't
+	// return. This means we had a loadBalancingConfig slice but did not
+	// encounter a registered policy. The config is considered invalid in this
+	// case.
+	return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	MaxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+	//
+	// These fields are required and must be greater than zero.
+	InitialBackoff    time.Duration
+	MaxBackoff        time.Duration
+	BackoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	RetryableStatusCodes map[codes.Code]bool
+}
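
For reference, BalancerConfig.UnmarshalJSON above accepts a service config fragment such as [{"round_robin": {}}] and picks the first entry whose policy name is registered. The sketch below only illustrates the backoff formula quoted in the RetryPolicy comment; the parameter values are illustrative, and this is not the code gRPC itself uses to schedule retries.

	package main

	import (
		"fmt"
		"math"
		"time"
	)

	// backoffCap computes the cap on the delay before the nth retry attempt:
	// the actual delay is drawn from random(0, min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
	func backoffCap(initial, max time.Duration, multiplier float64, attempt int) time.Duration {
		d := float64(initial) * math.Pow(multiplier, float64(attempt-1))
		if d > float64(max) {
			d = float64(max)
		}
		return time.Duration(d)
	}

	func main() {
		// initialBackoff=100ms, multiplier=2, maxBackoff=1s gives caps of
		// 100ms, 200ms, 400ms, 800ms, 1s, 1s, ...
		for n := 1; n <= 6; n++ {
			fmt.Println(n, backoffCap(100*time.Millisecond, time.Second, 2, n))
		}
	}
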
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 6812606..e5c6513 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -97,7 +97,7 @@
 	if s.Code() == codes.OK {
 		return nil
 	}
-	return (*Error)(s.Proto())
+	return &Error{s: s}
 }
 
 // WithDetails returns a new status with the provided details messages appended to the status.
@@ -136,26 +136,31 @@
 	return details
 }
 
-// Error is an alias of a status proto. It implements error and Status,
-// and a nil Error should never be returned by this package.
-type Error spb.Status
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
 
-func (se *Error) Error() string {
-	p := (*spb.Status)(se)
-	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
+// Error wraps a pointer to a Status. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
 }
 
 // GRPCStatus returns the Status represented by se.
-func (se *Error) GRPCStatus() *Status {
-	return FromProto((*spb.Status)(se))
+func (e *Error) GRPCStatus() *Status {
+	return e.s
 }
 
 // Is implements future error.Is functionality.
 // A Error is equivalent if the code and message are identical.
-func (se *Error) Is(target error) bool {
+func (e *Error) Is(target error) bool {
 	tse, ok := target.(*Error)
 	if !ok {
 		return false
 	}
-	return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+	return proto.Equal(e.s.s, tse.s.s)
 }
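
A small sketch of the equality semantics of the Is method above, assuming the public status package (whose (*Status).Err() returns this internal *Error). The code and message are made up for illustration.

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		a := status.New(codes.NotFound, "user not found").Err()
		b := status.New(codes.NotFound, "user not found").Err()
		// Distinct *Error values, but identical code and message, so the
		// wrapped status protos compare equal and errors.Is reports true.
		fmt.Println(errors.Is(a, b)) // true
	}
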
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 43281a3..b3a7227 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
@@ -32,35 +30,35 @@
 	"google.golang.org/grpc/grpclog"
 )
 
+var logger = grpclog.Component("core")
+
 // GetCPUTime returns the how much CPU time has passed since the start of this process.
 func GetCPUTime() int64 {
 	var ts unix.Timespec
 	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
-		grpclog.Fatal(err)
+		logger.Fatal(err)
 	}
 	return ts.Nano()
 }
 
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
+// Rusage is an alias for syscall.Rusage under the linux environment.
+type Rusage = syscall.Rusage
 
 // GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
-	rusage = new(Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
-	return
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
 // between two Rusage structs.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
-	f := (*syscall.Rusage)(first)
-	l := (*syscall.Rusage)(latest)
 	var (
-		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
-		utimeDiffus = l.Utime.Usec - f.Utime.Usec
-		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
-		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
 	)
 
 	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index d3fd9da..999f52c 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
@@ -18,6 +19,8 @@
  *
  */
 
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
 package syscall
 
 import (
@@ -29,44 +32,45 @@
 )
 
 var once sync.Once
+var logger = grpclog.Component("core")
 
 func log() {
 	once.Do(func() {
-		grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+		logger.Info("CPU time info is unavailable on non-linux environments.")
 	})
 }
 
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
 func GetCPUTime() int64 {
 	log()
 	return 0
 }
 
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
 type Rusage struct{}
 
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
 	log()
 	return nil
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
+// between two Rusage structs. It is a no-op function for non-linux environments.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+// SetTCPUserTimeout is a no-op function under non-linux environments.
 func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
 	log()
 	return nil
 }
 
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
 func GetTCPUserTimeout(conn net.Conn) (int, error) {
 	log()
 	return -1, nil
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ddee20b..8394d25 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -20,13 +20,17 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"runtime"
+	"strconv"
 	"sync"
 	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/status"
 )
 
 var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
@@ -128,6 +132,15 @@
 
 func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
 
+type earlyAbortStream struct {
+	httpStatus     uint32
+	streamID       uint32
+	contentSubtype string
+	status         *status.Status
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
 type dataFrame struct {
 	streamID  uint32
 	endStream bool
@@ -284,7 +297,7 @@
 	// closed and nilled when transportResponseFrames drops below the
 	// threshold.  Both fields are protected by mu.
 	transportResponseFrames int
-	trfChan                 atomic.Value // *chan struct{}
+	trfChan                 atomic.Value // chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -298,10 +311,10 @@
 // throttle blocks if there are too many incomingSettings/cleanupStreams in the
 // controlbuf.
 func (c *controlBuffer) throttle() {
-	ch, _ := c.trfChan.Load().(*chan struct{})
+	ch, _ := c.trfChan.Load().(chan struct{})
 	if ch != nil {
 		select {
-		case <-*ch:
+		case <-ch:
 		case <-c.done:
 		}
 	}
@@ -335,8 +348,7 @@
 		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
 			// We are adding the frame that puts us over the threshold; create
 			// a throttling channel.
-			ch := make(chan struct{})
-			c.trfChan.Store(&ch)
+			c.trfChan.Store(make(chan struct{}))
 		}
 	}
 	c.mu.Unlock()
@@ -377,9 +389,9 @@
 				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
 					// We are removing the frame that put us over the
 					// threshold; close and clear the throttling channel.
-					ch := c.trfChan.Load().(*chan struct{})
-					close(*ch)
-					c.trfChan.Store((*chan struct{})(nil))
+					ch := c.trfChan.Load().(chan struct{})
+					close(ch)
+					c.trfChan.Store((chan struct{})(nil))
 				}
 				c.transportResponseFrames--
 			}
@@ -395,7 +407,6 @@
 		select {
 		case <-c.ch:
 		case <-c.done:
-			c.finish()
 			return nil, ErrConnClosing
 		}
 	}
@@ -420,6 +431,14 @@
 			hdr.onOrphaned(ErrConnClosing)
 		}
 	}
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch, _ := c.trfChan.Load().(chan struct{})
+	if ch != nil {
+		close(ch)
+	}
+	c.trfChan.Store((chan struct{})(nil))
 	c.mu.Unlock()
 }
 
@@ -505,7 +524,9 @@
 			// 1. When the connection is closed by some other known issue.
 			// 2. User closed the connection.
 			// 3. A graceful close of connection.
-			infof("transport: loopyWriter.run returning. %v", err)
+			if logger.V(logLevel) {
+				logger.Infof("transport: loopyWriter.run returning. %v", err)
+			}
 			err = nil
 		}
 	}()
@@ -605,7 +626,9 @@
 	if l.side == serverSide {
 		str, ok := l.estdStreams[h.streamID]
 		if !ok {
-			warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			}
 			return nil
 		}
 		// Case 1.A: Server is responding back with headers.
@@ -658,7 +681,9 @@
 	l.hBuf.Reset()
 	for _, f := range hf {
 		if err := l.hEnc.WriteField(f); err != nil {
-			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+			}
 		}
 	}
 	var (
@@ -743,6 +768,27 @@
 	return nil
 }
 
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+	if l.side == clientSide {
+		return errors.New("earlyAbortStream not handled on client")
+	}
+	// In case the caller forgets to set the http status, default to 200.
+	if eas.httpStatus == 0 {
+		eas.httpStatus = 200
+	}
+	headerFields := []hpack.HeaderField{
+		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+	}
+
+	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
 	if l.side == clientSide {
 		l.draining = true
@@ -781,6 +827,8 @@
 		return l.registerStreamHandler(i)
 	case *cleanupStream:
 		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
 	case *incomingGoAway:
 		return l.incomingGoAwayHandler(i)
 	case *dataFrame:
@@ -857,38 +905,45 @@
 		return false, nil
 	}
 	var (
-		idx int
 		buf []byte
 	)
-	if len(dataItem.h) != 0 { // data header has not been written out yet.
-		buf = dataItem.h
-	} else {
-		idx = 1
-		buf = dataItem.d
-	}
-	size := http2MaxFrameLen
-	if len(buf) < size {
-		size = len(buf)
-	}
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
 	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
 		str.state = waitingOnStreamQuota
 		return false, nil
-	} else if strQuota < size {
-		size = strQuota
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, len(dataItem.d))
+	if hSize != 0 {
+		if dSize == 0 {
+			buf = dataItem.h
+		} else {
+			// We can add some data to grpc message header to distribute bytes more equally across frames.
+			// Copy on the stack to avoid generating garbage
+			var localBuf [http2MaxFrameLen]byte
+			copy(localBuf[:hSize], dataItem.h)
+			copy(localBuf[hSize:], dataItem.d[:dSize])
+			buf = localBuf[:hSize+dSize]
+		}
+	} else {
+		buf = dataItem.d
 	}
 
-	if l.sendQuota < uint32(size) { // connection-level flow control.
-		size = int(l.sendQuota)
-	}
+	size := hSize + dSize
+
 	// Now that outgoing flow controls are checked we can replenish str's write quota
 	str.wq.replenish(size)
 	var endStream bool
 	// If this is the last data message on this stream and all of it can be written in this iteration.
-	if dataItem.endStream && size == len(buf) {
-		// buf contains either data or it contains header but data is empty.
-		if idx == 1 || len(dataItem.d) == 0 {
-			endStream = true
-		}
+	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+		endStream = true
 	}
 	if dataItem.onEachWrite != nil {
 		dataItem.onEachWrite()
@@ -896,14 +951,10 @@
 	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
 		return false, err
 	}
-	buf = buf[size:]
 	str.bytesOutStanding += size
 	l.sendQuota -= uint32(size)
-	if idx == 0 {
-		dataItem.h = buf
-	} else {
-		dataItem.d = buf
-	}
+	dataItem.h = dataItem.h[hSize:]
+	dataItem.d = dataItem.d[dSize:]
 
 	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
 		str.itl.dequeue()
@@ -924,3 +975,10 @@
 	}
 	return false, nil
 }
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
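
A worked example of the frame-filling change above, assuming ample stream and connection quota so maxSize stays at the 16384-byte HTTP/2 max frame length. The header and payload sizes are illustrative; this is a sketch of the split, not the transport's API.

	package main

	import "fmt"

	func min(a, b int) int {
		if a < b {
			return a
		}
		return b
	}

	func main() {
		// One pass through the data handler: a 5-byte gRPC message header
		// and a 100000-byte payload.
		const maxSize = 16384 // http2MaxFrameLen
		hLen, dLen := 5, 100000
		hSize := min(maxSize, hLen)
		dSize := min(maxSize-hSize, dLen)
		// The first DATA frame carries hSize+dSize bytes copied into one
		// stack buffer; the rest of the payload goes out in later frames.
		fmt.Println(hSize, dSize, dLen-dSize) // 5 16379 83621
	}
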
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index f262edd..97198c5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -136,12 +136,10 @@
 
 // newLimit updates the inflow window to a new value n.
 // It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
 	f.mu.Lock()
-	d := n - f.limit
 	f.limit = n
 	f.mu.Unlock()
-	return d
 }
 
 func (f *inFlow) maybeAdjust(n uint32) uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index fc44e97..1c3459c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -39,6 +39,7 @@
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -57,7 +58,7 @@
 	}
 	contentType := r.Header.Get("Content-Type")
 	// TODO: do we assume contentType is lowercase? we did before
-	contentSubtype, validContentType := contentSubtype(contentType)
+	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
 	if !validContentType {
 		return nil, errors.New("invalid gRPC request content-type")
 	}
@@ -140,9 +141,8 @@
 	stats stats.Handler
 }
 
-func (ht *serverHandlerTransport) Close() error {
+func (ht *serverHandlerTransport) Close() {
 	ht.closeOnce.Do(ht.closeCloseChanOnce)
-	return nil
 }
 
 func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 1cc586f..f0c72d3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -24,6 +24,8 @@
 	"io"
 	"math"
 	"net"
+	"net/http"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
@@ -32,15 +34,18 @@
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
-
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
 	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 )
@@ -57,7 +62,7 @@
 	cancel     context.CancelFunc
 	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
 	userAgent  string
-	md         interface{}
+	md         metadata.MD
 	conn       net.Conn // underlying communication channel
 	loopy      *loopyWriter
 	remoteAddr net.Addr
@@ -112,6 +117,9 @@
 	// goAwayReason records the http2.ErrCode and debug data received with the
 	// GoAway frame.
 	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
 	// A condition variable used to signal when the keepalive goroutine should
 	// go dormant. The condition for dormancy is based on the number of active
 	// streams and the `PermitWithoutStream` keepalive client parameter. And
@@ -135,11 +143,34 @@
 	connectionID uint64
 }
 
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
 	if fn != nil {
-		return fn(ctx, addr)
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, users used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
+			if filepath.IsAbs(address) {
+				return fn(ctx, "unix://"+address)
+			}
+			return fn(ctx, "unix:"+address)
+		}
+		return fn(ctx, address)
 	}
-	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+	if !ok {
+		networkType, address = parseDialTarget(address)
+	}
+	if networkType == "tcp" && useProxy {
+		return proxyDial(ctx, address, grpcUA)
+	}
+	return (&net.Dialer{}).DialContext(ctx, networkType, address)
 }
 
 func isTemporary(err error) bool {
@@ -161,7 +192,7 @@
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -170,7 +201,13 @@
 		}
 	}()
 
-	conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address-specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
 	if err != nil {
 		if opts.FailOnNonTempDialError {
 			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
@@ -214,12 +251,34 @@
 		}
 	}
 	if transportCreds != nil {
-		scheme = "https"
-		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+		rawConn := conn
+		// Pull the deadline from the connectCtx, which will be used for
+		// timeouts in the authentication protocol handshake. Can ignore the
+		// boolean as the deadline will return the zero value, which will make
+		// the conn not timeout on I/O operations.
+		deadline, _ := connectCtx.Deadline()
+		rawConn.SetDeadline(deadline)
+		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
+		rawConn.SetDeadline(time.Time{})
 		if err != nil {
 			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
 		}
+		for _, cd := range perRPCCreds {
+			if cd.RequireTransportSecurity() {
+				if ci, ok := authInfo.(interface {
+					GetCommonAuthInfo() credentials.CommonAuthInfo
+				}); ok {
+					secLevel := ci.GetCommonAuthInfo().SecurityLevel
+					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+					}
+				}
+			}
+		}
 		isSecure = true
+		if transportCreds.Info().SecurityProtocol == "tls" {
+			scheme = "https"
+		}
 	}
 	dynamicWindow := true
 	icwz := int32(initialWindowSize)
@@ -238,7 +297,6 @@
 		ctxDone:               ctx.Done(), // Cache Done chan.
 		cancel:                cancel,
 		userAgent:             opts.UserAgent,
-		md:                    addr.Metadata,
 		conn:                  conn,
 		remoteAddr:            conn.RemoteAddr(),
 		localAddr:             conn.LocalAddr(),
@@ -266,6 +324,12 @@
 		keepaliveEnabled:      keepaliveEnabled,
 		bufferPool:            newBufferPool(),
 	}
+
+	if md, ok := addr.Metadata.(*metadata.MD); ok {
+		t.md = *md
+	} else if md := imetadata.Get(addr); md != nil {
+		t.md = md
+	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if opts.InitialWindowSize >= defaultWindowSize {
 		t.initialWindowSize = opts.InitialWindowSize
@@ -302,12 +366,14 @@
 	// Send connection preface to server.
 	n, err := t.conn.Write(clientPreface)
 	if err != nil {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		t.Close(err)
+		return nil, err
 	}
 	if n != len(clientPreface) {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		t.Close(err)
+		return nil, err
 	}
 	var ss []http2.Setting
 
@@ -325,14 +391,16 @@
 	}
 	err = t.framer.fr.WriteSettings(ss...)
 	if err != nil {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		t.Close(err)
+		return nil, err
 	}
 	// Adjust the connection flow control window if needed.
 	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
 		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
-			t.Close()
-			return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			t.Close(err)
+			return nil, err
 		}
 	}
 
@@ -345,13 +413,14 @@
 		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
 		err := t.loopy.run()
 		if err != nil {
-			errorf("transport: loopyWriter.run returning. Err: %v", err)
+			if logger.V(logLevel) {
+				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+			}
 		}
-		// If it's a connection error, let reader goroutine handle it
-		// since there might be data in the buffers.
-		if _, ok := err.(net.Error); !ok {
-			t.conn.Close()
-		}
+		// Do not close the transport.  Let reader goroutine handle it since
+		// there might be data in the buffers.
+		t.conn.Close()
+		t.controlBuf.finish()
 		close(t.writerDone)
 	}()
 	return t, nil
@@ -367,6 +436,7 @@
 		buf:            newRecvBuffer(),
 		headerChan:     make(chan struct{}),
 		contentSubtype: callHdr.ContentSubtype,
+		doneFunc:       callHdr.DoneFunc,
 	}
 	s.wq = newWriteQuota(defaultWriteQuota, s.done)
 	s.requestRead = func(n int) {
@@ -406,7 +476,7 @@
 		Method:   callHdr.Method,
 		AuthInfo: t.authInfo,
 	}
-	ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
 	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
 	if err != nil {
 		return nil, err
@@ -425,7 +495,7 @@
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
 	if callHdr.PreviousAttempts > 0 {
@@ -440,7 +510,7 @@
 		// Send out timeout regardless its value. The server can detect timeout context by itself.
 		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
 		timeout := time.Until(dl)
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
 	}
 	for k, v := range authData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@@ -469,25 +539,23 @@
 		for _, vv := range added {
 			for i, v := range vv {
 				if i%2 == 0 {
-					k = v
+					k = strings.ToLower(v)
 					continue
 				}
 				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
 				if isReservedHeader(k) {
 					continue
 				}
-				headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 			}
 		}
 	}
-	if md, ok := t.md.(*metadata.MD); ok {
-		for k, vv := range *md {
-			if isReservedHeader(k) {
-				continue
-			}
-			for _, v := range vv {
-				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
-			}
+	for k, vv := range t.md {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 		}
 	}
 	return headerFields, nil
@@ -520,7 +588,7 @@
 				return nil, err
 			}
 
-			return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
 		}
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2.
@@ -537,8 +605,11 @@
 	// Note: if these credentials are provided both via dial options and call
 	// options, then both sets of credentials will be applied.
 	if callCreds := callHdr.Creds; callCreds != nil {
-		if !t.isSecure && callCreds.RequireTransportSecurity() {
-			return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+		if callCreds.RequireTransportSecurity() {
+			ri, _ := credentials.RequestInfoFromContext(ctx)
+			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+			}
 		}
 		data, err := callCreds.GetRequestMetadata(ctx, audience)
 		if err != nil {
@@ -554,13 +625,35 @@
 	return callAuthData, nil
 }
 
+// NewStreamError wraps an error and reports additional information.  Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire.  However, there are two notable exceptions:
+//
+// 1. If the stream headers violate the max header list size allowed by the
+//    server.  In this case there is no reason to retry at all, as it is
+//    assumed the RPC would continue to fail on subsequent attempts.
+// 2. If the credentials errored when requesting their headers.  In this case,
+//    it's possible a retry can fix the problem, but indefinitely transparently
+//    retrying is not appropriate as it is likely the credentials, if they can
+//    eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+	Err error
+
+	DoNotRetry            bool
+	DoNotTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+	return e.Err.Error()
+}
+
 // NewStream creates a stream and registers it into the transport as "active"
-// streams.
+// streams.  All non-nil errors returned will be *NewStreamError.
 func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
 	ctx = peer.NewContext(ctx, t.getPeer())
 	headerFields, err := t.createHeaderFields(ctx, callHdr)
 	if err != nil {
-		return nil, err
+		return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
 	}
 	s := t.newStream(ctx, callHdr)
 	cleanup := func(err error) {
@@ -660,23 +753,23 @@
 			return true
 		}, hdr)
 		if err != nil {
-			return nil, err
+			return nil, &NewStreamError{Err: err}
 		}
 		if success {
 			break
 		}
 		if hdrListSizeErr != nil {
-			return nil, hdrListSizeErr
+			return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
 		}
 		firstTry = false
 		select {
 		case <-ch:
-		case <-s.ctx.Done():
-			return nil, ContextErr(s.ctx.Err())
+		case <-ctx.Done():
+			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
 		case <-t.goAway:
-			return nil, errStreamDrain
+			return nil, &NewStreamError{Err: errStreamDrain}
 		case <-t.ctx.Done():
-			return nil, ErrConnClosing
+			return nil, &NewStreamError{Err: ErrConnClosing}
 		}
 	}
 	if t.statsHandler != nil {
@@ -771,6 +864,9 @@
 	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
 	// This will unblock write.
 	close(s.done)
+	if s.doneFunc != nil {
+		s.doneFunc()
+	}
 }
 
 // Close kicks off the shutdown process of the transport. This should be called
@@ -780,12 +876,12 @@
 // This method blocks until the addrConn that initiated this transport is
 // re-connected. This happens because t.onClose() begins reconnect logic at the
 // addrConn level and blocks until the addrConn is successfully connected.
-func (t *http2Client) Close() error {
+func (t *http2Client) Close(err error) {
 	t.mu.Lock()
 	// Make sure we only Close once.
 	if t.state == closing {
 		t.mu.Unlock()
-		return nil
+		return
 	}
 	// Call t.onClose before setting the state to closing to prevent the client
 	// from attempting to create new streams ASAP.
@@ -801,13 +897,25 @@
 	t.mu.Unlock()
 	t.controlBuf.finish()
 	t.cancel()
-	err := t.conn.Close()
+	t.conn.Close()
 	if channelz.IsOn() {
 		channelz.RemoveEntry(t.channelzID)
 	}
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause for this connection to be closed.
+	_, goAwayDebugMessage := t.GetGoAwayReason()
+
+	var st *status.Status
+	if len(goAwayDebugMessage) > 0 {
+		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+		err = st.Err()
+	} else {
+		st = status.New(codes.Unavailable, err.Error())
+	}
+
 	// Notify all active streams.
 	for _, s := range streams {
-		t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
 	}
 	if t.statsHandler != nil {
 		connEnd := &stats.ConnEnd{
@@ -815,7 +923,6 @@
 		}
 		t.statsHandler.HandleConn(t.ctx, connEnd)
 	}
-	return err
 }
 
 // GracefulClose sets the state to draining, which prevents new streams from
@@ -834,7 +941,7 @@
 	active := len(t.activeStreams)
 	t.mu.Unlock()
 	if active == 0 {
-		t.Close()
+		t.Close(ErrConnClosing)
 		return
 	}
 	t.controlBuf.put(&incomingGoAway{})
@@ -854,18 +961,10 @@
 	df := &dataFrame{
 		streamID:  s.id,
 		endStream: opts.Last,
+		h:         hdr,
+		d:         data,
 	}
-	if hdr != nil || data != nil { // If it's not an empty data frame.
-		// Add some data to grpc message header so that we can equally
-		// distribute bytes across frames.
-		emptyLen := http2MaxFrameLen - len(hdr)
-		if emptyLen > len(data) {
-			emptyLen = len(data)
-		}
-		hdr = append(hdr, data[:emptyLen]...)
-		data = data[emptyLen:]
-		df.h, df.d = hdr, data
-		// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
+	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
 		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
 			return err
 		}
@@ -983,7 +1082,7 @@
 	}
 	// The server has closed the stream without sending trailers.  Record that
 	// the read direction is closed, and set the status appropriately.
-	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+	if f.StreamEnded() {
 		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
 	}
 }
@@ -999,7 +1098,9 @@
 	}
 	statusCode, ok := http2ErrConvTab[f.ErrCode]
 	if !ok {
-		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+		if logger.V(logLevel) {
+			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+		}
 		statusCode = codes.Unknown
 	}
 	if statusCode == codes.Canceled {
@@ -1081,12 +1182,14 @@
 		return
 	}
 	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
-		infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+		if logger.V(logLevel) {
+			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+		}
 	}
 	id := f.LastStreamID
-	if id > 0 && id%2 != 1 {
+	if id > 0 && id%2 == 0 {
 		t.mu.Unlock()
-		t.Close()
+		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
 		return
 	}
 	// A client can receive multiple GoAways from the server (see
@@ -1104,7 +1207,7 @@
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
 		if id > t.prevGoAwayID {
 			t.mu.Unlock()
-			t.Close()
+			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
 			return
 		}
 	default:
@@ -1134,7 +1237,7 @@
 	active := len(t.activeStreams)
 	t.mu.Unlock()
 	if active == 0 {
-		t.Close()
+		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
 	}
 }
 
@@ -1150,12 +1253,17 @@
 			t.goAwayReason = GoAwayTooManyPings
 		}
 	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
 }
 
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return t.goAwayReason
+	return t.goAwayReason, t.goAwayDebugMessage
 }
 
 func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -1182,35 +1290,128 @@
 		return
 	}
 
-	state := &decodeState{}
-	// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
-	state.data.isGRPC = !initialHeader
-	if err := state.decodeHeader(frame); err != nil {
-		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+		return
+	}
+
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC         = !initialHeader
+		mdata          = make(map[string][]string)
+		contentTypeErr = "malformed header: missing HTTP content-type"
+		grpcMessage    string
+		statusGen      *status.Status
+		recvCompress   string
+		httpStatusCode *int
+		httpStatusErr  string
+		rawStatusCode  = codes.Unknown
+		// headerError is set if an error is encountered while parsing the headers
+		headerError string
+	)
+
+	if initialHeader {
+		httpStatusErr = "malformed header: missing HTTP status"
+	}
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+				break
+			}
+			contentTypeErr = ""
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			isGRPC = true
+		case "grpc-encoding":
+			recvCompress = hf.Value
+		case "grpc-status":
+			code, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			rawStatusCode = codes.Code(uint32(code))
+		case "grpc-message":
+			grpcMessage = decodeGrpcMessage(hf.Value)
+		case "grpc-status-details-bin":
+			var err error
+			statusGen, err = decodeGRPCStatusDetails(hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
+			}
+		case ":status":
+			if hf.Value == "200" {
+				httpStatusErr = ""
+				statusCode := 200
+				httpStatusCode = &statusCode
+				break
+			}
+
+			c, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			statusCode := int(c)
+			httpStatusCode = &statusCode
+
+			httpStatusErr = fmt.Sprintf(
+				"unexpected HTTP status code received from server: %d (%s)",
+				statusCode,
+				http.StatusText(statusCode),
+			)
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	if !isGRPC || httpStatusErr != "" {
+		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+		if httpStatusCode != nil {
+			var ok bool
+			code, ok = HTTPStatusConvTab[*httpStatusCode]
+			if !ok {
+				code = codes.Unknown
+			}
+		}
+		var errs []string
+		if httpStatusErr != "" {
+			errs = append(errs, httpStatusErr)
+		}
+		if contentTypeErr != "" {
+			errs = append(errs, contentTypeErr)
+		}
+		// Verify the HTTP response is a 200.
+		se := status.New(code, strings.Join(errs, "; "))
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	if headerError != "" {
+		se := status.New(codes.Internal, headerError)
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
 		return
 	}
 
 	isHeader := false
-	defer func() {
-		if t.statsHandler != nil {
-			if isHeader {
-				inHeader := &stats.InHeader{
-					Client:      true,
-					WireLength:  int(frame.Header().Length),
-					Header:      s.header.Copy(),
-					Compression: s.recvCompress,
-				}
-				t.statsHandler.HandleRPC(s.ctx, inHeader)
-			} else {
-				inTrailer := &stats.InTrailer{
-					Client:     true,
-					WireLength: int(frame.Header().Length),
-					Trailer:    s.trailer.Copy(),
-				}
-				t.statsHandler.HandleRPC(s.ctx, inTrailer)
-			}
-		}
-	}()
 
 	// If headerChan hasn't been closed yet
 	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
@@ -1221,9 +1422,9 @@
 			// These values can be set without any synchronization because
 			// stream goroutine will read it only after seeing a closed
 			// headerChan which we'll close after setting this.
-			s.recvCompress = state.data.encoding
-			if len(state.data.mdata) > 0 {
-				s.header = state.data.mdata
+			s.recvCompress = recvCompress
+			if len(mdata) > 0 {
+				s.header = mdata
 			}
 		} else {
 			// HEADERS frame block carries a Trailers-Only.
@@ -1232,13 +1433,36 @@
 		close(s.headerChan)
 	}
 
+	if t.statsHandler != nil {
+		if isHeader {
+			inHeader := &stats.InHeader{
+				Client:      true,
+				WireLength:  int(frame.Header().Length),
+				Header:      metadata.MD(mdata).Copy(),
+				Compression: s.recvCompress,
+			}
+			t.statsHandler.HandleRPC(s.ctx, inHeader)
+		} else {
+			inTrailer := &stats.InTrailer{
+				Client:     true,
+				WireLength: int(frame.Header().Length),
+				Trailer:    metadata.MD(mdata).Copy(),
+			}
+			t.statsHandler.HandleRPC(s.ctx, inTrailer)
+		}
+	}
+
 	if !endStream {
 		return
 	}
 
+	if statusGen == nil {
+		statusGen = status.New(rawStatusCode, grpcMessage)
+	}
+
 	// if client received END_STREAM from server while stream was still active, send RST_STREAM
 	rst := s.getState() == streamActive
-	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
+	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
 }
 
 // reader runs as a separate goroutine in charge of reading data from network
@@ -1252,7 +1476,8 @@
 	// Check the validity of server preface.
 	frame, err := t.framer.fr.ReadFrame()
 	if err != nil {
-		t.Close() // this kicks off resetTransport, so must be last before return
+		err = connectionErrorf(true, err, "error reading server preface: %v", err)
+		t.Close(err) // this kicks off resetTransport, so must be last before return
 		return
 	}
 	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
@@ -1261,7 +1486,8 @@
 	}
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
-		t.Close() // this kicks off resetTransport, so must be last before return
+		// this kicks off resetTransport, so must be last before return
+		t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
 		return
 	}
 	t.onPrefaceReceipt()
@@ -1285,13 +1511,19 @@
 				if s != nil {
 					// use error detail to provide better err message
 					code := http2ErrConvTab[se.Code]
-					msg := t.framer.fr.ErrorDetail().Error()
+					errorDetail := t.framer.fr.ErrorDetail()
+					var msg string
+					if errorDetail != nil {
+						msg = errorDetail.Error()
+					} else {
+						msg = "received invalid frame"
+					}
 					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
 				}
 				continue
 			} else {
 				// Transport error.
-				t.Close()
+				t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
 				return
 			}
 		}
@@ -1311,7 +1543,9 @@
 		case *http2.WindowUpdateFrame:
 			t.handleWindowUpdate(frame)
 		default:
-			errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			}
 		}
 	}
 }
@@ -1323,7 +1557,7 @@
 	return b
 }
 
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
 	p := &ping{data: [8]byte{}}
 	// True iff a ping has been sent, and no data has been received since then.
@@ -1348,7 +1582,7 @@
 				continue
 			}
 			if outstandingPing && timeoutLeft <= 0 {
-				t.Close()
+				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
 				return
 			}
 			t.mu.Lock()
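
To illustrate the custom-dialer special case in dial() above: when a custom dialer is installed, unix targets are handed to it in their original form rather than as a bare socket path. The sketch below assumes a hypothetical socket at /tmp/grpc.sock and uses grpc.WithInsecure only to keep it short.

	package main

	import (
		"context"
		"log"
		"net"
		"strings"

		"google.golang.org/grpc"
	)

	func main() {
		// For target "unix:///tmp/grpc.sock" the dialer receives
		// "unix:///tmp/grpc.sock", so it strips the scheme itself.
		dialer := func(ctx context.Context, addr string) (net.Conn, error) {
			path := strings.TrimPrefix(addr, "unix://")
			return (&net.Dialer{}).DialContext(ctx, "unix", path)
		}
		conn, err := grpc.Dial("unix:///tmp/grpc.sock",
			grpc.WithInsecure(), grpc.WithContextDialer(dialer))
		if err != nil {
			log.Fatalf("dial failed: %v", err)
		}
		defer conn.Close()
	}
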
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index fa33ffb..2c6eaf0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -26,6 +26,7 @@
 	"io"
 	"math"
 	"net"
+	"net/http"
 	"strconv"
 	"sync"
 	"sync/atomic"
@@ -34,10 +35,10 @@
 	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpcutil"
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/keepalive"
@@ -72,7 +73,6 @@
 	writerDone  chan struct{} // sync point to enable testing.
 	remoteAddr  net.Addr
 	localAddr   net.Addr
-	maxStreamID uint32               // max stream ID ever seen
 	authInfo    credentials.AuthInfo // auth info about the connection
 	inTapHandle tap.ServerInHandle
 	framer      *framer
@@ -101,11 +101,11 @@
 
 	mu sync.Mutex // guard the following
 
-	// drainChan is initialized when drain(...) is called the first time.
+	// drainChan is initialized when Drain() is called the first time.
 	// After which the server writes out the first GoAway(with ID 2^31-1) frame.
 	// Then an independent goroutine will be launched to later send the second GoAway.
 	// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
-	// Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
+	// Thus a call to Drain() will be a no-op if drainChan is already initialized, since draining is
 	// already underway.
 	drainChan     chan struct{}
 	state         transportState
@@ -122,11 +122,37 @@
 	bufferPool *bufferPool
 
 	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
 }
 
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
 	writeBufSize := config.WriteBufferSize
 	readBufSize := config.ReadBufferSize
 	maxHeaderListSize := defaultServerMaxHeaderListSize
@@ -209,14 +235,15 @@
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
+
 	done := make(chan struct{})
 	t := &http2Server{
-		ctx:               context.Background(),
+		ctx:               setConnection(context.Background(), rawConn),
 		done:              done,
 		conn:              conn,
 		remoteAddr:        conn.RemoteAddr(),
 		localAddr:         conn.LocalAddr(),
-		authInfo:          config.AuthInfo,
+		authInfo:          authInfo,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
 		writerDone:        make(chan struct{}),
@@ -265,6 +292,14 @@
 	// Check the validity of client preface.
 	preface := make([]byte, len(clientPreface))
 	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		// In deployments where a gRPC server runs behind a cloud load balancer
+		// which performs regular TCP level health checks, the connection is
+		// closed immediately by the latter.  Returning io.EOF here allows the
+		// grpc server implementation to recognize this scenario and suppress
+		// logging to reduce spam.
+		if err == io.EOF {
+			return nil, io.EOF
+		}
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
 	}
 	if !bytes.Equal(preface, clientPreface) {
@@ -289,9 +324,12 @@
 		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
 		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
 		if err := t.loopy.run(); err != nil {
-			errorf("transport: loopyWriter.run returning. Err: %v", err)
+			if logger.V(logLevel) {
+				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
+			}
 		}
 		t.conn.Close()
+		t.controlBuf.finish()
 		close(t.writerDone)
 	}()
 	go t.keepalive()
@@ -300,38 +338,144 @@
 
 // operateHeader takes action on the decoded headers.
 func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+	// Acquire max stream ID lock for entire duration
+	t.maxStreamMu.Lock()
+	defer t.maxStreamMu.Unlock()
+
 	streamID := frame.Header().StreamID
-	state := &decodeState{
-		serverSide: true,
-	}
-	if err := state.decodeHeader(frame); err != nil {
-		if se, ok := status.FromError(err); ok {
-			t.controlBuf.put(&cleanupStream{
-				streamID: streamID,
-				rst:      true,
-				rstCode:  statusCodeConvTab[se.Code()],
-				onWrite:  func() {},
-			})
-		}
+
+	// frame.Truncated is set to true when the framer detects that the current
+	// header list size hits the MaxHeaderListSize limit.
+	if frame.Truncated {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeFrameSize,
+			onWrite:  func() {},
+		})
 		return false
 	}
 
+	if streamID%2 != 1 || streamID <= t.maxStreamID {
+		// illegal gRPC stream id.
+		if logger.V(logLevel) {
+			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		}
+		return true
+	}
+	t.maxStreamID = streamID
+
 	buf := newRecvBuffer()
 	s := &Stream{
-		id:             streamID,
-		st:             t,
-		buf:            buf,
-		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
-		recvCompress:   state.data.encoding,
-		method:         state.data.method,
-		contentSubtype: state.data.contentSubtype,
+		id:  streamID,
+		st:  t,
+		buf: buf,
+		fc:  &inFlow{limit: uint32(t.initialWindowSize)},
 	}
+	var (
+		// isGRPC is set to true once a valid gRPC content-type header has been
+		// seen, meaning the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC     = false
+		mdata      = make(map[string][]string)
+		httpMethod string
+		// headerError is set if an error is encountered while parsing the headers
+		headerError bool
+
+		timeoutSet bool
+		timeout    time.Duration
+	)
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+			if !validContentType {
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			s.contentSubtype = contentSubtype
+			isGRPC = true
+		case "grpc-encoding":
+			s.recvCompress = hf.Value
+		case ":method":
+			httpMethod = hf.Value
+		case ":path":
+			s.method = hf.Value
+		case "grpc-timeout":
+			timeoutSet = true
+			var err error
+			if timeout, err = decodeTimeout(hf.Value); err != nil {
+				headerError = true
+			}
+		// "Transports must consider requests containing the Connection header
+		// as malformed." - A41
+		case "connection":
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+			}
+			headerError = true
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = true
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	// "If multiple Host headers or multiple :authority headers are present, the
+	// request must be rejected with an HTTP status code 400 as required by Host
+	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is an HTTP/2
+	// error, this takes precedence over a client not speaking gRPC.
+	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+		if logger.V(logLevel) {
+			logger.Errorf("transport: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     400,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+		})
+		return false
+	}
+
+	if !isGRPC || headerError {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeProtocol,
+			onWrite:  func() {},
+		})
+		return false
+	}
+
+	// "If :authority is missing, Host must be renamed to :authority." - A41
+	if len(mdata[":authority"]) == 0 {
+		// No-op if host isn't present either; an RPC that ends up with no
+		// :authority header is still valid.
+		if host, ok := mdata["host"]; ok {
+			mdata[":authority"] = host
+			delete(mdata, "host")
+		}
+	} else {
+		// "If :authority is present, Host must be discarded" - A41
+		delete(mdata, "host")
+	}
+
 	if frame.StreamEnded() {
 		// s is just created by the caller. No lock needed.
 		s.state = streamReadDone
 	}
-	if state.data.timeoutSet {
-		s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
+	if timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
 	} else {
 		s.ctx, s.cancel = context.WithCancel(t.ctx)
 	}
@@ -344,31 +488,13 @@
 	}
 	s.ctx = peer.NewContext(s.ctx, pr)
 	// Attach the received metadata to the context.
-	if len(state.data.mdata) > 0 {
-		s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
-	}
-	if state.data.statsTags != nil {
-		s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
-	}
-	if state.data.statsTrace != nil {
-		s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
-	}
-	if t.inTapHandle != nil {
-		var err error
-		info := &tap.Info{
-			FullMethodName: state.data.method,
+	if len(mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
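+		// Propagate the binary stats tags/trace headers into the stream's
+		// context; if a header was sent more than once, the last value wins.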
+		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
 		}
-		s.ctx, err = t.inTapHandle(s.ctx, info)
-		if err != nil {
-			warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
-			t.controlBuf.put(&cleanupStream{
-				streamID: s.id,
-				rst:      true,
-				rstCode:  http2.ErrCodeRefusedStream,
-				onWrite:  func() {},
-			})
-			s.cancel()
-			return false
+		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
 		}
 	}
 	t.mu.Lock()
@@ -388,14 +514,40 @@
 		s.cancel()
 		return false
 	}
-	if streamID%2 != 1 || streamID <= t.maxStreamID {
+	if httpMethod != http.MethodPost {
 		t.mu.Unlock()
-		// illegal gRPC stream id.
-		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		if logger.V(logLevel) {
+			logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+		}
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeProtocol,
+			onWrite:  func() {},
+		})
 		s.cancel()
-		return true
+		return false
 	}
-	t.maxStreamID = streamID
+	if t.inTapHandle != nil {
+		var err error
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+			t.mu.Unlock()
+			if logger.V(logLevel) {
+				logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
+			}
+			stat, ok := status.FromError(err)
+			if !ok {
+				stat = status.New(codes.PermissionDenied, err.Error())
+			}
+			t.controlBuf.put(&earlyAbortStream{
+				httpStatus:     200,
+				streamID:       s.id,
+				contentSubtype: s.contentSubtype,
+				status:         stat,
+			})
+			return false
+		}
+	}
 	t.activeStreams[streamID] = s
 	if len(t.activeStreams) == 1 {
 		t.idle = time.Time{}
@@ -417,7 +569,7 @@
 			LocalAddr:   t.localAddr,
 			Compression: s.recvCompress,
 			WireLength:  int(frame.Header().Length),
-			Header:      metadata.MD(state.data.mdata).Copy(),
+			Header:      metadata.MD(mdata).Copy(),
 		}
 		t.stats.HandleRPC(s.ctx, inHeader)
 	}
@@ -454,7 +606,9 @@
 		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		if err != nil {
 			if se, ok := err.(http2.StreamError); ok {
-				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				if logger.V(logLevel) {
+					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				}
 				t.mu.Lock()
 				s := t.activeStreams[se.StreamID]
 				t.mu.Unlock()
@@ -474,7 +628,9 @@
 				t.Close()
 				return
 			}
-			warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+			if logger.V(logLevel) {
+				logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+			}
 			t.Close()
 			return
 		}
@@ -497,7 +653,9 @@
 		case *http2.GoAwayFrame:
 			// TODO: Handle GoAway from the client appropriately.
 		default:
-			errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+			}
 		}
 	}
 }
@@ -599,6 +757,10 @@
 	if !ok {
 		return
 	}
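+	// The client has already half-closed this stream (END_STREAM was seen), so
+	// receiving more DATA on it is invalid; reset the stream with STREAM_CLOSED.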
+	if s.getState() == streamReadDone {
+		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+		return
+	}
 	if size > 0 {
 		if err := s.fc.onData(size); err != nil {
 			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -619,7 +781,7 @@
 			s.write(recvMsg{buffer: buffer})
 		}
 	}
-	if f.Header().Flags.Has(http2.FlagDataEndStream) {
+	if f.StreamEnded() {
 		// Received the end of stream from the client.
 		s.compareAndSwapState(streamActive, streamReadDone)
 		s.write(recvMsg{err: io.EOF})
@@ -719,7 +881,9 @@
 
 	if t.pingStrikes > maxPingStrikes {
 		// Send goaway and close the connection.
-		errorf("transport: Got too many pings from the client, closing the connection.")
+		if logger.V(logLevel) {
+			logger.Errorf("transport: Got too many pings from the client, closing the connection.")
+		}
 		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
 	}
 }
@@ -752,7 +916,9 @@
 	var sz int64
 	for _, f := range hdrFrame.hf {
 		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
-			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			if logger.V(logLevel) {
+				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
 			return false
 		}
 	}
@@ -789,7 +955,7 @@
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
 	if s.sendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
 	}
@@ -839,7 +1005,7 @@
 			}
 		} else { // Send a trailer only response.
 			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
 		}
 	}
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
@@ -849,7 +1015,7 @@
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
-			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
 		} else {
 			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
 		}
@@ -909,13 +1075,6 @@
 			return ContextErr(s.ctx.Err())
 		}
 	}
-	// Add some data to header frame so that we can equally distribute bytes across frames.
-	emptyLen := http2MaxFrameLen - len(hdr)
-	if emptyLen > len(data) {
-		emptyLen = len(data)
-	}
-	hdr = append(hdr, data[:emptyLen]...)
-	data = data[emptyLen:]
 	df := &dataFrame{
 		streamID:    s.id,
 		h:           hdr,
@@ -977,17 +1136,19 @@
 			if val <= 0 {
 				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
 				// Gracefully close the connection.
-				t.drain(http2.ErrCodeNo, []byte{})
+				t.Drain()
 				return
 			}
 			idleTimer.Reset(val)
 		case <-ageTimer.C:
-			t.drain(http2.ErrCodeNo, []byte{})
+			t.Drain()
 			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
 			select {
 			case <-ageTimer.C:
 				// Close the connection after grace period.
-				infof("transport: closing server transport due to maximum connection age.")
+				if logger.V(logLevel) {
+					logger.Infof("transport: closing server transport due to maximum connection age.")
+				}
 				t.Close()
 			case <-t.done:
 			}
@@ -1004,7 +1165,9 @@
 				continue
 			}
 			if outstandingPing && kpTimeoutLeft <= 0 {
-				infof("transport: closing server transport due to idleness.")
+				if logger.V(logLevel) {
+					logger.Infof("transport: closing server transport due to idleness.")
+				}
 				t.Close()
 				return
 			}
@@ -1032,11 +1195,11 @@
 // Close starts shutting down the http2Server transport.
 // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
 // could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
+func (t *http2Server) Close() {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return errors.New("transport: Close() was already called")
+		return
 	}
 	t.state = closing
 	streams := t.activeStreams
@@ -1044,7 +1207,9 @@
 	t.mu.Unlock()
 	t.controlBuf.finish()
 	close(t.done)
-	err := t.conn.Close()
+	if err := t.conn.Close(); err != nil && logger.V(logLevel) {
+		logger.Infof("transport: error closing conn during Close: %v", err)
+	}
 	if channelz.IsOn() {
 		channelz.RemoveEntry(t.channelzID)
 	}
@@ -1056,7 +1221,6 @@
 		connEnd := &stats.ConnEnd{}
 		t.stats.HandleConn(t.ctx, connEnd)
 	}
-	return err
 }
 
 // deleteStream deletes the stream s from transport's active streams.
@@ -1121,17 +1285,13 @@
 }
 
 func (t *http2Server) Drain() {
-	t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.drainChan != nil {
 		return
 	}
 	t.drainChan = make(chan struct{})
-	t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
 }
 
 var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1139,20 +1299,23 @@
 // Handles outgoing GoAway and returns true if loopy needs to put itself
 // in draining mode.
 func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
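+	// Per the lock ordering documented on http2Server, maxStreamMu may not be
+	// acquired while mu is held, so take it first.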
+	t.maxStreamMu.Lock()
 	t.mu.Lock()
 	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		// The transport is closing.
 		return false, ErrConnClosing
 	}
-	sid := t.maxStreamID
 	if !g.headsUp {
 		// Stop accepting more streams now.
 		t.state = draining
+		sid := t.maxStreamID
 		if len(t.activeStreams) == 0 {
 			g.closeConn = true
 		}
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
 			return false, err
 		}
@@ -1165,6 +1328,7 @@
 		return true, nil
 	}
 	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
 	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
 	// Follow that with a ping and wait for the ack to come back or a timer
 	// to expire. During this time accept new streams since they might have
@@ -1249,3 +1413,18 @@
 	j := grpcrand.Int63n(2*r) - r
 	return time.Duration(j)
 }
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+	return conn
+}
+
+// setConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
+	return context.WithValue(ctx, connectionKey{}, conn)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8f5f334..d8247bc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -27,6 +27,7 @@
 	"math"
 	"net"
 	"net/http"
+	"net/url"
 	"strconv"
 	"strings"
 	"time"
@@ -37,6 +38,7 @@
 	"golang.org/x/net/http2/hpack"
 	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/status"
 )
 
@@ -50,7 +52,7 @@
 	// "proto" as a suffix after "+" or ";".  See
 	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
 	// for more details.
-	baseContentType = "application/grpc"
+
 )
 
 var (
@@ -71,13 +73,6 @@
 		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
 		http2.ErrCodeHTTP11Required:     codes.Internal,
 	}
-	statusCodeConvTab = map[codes.Code]http2.ErrCode{
-		codes.Internal:          http2.ErrCodeInternal,
-		codes.Canceled:          http2.ErrCodeCancel,
-		codes.Unavailable:       http2.ErrCodeRefusedStream,
-		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
-		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
-	}
 	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
 	HTTPStatusConvTab = map[int]codes.Code{
 		// 400 Bad Request - INTERNAL.
@@ -97,54 +92,9 @@
 		// 504 Gateway timeout - UNAVAILABLE.
 		http.StatusGatewayTimeout: codes.Unavailable,
 	}
+	logger = grpclog.Component("transport")
 )
 
-type parsedHeaderData struct {
-	encoding string
-	// statusGen caches the stream status received from the trailer the server
-	// sent.  Client side only.  Do not access directly.  After all trailers are
-	// parsed, use the status method to retrieve the status.
-	statusGen *status.Status
-	// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
-	// intended for direct access outside of parsing.
-	rawStatusCode *int
-	rawStatusMsg  string
-	httpStatus    *int
-	// Server side only fields.
-	timeoutSet bool
-	timeout    time.Duration
-	method     string
-	// key-value metadata map from the peer.
-	mdata          map[string][]string
-	statsTags      []byte
-	statsTrace     []byte
-	contentSubtype string
-
-	// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
-	//
-	// We are in gRPC mode (peer speaking gRPC) if:
-	// 	* We are client side and have already received a HEADER frame that indicates gRPC peer.
-	//  * The header contains valid  a content-type, i.e. a string starts with "application/grpc"
-	// And we should handle error specific to gRPC.
-	//
-	// Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
-	// are in HTTP fallback mode, and should handle error specific to HTTP.
-	isGRPC         bool
-	grpcErr        error
-	httpErr        error
-	contentTypeErr string
-}
-
-// decodeState configures decoding criteria and records the decoded data.
-type decodeState struct {
-	// whether decoding on server side or not
-	serverSide bool
-
-	// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
-	// frame once decodeHeader function has been invoked and returned.
-	data parsedHeaderData
-}
-
 // isReservedHeader checks whether hdr belongs to HTTP2 headers
 // reserved by gRPC protocol. Any other headers are classified as the
 // user-specified metadata.
@@ -182,54 +132,6 @@
 	}
 }
 
-// contentSubtype returns the content-subtype for the given content-type.  The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
-	if contentType == baseContentType {
-		return "", true
-	}
-	if !strings.HasPrefix(contentType, baseContentType) {
-		return "", false
-	}
-	// guaranteed since != baseContentType and has baseContentType prefix
-	switch contentType[len(baseContentType)] {
-	case '+', ';':
-		// this will return true for "application/grpc+" or "application/grpc;"
-		// which the previous validContentType function tested to be valid, so we
-		// just say that no content-subtype is specified in this case
-		return contentType[len(baseContentType)+1:], true
-	default:
-		return "", false
-	}
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
-	if contentSubtype == "" {
-		return baseContentType
-	}
-	return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
-	if d.data.statusGen == nil {
-		// No status-details were provided; generate status using code/msg.
-		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
-	}
-	return d.data.statusGen
-}
-
 const binHdrSuffix = "-bin"
 
 func encodeBinHeader(v []byte) string {
@@ -259,164 +161,16 @@
 	return v, nil
 }
 
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
-	// frame.Truncated is set to true when framer detects that the current header
-	// list size hits MaxHeaderListSize limit.
-	if frame.Truncated {
-		return status.Error(codes.Internal, "peer header list size exceeded limit")
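+// decodeGRPCStatusDetails decodes a base64-encoded grpc-status-details-bin
+// header value and converts it into a *status.Status.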
+func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
+	v, err := decodeBinHeader(rawDetails)
+	if err != nil {
+		return nil, err
 	}
-
-	for _, hf := range frame.Fields {
-		d.processHeaderField(hf)
+	st := &spb.Status{}
+	if err = proto.Unmarshal(v, st); err != nil {
+		return nil, err
 	}
-
-	if d.data.isGRPC {
-		if d.data.grpcErr != nil {
-			return d.data.grpcErr
-		}
-		if d.serverSide {
-			return nil
-		}
-		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
-			// gRPC status doesn't exist.
-			// Set rawStatusCode to be unknown and return nil error.
-			// So that, if the stream has ended this Unknown status
-			// will be propagated to the user.
-			// Otherwise, it will be ignored. In which case, status from
-			// a later trailer, that has StreamEnded flag set, is propagated.
-			code := int(codes.Unknown)
-			d.data.rawStatusCode = &code
-		}
-		return nil
-	}
-
-	// HTTP fallback mode
-	if d.data.httpErr != nil {
-		return d.data.httpErr
-	}
-
-	var (
-		code = codes.Internal // when header does not include HTTP status, return INTERNAL
-		ok   bool
-	)
-
-	if d.data.httpStatus != nil {
-		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
-		if !ok {
-			code = codes.Unknown
-		}
-	}
-
-	return status.Error(code, d.constructHTTPErrMsg())
-}
-
-// constructErrMsg constructs error message to be returned in HTTP fallback mode.
-// Format: HTTP status code and its corresponding message + content-type error message.
-func (d *decodeState) constructHTTPErrMsg() string {
-	var errMsgs []string
-
-	if d.data.httpStatus == nil {
-		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
-	} else {
-		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
-	}
-
-	if d.data.contentTypeErr == "" {
-		errMsgs = append(errMsgs, "transport: missing content-type field")
-	} else {
-		errMsgs = append(errMsgs, d.data.contentTypeErr)
-	}
-
-	return strings.Join(errMsgs, "; ")
-}
-
-func (d *decodeState) addMetadata(k, v string) {
-	if d.data.mdata == nil {
-		d.data.mdata = make(map[string][]string)
-	}
-	d.data.mdata[k] = append(d.data.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
-	switch f.Name {
-	case "content-type":
-		contentSubtype, validContentType := contentSubtype(f.Value)
-		if !validContentType {
-			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
-			return
-		}
-		d.data.contentSubtype = contentSubtype
-		// TODO: do we want to propagate the whole content-type in the metadata,
-		// or come up with a way to just propagate the content-subtype if it was set?
-		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
-		// in the metadata?
-		d.addMetadata(f.Name, f.Value)
-		d.data.isGRPC = true
-	case "grpc-encoding":
-		d.data.encoding = f.Value
-	case "grpc-status":
-		code, err := strconv.Atoi(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
-			return
-		}
-		d.data.rawStatusCode = &code
-	case "grpc-message":
-		d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
-	case "grpc-status-details-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
-			return
-		}
-		s := &spb.Status{}
-		if err := proto.Unmarshal(v, s); err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
-			return
-		}
-		d.data.statusGen = status.FromProto(s)
-	case "grpc-timeout":
-		d.data.timeoutSet = true
-		var err error
-		if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
-		}
-	case ":path":
-		d.data.method = f.Value
-	case ":status":
-		code, err := strconv.Atoi(f.Value)
-		if err != nil {
-			d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
-			return
-		}
-		d.data.httpStatus = &code
-	case "grpc-tags-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
-			return
-		}
-		d.data.statsTags = v
-		d.addMetadata(f.Name, string(v))
-	case "grpc-trace-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
-			return
-		}
-		d.data.statsTrace = v
-		d.addMetadata(f.Name, string(v))
-	default:
-		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
-			break
-		}
-		v, err := decodeMetadataHeader(f.Name, f.Value)
-		if err != nil {
-			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
-			return
-		}
-		d.addMetadata(f.Name, v)
-	}
+	return status.FromProto(st), nil
 }
 
 type timeoutUnit uint8
@@ -449,41 +203,6 @@
 	return
 }
 
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and round-up the result. Note that this is
-// equivalent to (d+r-1)/r but has less chance to overflow.
-func div(d, r time.Duration) int64 {
-	if m := d % r; m > 0 {
-		return int64(d/r + 1)
-	}
-	return int64(d / r)
-}
-
-// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
-	if t <= 0 {
-		return "0n"
-	}
-	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "n"
-	}
-	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "u"
-	}
-	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "m"
-	}
-	if d := div(t, time.Second); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "S"
-	}
-	if d := div(t, time.Minute); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "M"
-	}
-	// Note that maxTimeoutValue * time.Hour > MaxInt64.
-	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
 func decodeTimeout(s string) (time.Duration, error) {
 	size := len(s)
 	if size < 2 {
@@ -675,3 +394,31 @@
 	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
 	return f
 }
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
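+		// target looks like a URL with a scheme (e.g. "unix:///abs/path"):
+		// parse it and special-case the unix scheme.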
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index 879df80..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Infof(format, args...)
-	}
-}
-
-func warningf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Warningf(format, args...)
-	}
-}
-
-func errorf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Errorf(format, args...)
-	}
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000..c11b527
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer, carried as an attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing the network type in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	return v.(string), true
+}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
similarity index 71%
rename from vendor/google.golang.org/grpc/proxy.go
rename to vendor/google.golang.org/grpc/internal/transport/proxy.go
index f8f69bf..4159619 100644
--- a/vendor/google.golang.org/grpc/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -16,13 +16,12 @@
  *
  */
 
-package grpc
+package transport
 
 import (
 	"bufio"
 	"context"
 	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -34,13 +33,11 @@
 const proxyAuthHeaderKey = "Proxy-Authorization"
 
 var (
-	// errDisabled indicates that proxy is disabled for the address.
-	errDisabled = errors.New("proxy is disabled for the address")
 	// The following variable will be overwritten in the tests.
 	httpProxyFromEnvironment = http.ProxyFromEnvironment
 )
 
-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+func mapAddress(address string) (*url.URL, error) {
 	req := &http.Request{
 		URL: &url.URL{
 			Scheme: "https",
@@ -51,9 +48,6 @@
 	if err != nil {
 		return nil, err
 	}
-	if url == nil {
-		return nil, errDisabled
-	}
 	return url, nil
 }
 
@@ -76,7 +70,7 @@
 	return base64.StdEncoding.EncodeToString([]byte(auth))
 }
 
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
 	defer func() {
 		if err != nil {
 			conn.Close()
@@ -115,32 +109,28 @@
 	return &bufConn{Conn: conn, r: r}, nil
 }
 
-// newProxyDialer returns a dialer that connects to proxy first if necessary.
-// The returned dialer checks if a proxy is necessary, dial to the proxy with the
-// provided dialer, does HTTP CONNECT handshake and returns the connection.
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
-	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
-		var newAddr string
-		proxyURL, err := mapAddress(ctx, addr)
-		if err != nil {
-			if err != errDisabled {
-				return nil, err
-			}
-			newAddr = addr
-		} else {
-			newAddr = proxyURL.Host
-		}
+// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
+// is necessary, dials, does the HTTP CONNECT handshake, and returns the
+// connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+	newAddr := addr
+	proxyURL, err := mapAddress(addr)
+	if err != nil {
+		return nil, err
+	}
+	if proxyURL != nil {
+		newAddr = proxyURL.Host
+	}
 
-		conn, err = dialer(ctx, newAddr)
-		if err != nil {
-			return
-		}
-		if proxyURL != nil {
-			// proxy is disabled if proxyURL is nil.
-			conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
-		}
+	conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+	if err != nil {
 		return
 	}
+	if proxyURL != nil {
+		// proxy is disabled if proxyURL is nil.
+		conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+	}
+	return
 }
 
 func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index a30da9e..d3bf65b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -30,16 +30,20 @@
 	"net"
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
 )
 
+const logLevel = 2
+
 type bufferPool struct {
 	pool sync.Pool
 }
@@ -238,6 +242,7 @@
 	ctx          context.Context    // the associated context of the stream
 	cancel       context.CancelFunc // always nil for client side Stream
 	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
 	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
 	method       string             // the associated RPC method of the stream
 	recvCompress string
@@ -514,7 +519,8 @@
 // ServerConfig consists of all the configurations to establish a server transport.
 type ServerConfig struct {
 	MaxStreams            uint32
-	AuthInfo              credentials.AuthInfo
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
 	InTapHandle           tap.ServerInHandle
 	StatsHandler          stats.Handler
 	KeepaliveParams       keepalive.ServerParameters
@@ -528,12 +534,6 @@
 	HeaderTableSize       *uint32
 }
 
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
-	return newHTTP2Server(conn, config)
-}
-
 // ConnectOptions covers all relevant options for communicating with the server.
 type ConnectOptions struct {
 	// UserAgent is the application user agent.
@@ -566,19 +566,14 @@
 	ChannelzParentID int64
 	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 	MaxHeaderListSize *uint32
-}
-
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
-	Addr      string
-	Metadata  interface{}
-	Authority string
+	// UseProxy specifies if a proxy should be used.
+	UseProxy bool
 }
 
 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
-	return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
 }
 
 // Options provides additional hints and information for message
@@ -613,6 +608,8 @@
 	ContentSubtype string
 
 	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+	DoneFunc func() // called when the stream is finished
 }
 
 // ClientTransport is the common interface for all gRPC client-side transport
@@ -621,7 +618,7 @@
 	// Close tears down this transport. Once it returns, the transport
 	// should not be accessed any more. The caller must make sure this
 	// is called only once.
-	Close() error
+	Close(err error)
 
 	// GracefulClose starts to tear down the transport: the transport will stop
 	// accepting new RPCs and NewStream will return error. Once all streams are
@@ -655,8 +652,9 @@
 	// HTTP/2).
 	GoAway() <-chan struct{}
 
-	// GetGoAwayReason returns the reason why GoAway frame was received.
-	GetGoAwayReason() GoAwayReason
+	// GetGoAwayReason returns the reason why the GoAway frame was received,
+	// along with a human-readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
 
 	// RemoteAddr returns the remote network address.
 	RemoteAddr() net.Addr
@@ -692,7 +690,7 @@
 	// Close tears down the transport. Once it is called, the transport
 	// should not be accessed any more. All the pending streams and their
 	// handlers will be terminated asynchronously.
-	Close() error
+	Close()
 
 	// RemoteAddr returns the remote network address.
 	RemoteAddr() net.Addr
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 0000000..e8b4927
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/resolver"
+)
+
+// handshakeClusterNameKey is the type used as the key to store cluster name in
+// the Attributes field of resolver.Address.
+type handshakeClusterNameKey struct{}
+
+// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
+// is updated with the cluster name.
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
+	return addr
+}
+
+// GetXDSHandshakeClusterName returns cluster name stored in attr.
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
+	v := attr.Value(handshakeClusterNameKey{})
+	name, ok := v.(string)
+	return name, ok
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index cf6d1b9..3604c78 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -75,13 +75,9 @@
 		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
 	}
 	md := MD{}
-	var key string
-	for i, s := range kv {
-		if i%2 == 0 {
-			key = strings.ToLower(s)
-			continue
-		}
-		md[key] = append(md[key], s)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
 	}
 	return md
 }
@@ -97,12 +93,16 @@
 }
 
 // Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
 func (md MD) Get(k string) []string {
 	k = strings.ToLower(k)
 	return md[k]
 }
 
 // Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
 func (md MD) Set(k string, vals ...string) {
 	if len(vals) == 0 {
 		return
@@ -111,7 +111,10 @@
 	md[k] = vals
 }
 
-// Append adds the values to key k, not overwriting what was already stored at that key.
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
 func (md MD) Append(k string, vals ...string) {
 	if len(vals) == 0 {
 		return
@@ -120,9 +123,17 @@
 	md[k] = append(md[k], vals...)
 }
 
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+	k = strings.ToLower(k)
+	delete(md, k)
+}
+
 // Join joins any number of mds into a single MD.
-// The order of values for each key is determined by the order in which
-// the mds containing those values are presented to Join.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
 func Join(mds ...MD) MD {
 	out := MD{}
 	for _, md := range mds {
@@ -149,8 +160,8 @@
 }
 
 // AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
 func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
 	if len(kv)%2 == 1 {
 		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
@@ -163,20 +174,34 @@
 	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
 
-// FromIncomingContext returns the incoming metadata in ctx if it exists.  The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
-	md, ok = ctx.Value(mdIncomingKey{}).(MD)
-	return
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil, false
+	}
+	out := MD{}
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = v
+	}
+	return out, true
 }
 
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
+// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
 //
-// This is intended for gRPC-internal use ONLY.
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+//
+// This is intended for gRPC-internal use ONLY. Users should use
+// FromOutgoingContext instead.
 func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
@@ -186,21 +211,34 @@
 	return raw.md, raw.added, true
 }
 
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists.  The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
 func FromOutgoingContext(ctx context.Context) (MD, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
 		return nil, false
 	}
 
-	mds := make([]MD, 0, len(raw.added)+1)
-	mds = append(mds, raw.md)
-	for _, vv := range raw.added {
-		mds = append(mds, Pairs(vv...))
+	out := MD{}
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = v
 	}
-	return Join(mds...), ok
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
 }
 
 type rawMD struct {
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
deleted file mode 100644
index c9f79dc..0000000
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net"
-	"strconv"
-	"time"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-const (
-	defaultPort = "443"
-	defaultFreq = time.Minute * 30
-)
-
-var (
-	errMissingAddr  = errors.New("missing address")
-	errWatcherClose = errors.New("watcher has been closed")
-
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
-// create watchers that poll the DNS server using the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
-	return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
-// watchers that poll the DNS server using the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
-	return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
-	// frequency of polling the DNS server that the watchers created by this resolver will use.
-	freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
-	}
-	if ip.To4() != nil {
-		return addr, true
-	}
-	return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
-	if target == "" {
-		return "", "", errMissingAddr
-	}
-
-	if ip := net.ParseIP(target); ip != nil {
-		// target is an IPv4 or IPv6(without brackets) address
-		return target, defaultPort, nil
-	}
-	if host, port, err := net.SplitHostPort(target); err == nil {
-		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
-		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
-			host = "localhost"
-		}
-		if port == "" {
-			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
-			port = defaultPort
-		}
-		return host, port, nil
-	}
-	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
-		// target doesn't have port
-		return host, port, nil
-	}
-	return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
-	host, port, err := parseTarget(target)
-	if err != nil {
-		return nil, err
-	}
-
-	if net.ParseIP(host) != nil {
-		ipWatcher := &ipWatcher{
-			updateChan: make(chan *Update, 1),
-		}
-		host, _ = formatIP(host)
-		ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
-		return ipWatcher, nil
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	return &dnsWatcher{
-		r:      r,
-		host:   host,
-		port:   port,
-		ctx:    ctx,
-		cancel: cancel,
-		t:      time.NewTimer(0),
-	}, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
-	r    *dnsResolver
-	host string
-	port string
-	// The latest resolved address set
-	curAddrs map[string]*Update
-	ctx      context.Context
-	cancel   context.CancelFunc
-	t        *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
-	updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
-// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
-	u, ok := <-i.updateChan
-	if !ok {
-		return nil, errWatcherClose
-	}
-	return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
-	close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
-	// Backend indicates the server is a backend server.
-	Backend AddressType = iota
-	// GRPCLB indicates the server is a grpclb load balancer.
-	GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
-	// AddrType is the type of server (grpc load balancer or backend).
-	AddrType AddressType
-	// ServerName is the name of the grpc load balancer. Used for authentication.
-	ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
-	var res []*Update
-	for a, u := range w.curAddrs {
-		if _, ok := newAddrs[a]; !ok {
-			u.Op = Delete
-			res = append(res, u)
-		}
-	}
-	for a, u := range newAddrs {
-		if _, ok := w.curAddrs[a]; !ok {
-			res = append(res, u)
-		}
-	}
-	return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
-	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, s := range srvs {
-		lbAddrs, err := lookupHost(w.ctx, s.Target)
-		if err != nil {
-			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-				continue
-			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
-			newAddrs[addr] = &Update{Addr: addr,
-				Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
-		}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	addrs, err := lookupHost(w.ctx, w.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, a := range addrs {
-		a, ok := formatIP(a)
-		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
-		}
-		addr := a + ":" + w.port
-		newAddrs[addr] = &Update{Addr: addr}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
-	newAddrs := w.lookupSRV()
-	if newAddrs == nil {
-		// If failed to get any balancer address (either no corresponding SRV for the
-		// target, or caused by failure during resolution/parsing of the balancer target),
-		// return any A record info available.
-		newAddrs = w.lookupHost()
-	}
-	result := w.compileUpdate(newAddrs)
-	w.curAddrs = newAddrs
-	return result
-}
-
-// Next returns the resolved address update(delta) for the target. If there's no
-// change, it will sleep for 30 mins and try to resolve again after that.
-func (w *dnsWatcher) Next() ([]*Update, error) {
-	for {
-		select {
-		case <-w.ctx.Done():
-			return nil, errWatcherClose
-		case <-w.t.C:
-		}
-		result := w.lookup()
-		// Next lookup should happen after an interval defined by w.r.freq.
-		w.t.Reset(w.r.freq)
-		if len(result) > 0 {
-			return result, nil
-		}
-	}
-}
-
-func (w *dnsWatcher) Close() {
-	w.cancel()
-}
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index f4c1c8b..0000000
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-//
-// This package is deprecated: please use package resolver instead.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
-	// Add indicates a new address is added.
-	Add Operation = iota
-	// Delete indicates an existing address is deleted.
-	Delete
-)
-
-// Update defines a name resolution update. Notice that it is not valid having both
-// empty string Addr and nil Metadata in an Update.
-//
-// Deprecated: please use package resolver.
-type Update struct {
-	// Op indicates the operation of the update.
-	Op Operation
-	// Addr is the updated address. It is empty string if there is no address update.
-	Addr string
-	// Metadata is the updated metadata. It is nil if there is no metadata update.
-	// Metadata is not required for a custom naming implementation.
-	Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
-	// Resolve creates a Watcher for target.
-	Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
-	// Next blocks until an update or error happens. It may return one or more
-	// updates. The first call should get the full set of the results. It should
-	// return an error if and only if Watcher cannot recover.
-	Next() ([]*Update, error)
-	// Close closes the Watcher.
-	Close()
-}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 0044789..e8367cb 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,80 +20,31 @@
 
 import (
 	"context"
-	"fmt"
 	"io"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/status"
 )
 
-// v2PickerWrapper wraps a balancer.Picker while providing the
-// balancer.V2Picker API.  It requires a pickerWrapper to generate errors
-// including the latest connectionError.  To be deleted when balancer.Picker is
-// updated to the balancer.V2Picker API.
-type v2PickerWrapper struct {
-	picker  balancer.Picker
-	connErr *connErr
-}
-
-func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
-	sc, done, err := v.picker.Pick(info.Ctx, info)
-	if err != nil {
-		if err == balancer.ErrTransientFailure {
-			return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
-		}
-		return balancer.PickResult{}, err
-	}
-	return balancer.PickResult{SubConn: sc, Done: done}, nil
-}
-
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblock when there's a picker update.
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
 	blockingCh chan struct{}
-	picker     balancer.V2Picker
-
-	// The latest connection error.  TODO: remove when V1 picker is deprecated;
-	// balancer should be responsible for providing the error.
-	*connErr
-}
-
-type connErr struct {
-	mu  sync.Mutex
-	err error
-}
-
-func (c *connErr) updateConnectionError(err error) {
-	c.mu.Lock()
-	c.err = err
-	c.mu.Unlock()
-}
-
-func (c *connErr) connectionError() error {
-	c.mu.Lock()
-	err := c.err
-	c.mu.Unlock()
-	return err
+	picker     balancer.Picker
 }
 
 func newPickerWrapper() *pickerWrapper {
-	return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+	return &pickerWrapper{blockingCh: make(chan struct{})}
 }
 
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
-	pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
-func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
 	pw.mu.Lock()
 	if pw.done {
 		pw.mu.Unlock()
@@ -154,8 +105,6 @@
 				var errStr string
 				if lastPickErr != nil {
 					errStr = "latest balancer error: " + lastPickErr.Error()
-				} else if connectionErr := pw.connectionError(); connectionErr != nil {
-					errStr = "latest connection error: " + connectionErr.Error()
 				} else {
 					errStr = ctx.Err().Error()
 				}
@@ -180,26 +129,25 @@
 			if err == balancer.ErrNoSubConnAvailable {
 				continue
 			}
-			if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
-				if !failfast {
-					lastPickErr = err
-					continue
-				}
-				return nil, nil, status.Error(codes.Unavailable, err.Error())
-			}
 			if _, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
 				return nil, nil, err
 			}
-			// err is some other error.
-			return nil, nil, status.Error(codes.Unknown, err.Error())
+			// For all other errors, wait for ready RPCs should block and other
+			// RPCs should fail with unavailable.
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return nil, nil, status.Error(codes.Unavailable, err.Error())
 		}
 
 		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
-			grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
+			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
 			continue
 		}
-		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+		if t := acw.getAddrConn().getReadyTransport(); t != nil {
 			if channelz.IsOn() {
 				return t, doneChannelzWrapper(acw, pickResult.Done), nil
 			}
@@ -210,7 +158,7 @@
 			// DoneInfo with default value works.
 			pickResult.Done(balancer.DoneInfo{})
 		}
-		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
 		// A valid picker always returns READY subConn. This means the state of ac
 		// just changed, and picker will be updated shortly.
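
The reworked pick loop above now distinguishes three error classes coming out of a picker: balancer.ErrNoSubConnAvailable always blocks until a new picker arrives, a status error ends the RPC with exactly that status, and any other error blocks wait-for-ready RPCs while failing fail-fast RPCs with codes.Unavailable. A minimal, hypothetical picker sketching those three outcomes (the field names and the "draining"/"no healthy backends" conditions are inventions of this example, not part of the change):

package examplebalancer

import (
	"errors"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// demoPicker exists only to illustrate how the pick loop above treats each
// error class; it is not part of this change.
type demoPicker struct {
	sc      balancer.SubConn // non-nil once a connection is READY
	drain   bool             // balancer wants new RPCs to fail with a status
	pending bool             // still connecting; a new picker will follow
}

func (p *demoPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	switch {
	case p.sc != nil:
		return balancer.PickResult{SubConn: p.sc}, nil
	case p.drain:
		// A status error ends the RPC unconditionally with that status.
		return balancer.PickResult{}, status.Error(codes.Unavailable, "draining")
	case p.pending:
		// Always blocks the RPC until the balancer pushes a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	default:
		// Any other error: wait-for-ready RPCs keep blocking; fail-fast RPCs
		// fail with codes.Unavailable wrapping this message.
		return balancer.PickResult{}, errors.New("no healthy backends")
	}
}
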
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index c43dac9..5168b62 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -20,13 +20,10 @@
 
 import (
 	"errors"
+	"fmt"
 
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/status"
 )
 
 // PickFirstBalancerName is the name of the pick_first balancer.
@@ -52,30 +49,16 @@
 	sc    balancer.SubConn
 }
 
-var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		b.ResolverError(err)
-		return
-	}
-	b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
-}
-
 func (b *pickfirstBalancer) ResolverError(err error) {
 	switch b.state {
 	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
 		// Set a failing picker if we don't have a good picker.
 		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-			Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
-		)
+			Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+		})
 	}
-	if grpclog.V(2) {
-		grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
+	if logger.V(2) {
+		logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
 	}
 }
 
@@ -88,32 +71,32 @@
 		var err error
 		b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
 		if err != nil {
-			if grpclog.V(2) {
-				grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+			if logger.V(2) {
+				logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
 			}
 			b.state = connectivity.TransientFailure
 			b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-				Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
-			)
+				Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+			})
 			return balancer.ErrBadResolverState
 		}
 		b.state = connectivity.Idle
 		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
 		b.sc.Connect()
 	} else {
-		b.sc.UpdateAddresses(cs.ResolverState.Addresses)
+		b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
 		b.sc.Connect()
 	}
 	return nil
 }
 
 func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
-	if grpclog.V(2) {
-		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	if logger.V(2) {
+		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
 	}
 	if b.sc != sc {
-		if grpclog.V(2) {
-			grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		if logger.V(2) {
+			logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
 		}
 		return
 	}
@@ -124,20 +107,16 @@
 	}
 
 	switch s.ConnectivityState {
-	case connectivity.Ready, connectivity.Idle:
+	case connectivity.Ready:
 		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
 	case connectivity.Connecting:
 		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
+	case connectivity.Idle:
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
 	case connectivity.TransientFailure:
-		err := balancer.ErrTransientFailure
-		// TODO: this can be unconditional after the V1 API is removed, as
-		// SubConnState will always contain a connection error.
-		if s.ConnectionError != nil {
-			err = balancer.TransientFailureError(s.ConnectionError)
-		}
 		b.cc.UpdateState(balancer.State{
 			ConnectivityState: s.ConnectivityState,
-			Picker:            &picker{err: err},
+			Picker:            &picker{err: s.ConnectionError},
 		})
 	}
 }
@@ -145,6 +124,12 @@
 func (b *pickfirstBalancer) Close() {
 }
 
+func (b *pickfirstBalancer) ExitIdle() {
+	if b.sc != nil && b.state == connectivity.Idle {
+		b.sc.Connect()
+	}
+}
+
 type picker struct {
 	result balancer.PickResult
 	err    error
@@ -154,6 +139,17 @@
 	return p.result, p.err
 }
 
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	sc balancer.SubConn
+}
+
+func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	i.sc.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
 func init() {
 	balancer.Register(newPickfirstBuilder())
 }
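
With idlePicker and ExitIdle above, a pick_first channel whose SubConn drops to IDLE reconnects lazily: the next Pick calls Connect and reports ErrNoSubConnAvailable, so the RPC waits until the connection is READY again. A small client-side sketch of triggering that transition explicitly (the target address is made up):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

func main() {
	// Hypothetical target; pick_first is the default LB policy.
	conn, err := grpc.Dial("dns:///example.internal:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Kick the channel out of IDLE explicitly; issuing an RPC would do the
	// same thing, because the idlePicker above calls SubConn.Connect and
	// returns ErrNoSubConnAvailable until the connection is READY.
	conn.Connect()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for s := conn.GetState(); s != connectivity.Ready; s = conn.GetState() {
		if !conn.WaitForStateChange(ctx, s) {
			log.Printf("gave up waiting; last state: %v", s)
			return
		}
	}
	log.Printf("connection is READY")
}
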
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 76acbbc..0a1e975 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -25,7 +25,10 @@
 
 // PreparedMsg is responsible for creating a Marshalled and Compressed object.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PreparedMsg struct {
 	// Struct for preparing msg before sending them
 	encodedData []byte
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 0000000..58c802f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu -o pipefail
+
+WORKDIR=$(mktemp -d)
+
+function finish {
+  rm -rf "$WORKDIR"
+}
+trap finish EXIT
+
+export GOBIN=${WORKDIR}/bin
+export PATH=${GOBIN}:${PATH}
+mkdir -p ${GOBIN}
+
+echo "remove existing generated files"
+# grpc_testingv3/testv3.pb.go is not re-generated because it was
+# intentionally generated by an older version of protoc-gen-go.
+rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
+
+echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
+(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
+
+echo "go install cmd/protoc-gen-go-grpc"
+(cd cmd/protoc-gen-go-grpc && go install .)
+
+echo "git clone https://github.com/grpc/grpc-proto"
+git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
+
+echo "git clone https://github.com/protocolbuffers/protobuf"
+git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
+
+# Pull in code.proto as a proto dependency
+mkdir -p ${WORKDIR}/googleapis/google/rpc
+echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
+curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
+
+mkdir -p ${WORKDIR}/out
+
+# Generates sources without the embed requirement
+LEGACY_SOURCES=(
+  ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
+  ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
+  ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
+  ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
+  profiling/proto/service.proto
+  reflection/grpc_reflection_v1alpha/reflection.proto
+)
+
+# Generates only the new gRPC Service symbols
+SOURCES=(
+  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
+  ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
+  ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
+  ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
+  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
+  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
+  ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
+  ${WORKDIR}/grpc-proto/grpc/testing/*.proto
+  ${WORKDIR}/grpc-proto/grpc/core/*.proto
+)
+
+# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
+# import path of 'bar' in the generated code when 'foo.proto' is imported in
+# one of the sources.
+#
+# Note that the protos listed here are all for testing purposes. All protos to
+# be used externally should have a go_package option (and they don't need to be
+# listed here).
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
+Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
+
+for src in ${SOURCES[@]}; do
+  echo "protoc ${src}"
+  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
+    -I"." \
+    -I${WORKDIR}/grpc-proto \
+    -I${WORKDIR}/googleapis \
+    -I${WORKDIR}/protobuf/src \
+    ${src}
+done
+
+for src in ${LEGACY_SOURCES[@]}; do
+  echo "protoc ${src}"
+  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
+    -I"." \
+    -I${WORKDIR}/grpc-proto \
+    -I${WORKDIR}/googleapis \
+    -I${WORKDIR}/protobuf/src \
+    ${src}
+done
+
+# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
+# current location. Move it into the right place.
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+
+# grpc_testingv3/testv3.pb.go is not re-generated because it was
+# intentionally generated by an older version of protoc-gen-go.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
+
+# grpc/service_config/service_config.proto does not have a go_package option.
+mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
+
+# grpc/testing does not have a go_package option.
+mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
+mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
+
+cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 0000000..e87ecd0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+	addr  Address
+	value interface{}
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes.  BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently.  Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+	m map[string]addressMapEntryList
+}
+
+type addressMapEntryList []*addressMapEntry
+
+// NewAddressMap creates a new AddressMap.
+func NewAddressMap() *AddressMap {
+	return &AddressMap{m: make(map[string]addressMapEntryList)}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList) find(addr Address) int {
+	if len(l) == 0 {
+		return -1
+	}
+	for i, entry := range l {
+		if entry.addr.ServerName == addr.ServerName &&
+			entry.addr.Attributes.Equal(addr.Attributes) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
+	entryList := a.m[addr.Addr]
+	if entry := entryList.find(addr); entry != -1 {
+		return entryList[entry].value, true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMap) Set(addr Address, value interface{}) {
+	entryList := a.m[addr.Addr]
+	if entry := entryList.find(addr); entry != -1 {
+		a.m[addr.Addr][entry].value = value
+		return
+	}
+	a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMap) Delete(addr Address) {
+	entryList := a.m[addr.Addr]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addr.Addr] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
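
The new AddressMap keys entries on Addr together with ServerName and Attributes, so two addresses sharing a host:port but differing in those fields occupy separate slots. A minimal usage sketch (the addresses and values are made up):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-a"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-b"}

	m.Set(a, "subconn-a")
	m.Set(b, "subconn-b")

	// Same Addr, different ServerName: two distinct entries.
	fmt.Println(m.Len()) // 2

	if v, ok := m.Get(a); ok {
		fmt.Println(v) // subconn-a
	}

	m.Delete(b)
	for _, addr := range m.Keys() {
		fmt.Printf("%s (%s)\n", addr.Addr, addr.ServerName)
	}
}
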
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index fe14b2f..e28b680 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -23,6 +23,7 @@
 import (
 	"context"
 	"net"
+	"net/url"
 
 	"google.golang.org/grpc/attributes"
 	"google.golang.org/grpc/credentials"
@@ -85,12 +86,19 @@
 	Backend AddressType = iota
 	// GRPCLB indicates the address is for a grpclb load balancer.
 	//
-	// Deprecated: use Attributes in Address instead.
+	// Deprecated: to select the GRPCLB load balancing policy, use a service
+	// config with a corresponding loadBalancingConfig.  To supply balancer
+	// addresses to the GRPCLB load balancing policy, set State.Attributes
+	// using balancer/grpclb/state.Set.
 	GRPCLB
 )
 
 // Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
@@ -109,9 +117,14 @@
 	ServerName string
 
 	// Attributes contains arbitrary data about this address intended for
-	// consumption by the load balancing policy.
+	// consumption by the SubConn.
 	Attributes *attributes.Attributes
 
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy.  These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	BalancerAttributes *attributes.Attributes
+
 	// Type is the type of this address.
 	//
 	// Deprecated: use Attributes instead.
@@ -124,6 +137,15 @@
 	Metadata interface{}
 }
 
+// Equal returns whether a and o are identical.  Metadata is compared directly,
+// not with any recursive introspection.
+func (a *Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Type == o.Type && a.Metadata == o.Metadata
+}
+
 // BuildOptions includes additional information for the builder to create
 // the resolver.
 type BuildOptions struct {
@@ -174,7 +196,7 @@
 // gRPC to add new methods to this interface.
 type ClientConn interface {
 	// UpdateState updates the state of the ClientConn appropriately.
-	UpdateState(State)
+	UpdateState(State) error
 	// ReportError notifies the ClientConn that the Resolver encountered an
 	// error.  The ClientConn will notify the load balancer and begin calling
 	// ResolveNow on the Resolver with exponential backoff.
@@ -197,25 +219,36 @@
 
 // Target represents a target for gRPC, as specified in:
 // https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// It is parsed from the target string that gets passed into Dial or DialContext by the user. And
-// grpc passes it to the resolver and the balancer.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user. And gRPC passes it to the resolver and the balancer.
 //
-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
-// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
 //
-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
-// be the full target string. e.g. "foo.bar" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
+// Examples:
 //
-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
+// - "dns://some_authority/foo.bar"
+//   Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+// - "foo.bar"
+//   Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
+// - "unknown_scheme://authority/endpoint"
+//   Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
 type Target struct {
-	Scheme    string
+	// Deprecated: use URL.Scheme instead.
+	Scheme string
+	// Deprecated: use URL.Host instead.
 	Authority string
-	Endpoint  string
+	// Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
+	// the former is empty.
+	Endpoint string
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
 }
 
 // Builder creates a resolver that will be used to watch name resolution updates.
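
resolver.Target now carries the parsed url.URL next to the deprecated Scheme/Authority/Endpoint fields, and ClientConn.UpdateState reports an error. A hedged sketch of a custom resolver written against this shape; the "static" scheme and the comma-separated endpoint format are inventions of this example:

package exampleresolver

import (
	"strings"

	"google.golang.org/grpc/resolver"
)

// staticBuilder resolves invented "static:///host1:port,host2:port" targets;
// it is a toy illustration, not part of this change.
type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static" }

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	// Prefer the new URL field; Scheme/Authority/Endpoint still work but are
	// deprecated. URL.Opaque is set when URL.Path is empty.
	endpoint := target.URL.Path
	if endpoint == "" {
		endpoint = target.URL.Opaque
	}
	var addrs []resolver.Address
	for _, a := range strings.Split(strings.TrimPrefix(endpoint, "/"), ",") {
		addrs = append(addrs, resolver.Address{Addr: a})
	}
	// UpdateState now returns an error; when it reports ErrBadResolverState
	// the resolver is expected to re-resolve (see the sketch after the
	// resolver_conn_wrapper.go diff below). This toy resolver ignores it.
	_ = cc.UpdateState(resolver.State{Addresses: addrs})
	return nopResolver{}, nil
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() { resolver.Register(staticBuilder{}) }
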
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index edfda86..2c47cd5 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -22,7 +22,6 @@
 	"fmt"
 	"strings"
 	"sync"
-	"time"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/credentials"
@@ -41,8 +40,7 @@
 	done       *grpcsync.Event
 	curState   resolver.State
 
-	pollingMu sync.Mutex
-	polling   chan struct{}
+	incomingMu sync.Mutex // Synchronizes all the incoming calls.
 }
 
 // newCCResolverWrapper uses the resolver.Builder to build a Resolver and
@@ -93,104 +91,71 @@
 	ccr.resolverMu.Unlock()
 }
 
-// poll begins or ends asynchronous polling of the resolver based on whether
-// err is ErrBadResolverState.
-func (ccr *ccResolverWrapper) poll(err error) {
-	ccr.pollingMu.Lock()
-	defer ccr.pollingMu.Unlock()
-	if err != balancer.ErrBadResolverState {
-		// stop polling
-		if ccr.polling != nil {
-			close(ccr.polling)
-			ccr.polling = nil
-		}
-		return
-	}
-	if ccr.polling != nil {
-		// already polling
-		return
-	}
-	p := make(chan struct{})
-	ccr.polling = p
-	go func() {
-		for i := 0; ; i++ {
-			ccr.resolveNow(resolver.ResolveNowOptions{})
-			t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
-			select {
-			case <-p:
-				t.Stop()
-				return
-			case <-ccr.done.Done():
-				// Resolver has been closed.
-				t.Stop()
-				return
-			case <-t.C:
-				select {
-				case <-p:
-					return
-				default:
-				}
-				// Timer expired; re-resolve.
-			}
-		}
-	}()
-}
-
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
 	if ccr.done.HasFired() {
-		return
+		return nil
 	}
-	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
+	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(s)
 	}
 	ccr.curState = s
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+	if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+		return balancer.ErrBadResolverState
+	}
+	return nil
 }
 
 func (ccr *ccResolverWrapper) ReportError(err error) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
 	if ccr.done.HasFired() {
 		return
 	}
-	channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
-	ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
+	channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+	ccr.cc.updateResolverState(resolver.State{}, err)
 }
 
 // NewAddress is called by the resolver implementation to send addresses to gRPC.
 func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
 	if ccr.done.HasFired() {
 		return
 	}
-	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
+	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
 	}
 	ccr.curState.Addresses = addrs
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+	ccr.cc.updateResolverState(ccr.curState, nil)
 }
 
 // NewServiceConfig is called by the resolver implementation to send service
 // configs to gRPC.
 func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
 	if ccr.done.HasFired() {
 		return
 	}
-	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
+	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
 	if ccr.cc.dopts.disableServiceConfig {
-		channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
+		channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
 		return
 	}
 	scpr := parseServiceConfig(sc)
 	if scpr.Err != nil {
-		channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
-		ccr.poll(balancer.ErrBadResolverState)
+		channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
 		return
 	}
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
 	}
 	ccr.curState.ServiceConfig = scpr
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+	ccr.cc.updateResolverState(ccr.curState, nil)
 }
 
 func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
@@ -215,8 +180,8 @@
 	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
 		updates = append(updates, "resolver returned new addresses")
 	}
-	channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
+	channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
 		Desc:     fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
-		Severity: channelz.CtINFO,
+		Severity: channelz.CtInfo,
 	})
 }
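
With the polling goroutine removed from ccResolverWrapper, reacting to a rejected update is now the resolver's job: UpdateState surfaces balancer.ErrBadResolverState (and gRPC will also call ResolveNow with backoff). A hypothetical watch loop illustrating one way to use that return value; lookup, the field names, and the backoff bounds are placeholders:

package exampleresolver

import (
	"time"

	"google.golang.org/grpc/resolver"
)

// exampleResolver is hypothetical; it only shows reacting to UpdateState's
// error return now that the client-side polling loop above is gone.
type exampleResolver struct {
	cc     resolver.ClientConn
	lookup func() ([]resolver.Address, error)
	done   chan struct{}
}

func (r *exampleResolver) watch() {
	backoff := time.Second
	for {
		addrs, err := r.lookup()
		switch {
		case err != nil:
			// gRPC will call ResolveNow with its own backoff after this.
			r.cc.ReportError(err)
		case r.cc.UpdateState(resolver.State{Addresses: addrs}) != nil:
			// Typically balancer.ErrBadResolverState: retry with a growing
			// delay instead of relying on the removed polling goroutine.
			backoff *= 2
			if backoff > 30*time.Second {
				backoff = 30 * time.Second
			}
		default:
			backoff = time.Second
		}
		select {
		case <-r.done:
			return
		case <-time.After(backoff):
		}
	}
}

func (r *exampleResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (r *exampleResolver) Close()                                { close(r.done) }
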
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index cf9dbe7..5d407b0 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -27,7 +27,6 @@
 	"io"
 	"io/ioutil"
 	"math"
-	"net/url"
 	"strings"
 	"sync"
 	"time"
@@ -155,7 +154,6 @@
 type callInfo struct {
 	compressorType        string
 	failFast              bool
-	stream                ClientStream
 	maxReceiveMessageSize *int
 	maxSendMessageSize    *int
 	creds                 credentials.PerRPCCredentials
@@ -180,7 +178,7 @@
 
 	// after is called after the call has completed.  after cannot return an
 	// error, so any failures should be reported via output parameters.
-	after(*callInfo)
+	after(*callInfo, *csAttempt)
 }
 
 // EmptyCallOption does not alter the Call configuration.
@@ -188,8 +186,8 @@
 // by interceptors.
 type EmptyCallOption struct{}
 
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo)        {}
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
 
 // Header returns a CallOptions that retrieves the header metadata
 // for a unary RPC.
@@ -199,16 +197,18 @@
 
 // HeaderCallOption is a CallOption for collecting response header metadata.
 // The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type HeaderCallOption struct {
 	HeaderAddr *metadata.MD
 }
 
 func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		*o.HeaderAddr, _ = c.stream.Header()
-	}
+func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
 }
 
 // Trailer returns a CallOptions that retrieves the trailer metadata
@@ -219,16 +219,18 @@
 
 // TrailerCallOption is a CallOption for collecting response trailer metadata.
 // The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type TrailerCallOption struct {
 	TrailerAddr *metadata.MD
 }
 
 func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		*o.TrailerAddr = c.stream.Trailer()
-	}
+func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
 }
 
 // Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -239,22 +241,25 @@
 
 // PeerCallOption is a CallOption for collecting the identity of the remote
 // peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PeerCallOption struct {
 	PeerAddr *peer.Peer
 }
 
 func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		if x, ok := peer.FromContext(c.stream.Context()); ok {
-			*o.PeerAddr = *x
-		}
+func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
 	}
 }
 
 // WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false, the RPC will fail
+// connections or unreachable servers. If waitForReady is false and the
+// connection is in the TRANSIENT_FAILURE state, the RPC will fail
 // immediately. Otherwise, the RPC client will block the call until a
 // connection is available (or the call is canceled or times out) and will
 // retry the call if it fails due to a transient error.  gRPC will not retry if
@@ -276,7 +281,11 @@
 
 // FailFastCallOption is a CallOption for indicating whether an RPC should fail
 // fast or not.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type FailFastCallOption struct {
 	FailFast bool
 }
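
For callers, the clarified wait-for-ready semantics above are controlled per RPC with grpc.WaitForReady: a fail-fast call errors immediately while the connection is in TRANSIENT_FAILURE, whereas a wait-for-ready call blocks until a connection is READY or the context expires. A usage sketch against gRPC's own health service (the target address is made up):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("dns:///example.internal:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Fail-fast (the default) would error immediately in TRANSIENT_FAILURE;
	// WaitForReady(true) blocks until a connection is READY or ctx expires.
	resp, err := healthpb.NewHealthClient(conn).Check(
		ctx,
		&healthpb.HealthCheckRequest{},
		grpc.WaitForReady(true),
	)
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("serving status: %v", resp.Status)
}
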
@@ -285,7 +294,7 @@
 	c.failFast = o.FailFast
 	return nil
 }
-func (o FailFastCallOption) after(c *callInfo) {}
+func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
 // in bytes the client can receive.
@@ -295,7 +304,11 @@
 
 // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
 // size in bytes the client can receive.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxRecvMsgSizeCallOption struct {
 	MaxRecvMsgSize int
 }
@@ -304,7 +317,7 @@
 	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
 	return nil
 }
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // MaxCallSendMsgSize returns a CallOption which sets the maximum message size
 // in bytes the client can send.
@@ -314,7 +327,11 @@
 
 // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
 // size in bytes the client can send.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxSendMsgSizeCallOption struct {
 	MaxSendMsgSize int
 }
@@ -323,7 +340,7 @@
 	c.maxSendMessageSize = &o.MaxSendMsgSize
 	return nil
 }
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // for a call.
@@ -333,7 +350,11 @@
 
 // PerRPCCredsCallOption is a CallOption that indicates the per-RPC
 // credentials to use for the call.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PerRPCCredsCallOption struct {
 	Creds credentials.PerRPCCredentials
 }
@@ -342,19 +363,26 @@
 	c.creds = o.Creds
 	return nil
 }
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
+func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // UseCompressor returns a CallOption which sets the compressor used when
 // sending the request.  If WithCompressor is also set, UseCompressor has
 // higher priority.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func UseCompressor(name string) CallOption {
 	return CompressorCallOption{CompressorType: name}
 }
 
 // CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type CompressorCallOption struct {
 	CompressorType string
 }
@@ -363,7 +391,7 @@
 	c.compressorType = o.CompressorType
 	return nil
 }
-func (o CompressorCallOption) after(c *callInfo) {}
+func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // CallContentSubtype returns a CallOption that will set the content-subtype
 // for a call. For example, if content-subtype is "json", the Content-Type over
@@ -387,7 +415,11 @@
 
 // ContentSubtypeCallOption is a CallOption that indicates the content-subtype
 // used for marshaling messages.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ContentSubtypeCallOption struct {
 	ContentSubtype string
 }
@@ -396,11 +428,12 @@
 	c.contentSubtype = o.ContentSubtype
 	return nil
 }
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
+func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
 
-// ForceCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
 //
 // See Content-Type on
 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
@@ -411,7 +444,10 @@
 // This function is provided for advanced users; prefer to use only
 // CallContentSubtype to select a registered codec instead.
 //
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ForceCodec(codec encoding.Codec) CallOption {
 	return ForceCodecCallOption{Codec: codec}
 }
@@ -419,7 +455,10 @@
 // ForceCodecCallOption is a CallOption that indicates the codec used for
 // marshaling messages.
 //
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ForceCodecCallOption struct {
 	Codec encoding.Codec
 }
@@ -428,7 +467,7 @@
 	c.codec = o.Codec
 	return nil
 }
-func (o ForceCodecCallOption) after(c *callInfo) {}
+func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
 // an encoding.Codec.
@@ -441,7 +480,10 @@
 // CustomCodecCallOption is a CallOption that indicates the codec used for
 // marshaling messages.
 //
-// This is an EXPERIMENTAL API.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type CustomCodecCallOption struct {
 	Codec Codec
 }
@@ -450,19 +492,26 @@
 	c.codec = o.Codec
 	return nil
 }
-func (o CustomCodecCallOption) after(c *callInfo) {}
+func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
 // used for buffering this RPC's requests for retry purposes.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func MaxRetryRPCBufferSize(bytes int) CallOption {
 	return MaxRetryRPCBufferSizeCallOption{bytes}
 }
 
 // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
 // memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxRetryRPCBufferSizeCallOption struct {
 	MaxRetryRPCBufferSize int
 }
@@ -471,7 +520,7 @@
 	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
 	return nil
 }
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
 
 // The format of the payload: compressed or not?
 type payloadFormat uint8
@@ -663,13 +712,11 @@
 		if err != nil {
 			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 		}
-	} else {
-		size = len(d)
-	}
-	if size > maxReceiveMessageSize {
-		// TODO: Revisit the error code. Currently keep it consistent with java
-		// implementation.
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		if size > maxReceiveMessageSize {
+			// TODO: Revisit the error code. Currently keep it consistent with java
+			// implementation.
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		}
 	}
 	return d, nil
 }
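
Because the size check above now applies to the decompressed payload, a compressed response that inflates past the limit is rejected with ResourceExhausted even if its wire size was small. Clients that genuinely need larger messages should raise the limit explicitly; a sketch using the default-call-options route (the 64 MB figure and the address are illustrative):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Raise the per-message receive limit from the 4 MB default to 64 MB for
	// every call on this channel; grpc.MaxCallRecvMsgSize can also be passed
	// on individual RPCs instead.
	conn, err := grpc.Dial(
		"dns:///example.internal:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(64*1024*1024)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
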
@@ -780,33 +827,45 @@
 
 // toRPCErr converts an error into an error from the status package.
 func toRPCErr(err error) error {
-	if err == nil || err == io.EOF {
+	switch err {
+	case nil, io.EOF:
 		return err
-	}
-	if err == io.ErrUnexpectedEOF {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	case io.ErrUnexpectedEOF:
 		return status.Error(codes.Internal, err.Error())
 	}
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
+
 	switch e := err.(type) {
 	case transport.ConnectionError:
 		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		}
+	case *transport.NewStreamError:
+		return toRPCErr(e.Err)
 	}
+
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+
 	return status.Error(codes.Unknown, err.Error())
 }
 
 // setCallInfoCodec should only be called after CallOptions have been applied.
 func setCallInfoCodec(c *callInfo) error {
 	if c.codec != nil {
-		// codec was already set by a CallOption; use it.
+		// codec was already set by a CallOption; use it, but set the content
+		// subtype if it is not set.
+		if c.contentSubtype == "" {
+			// c.codec is a baseCodec to hide the difference between grpc.Codec and
+			// encoding.Codec (Name vs. String method name).  We only support
+			// setting content subtype from encoding.Codec to avoid a behavior
+			// change with the deprecated version.
+			if ec, ok := c.codec.(encoding.Codec); ok {
+				c.contentSubtype = strings.ToLower(ec.Name())
+			}
+		}
 		return nil
 	}
 
@@ -824,40 +883,6 @@
 	return nil
 }
 
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
-	net = "tcp"
-
-	m1 := strings.Index(target, ":")
-	m2 := strings.Index(target, ":/")
-
-	// handle unix:addr which will fail with url.Parse
-	if m1 >= 0 && m2 < 0 {
-		if n := target[0:m1]; n == "unix" {
-			net = n
-			addr = target[m1+1:]
-			return net, addr
-		}
-	}
-	if m2 >= 0 {
-		t, err := url.Parse(target)
-		if err != nil {
-			return net, target
-		}
-		scheme := t.Scheme
-		addr = t.Path
-		if scheme == "unix" {
-			net = scheme
-			if addr == "" {
-				addr = t.Host
-			}
-			return net, addr
-		}
-	}
-
-	return net, target
-}
-
 // channelzData is used to store channelz related data for ClientConn, addrConn and Server.
 // These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
 // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
@@ -873,10 +898,9 @@
 
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used.  The latest
-// support package version is 6.
+// support package version is 7.
 //
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
+// Older versions are kept for compatibility.
 //
 // These constants should not be referenced from any other code.
 const (
@@ -884,6 +908,7 @@
 	SupportPackageIsVersion4 = true
 	SupportPackageIsVersion5 = true
 	SupportPackageIsVersion6 = true
+	SupportPackageIsVersion7 = true
 )
 
 const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index edfcdca..eadf9e0 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -40,8 +40,10 @@
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
@@ -55,9 +57,26 @@
 const (
 	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
 	defaultServerMaxSendMessageSize    = math.MaxInt32
+
+	// Server transports are tracked in a map which is keyed on listener
+	// address. For regular gRPC traffic, connections are accepted in Serve()
+	// through a call to Accept(), and we use the actual listener address as key
+	// when we add it to the map. But for connections received through
+	// ServeHTTP(), we do not have a listener and hence use this dummy value.
+	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
 )
 
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+	internal.DrainServerTransports = func(srv *Server, addr string) {
+		srv.drainServerTransports(addr)
+	}
+}
+
 var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
 
 type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
 
@@ -78,27 +97,37 @@
 	Metadata    interface{}
 }
 
-// service consists of the information of the server serving this service and
-// the methods in this service.
-type service struct {
-	server interface{} // the server for service methods
-	md     map[string]*MethodDesc
-	sd     map[string]*StreamDesc
-	mdata  interface{}
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+	// Contains the implementation for the methods in this service.
+	serviceImpl interface{}
+	methods     map[string]*MethodDesc
+	streams     map[string]*StreamDesc
+	mdata       interface{}
+}
+
+type serverWorkerData struct {
+	st     transport.ServerTransport
+	wg     *sync.WaitGroup
+	stream *transport.Stream
 }
 
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
 	opts serverOptions
 
-	mu     sync.Mutex // guards following
-	lis    map[net.Listener]bool
-	conns  map[transport.ServerTransport]bool
-	serve  bool
-	drain  bool
-	cv     *sync.Cond          // signaled when connections close for GracefulStop
-	m      map[string]*service // service name -> service info
-	events trace.EventLog
+	mu  sync.Mutex // guards following
+	lis map[net.Listener]bool
+	// conns contains all active server transports. It is a map keyed on a
+	// listener address with the value being the set of active transports
+	// belonging to that listener.
+	conns    map[string]map[transport.ServerTransport]bool
+	serve    bool
+	drain    bool
+	cv       *sync.Cond              // signaled when connections close for GracefulStop
+	services map[string]*serviceInfo // service name -> service info
+	events   trace.EventLog
 
 	quit               *grpcsync.Event
 	done               *grpcsync.Event
@@ -107,6 +136,8 @@
 
 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
+
+	serverWorkerChannels []chan *serverWorkerData
 }
 
 type serverOptions struct {
@@ -133,6 +164,7 @@
 	connectionTimeout     time.Duration
 	maxHeaderListSize     *uint32
 	headerTableSize       *uint32
+	numServerWorkers      uint32
 }
 
 var defaultServerOptions = serverOptions{
@@ -151,7 +183,10 @@
 // EmptyServerOption does not alter the server configuration. It can be embedded
 // in another structure to build custom server options.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type EmptyServerOption struct{}
 
 func (EmptyServerOption) apply(*serverOptions) {}
@@ -213,7 +248,7 @@
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
 	if kp.Time > 0 && kp.Time < time.Second {
-		grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
 		kp.Time = time.Second
 	}
 
@@ -232,19 +267,55 @@
 // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
 //
 // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
 func CustomCodec(codec Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.codec = codec
 	})
 }
 
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
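
ForceServerCodec above is the encoding.Codec counterpart to the now-deprecated CustomCodec; for most servers the preferred route remains encoding.RegisterCodec plus content-subtype negotiation. A toy raw-bytes codec showing the encoding.Codec surface and both registration routes; the "raw" name and the []byte-only contract are assumptions of this example:

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// rawCodec passes []byte messages through untouched. It exists only to show
// the encoding.Codec surface; a real service would wrap a proto codec.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	switch m := v.(type) {
	case []byte:
		return m, nil
	case *[]byte:
		return *m, nil
	}
	return nil, fmt.Errorf("rawCodec: unsupported message type %T", v)
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*b = data
	return nil
}

func (rawCodec) Name() string { return "raw" }

func main() {
	// Either register globally (preferred; selected via content-subtype)...
	encoding.RegisterCodec(rawCodec{})
	// ...or force it for every RPC on this server, overriding negotiation.
	s := grpc.NewServer(grpc.ForceServerCodec(rawCodec{}))
	_ = s
}
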
 // RPCCompressor returns a ServerOption that sets a compressor for outbound
 // messages.  For backward compatibility, all outbound messages will be sent
 // using this compressor, regardless of incoming message compression.  By
 // default, server messages will be sent using the same compressor with which
 // request messages were sent.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
 func RPCCompressor(cp Compressor) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.cp = cp
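
A minimal sketch of adopting the new ForceServerCodec option in place of the deprecated CustomCodec; the jsonCodec type below is a placeholder assumed to satisfy encoding.Codec (Marshal, Unmarshal, Name) and is not part of this change:

    package example

    import (
        "encoding/json"

        "google.golang.org/grpc"
    )

    // jsonCodec is a stand-in encoding.Codec implementation.
    type jsonCodec struct{}

    func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                               { return "json" }

    // newServer forces this codec for every RPC, bypassing the usual
    // content-subtype lookup, as ForceServerCodec documents above.
    func newServer() *grpc.Server {
        return grpc.NewServer(grpc.ForceServerCodec(jsonCodec{}))
    }
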
@@ -255,7 +326,8 @@
 // messages.  It has higher priority than decompressors registered via
 // encoding.RegisterCompressor.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
 func RPCDecompressor(dc Decompressor) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.dc = dc
@@ -265,7 +337,7 @@
 // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
 // If this is not set, gRPC uses the default limit.
 //
-// Deprecated: use MaxRecvMsgSize instead.
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
 func MaxMsgSize(m int) ServerOption {
 	return MaxRecvMsgSize(m)
 }
@@ -335,7 +407,7 @@
 }
 
 // ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
-// for stream RPCs. The first interceptor will be the outer most,
+// for streaming RPCs. The first interceptor will be the outer most,
 // while the last interceptor will be the inner most wrapper around the real call.
 // All stream interceptors added by this method will be chained.
 func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
@@ -346,6 +418,11 @@
 
 // InTapHandle returns a ServerOption that sets the tap handle for all the server
 // transport to be created. Only one can be installed.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func InTapHandle(h tap.ServerInHandle) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		if o.inTapHandle != nil {
@@ -385,7 +462,10 @@
 // new connections.  If this is not set, the default is 120 seconds.  A zero or
 // negative value will result in an immediate timeout.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ConnectionTimeout(d time.Duration) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.connectionTimeout = d
@@ -403,13 +483,79 @@
 // HeaderTableSize returns a ServerOption that sets the size of dynamic
 // header table for stream.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func HeaderTableSize(s uint32) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.headerTableSize = &s
 	})
 }
 
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+	// TODO: If/when this API gets stabilized (i.e. stream workers become the
+	// only way streams are processed), change the behavior of the zero value to
+	// a sane default. Preliminary experiments suggest that a value equal to the
+	// number of CPUs available is most performant; requires thorough testing.
+	return newFuncServerOption(func(o *serverOptions) {
+		o.numServerWorkers = numServerWorkers
+	})
+}
+
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, by spawning a new goroutine in its place, a worker can reset its
+// stack so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on a *transport.Stream channel forever and waits for
+// data to be fed by serveStreams. This allows different requests to be
+// processed by the same goroutine, removing the need for expensive stack
+// re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker(ch chan *serverWorkerData) {
+	// To make sure all server workers don't reset at the same time, choose a
+	// random number of iterations before resetting.
+	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
+	for completed := 0; completed < threshold; completed++ {
+		data, ok := <-ch
+		if !ok {
+			return
+		}
+		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+		data.wg.Done()
+	}
+	go s.serverWorker(ch)
+}
+
+// initServerWorkers creates worker goroutines and channels to process incoming
+// connections to reduce the time spent overall on runtime.morestack.
+func (s *Server) initServerWorkers() {
+	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
+		go s.serverWorker(s.serverWorkerChannels[i])
+	}
+}
+
+func (s *Server) stopServerWorkers() {
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		close(s.serverWorkerChannels[i])
+	}
+}
+
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
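
A short sketch of opting into the experimental stream-worker pool added above; sizing the pool to runtime.NumCPU() follows the preliminary observation in the TODO and is an assumption, not guidance carried by this diff:

    package example

    import (
        "runtime"

        "google.golang.org/grpc"
    )

    func newServer() *grpc.Server {
        // Zero (the default) keeps one goroutine per stream; a non-zero value
        // dispatches streams to a fixed pool of worker goroutines.
        return grpc.NewServer(grpc.NumStreamWorkers(uint32(runtime.NumCPU())))
    }
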
@@ -418,13 +564,13 @@
 		o.apply(&opts)
 	}
 	s := &Server{
-		lis:    make(map[net.Listener]bool),
-		opts:   opts,
-		conns:  make(map[transport.ServerTransport]bool),
-		m:      make(map[string]*service),
-		quit:   grpcsync.NewEvent(),
-		done:   grpcsync.NewEvent(),
-		czData: new(channelzData),
+		lis:      make(map[net.Listener]bool),
+		opts:     opts,
+		conns:    make(map[string]map[transport.ServerTransport]bool),
+		services: make(map[string]*serviceInfo),
+		quit:     grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+		czData:   new(channelzData),
 	}
 	chainUnaryServerInterceptors(s)
 	chainStreamServerInterceptors(s)
@@ -434,6 +580,10 @@
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
 	}
 
+	if s.opts.numServerWorkers > 0 {
+		s.initServerWorkers()
+	}
+
 	if channelz.IsOn() {
 		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
 	}
@@ -456,14 +606,29 @@
 	}
 }
 
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+	// RegisterService registers a service and its implementation to the
+	// concrete type implementing this interface.  It may not be called
+	// once the server has started serving.
+	// desc describes the service and its methods and handlers. impl is the
+	// service implementation which is passed to the method handlers.
+	RegisterService(desc *ServiceDesc, impl interface{})
+}
+
 // RegisterService registers a service and its implementation to the gRPC
 // server. It is called from the IDL generated code. This must be called before
-// invoking Serve.
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
 func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
-	ht := reflect.TypeOf(sd.HandlerType).Elem()
-	st := reflect.TypeOf(ss)
-	if !st.Implements(ht) {
-		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+	if ss != nil {
+		ht := reflect.TypeOf(sd.HandlerType).Elem()
+		st := reflect.TypeOf(ss)
+		if !st.Implements(ht) {
+			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+		}
 	}
 	s.register(sd, ss)
 }
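
A sketch of the registration-helper shape the new ServiceRegistrar interface enables; FooServer and _Foo_serviceDesc are placeholders standing in for IDL-generated code:

    package foopb

    import "google.golang.org/grpc"

    // FooServer stands in for a generated service interface.
    type FooServer interface{}

    // _Foo_serviceDesc stands in for the generated service descriptor.
    var _Foo_serviceDesc = grpc.ServiceDesc{
        ServiceName: "example.Foo",
        HandlerType: (*FooServer)(nil),
    }

    // RegisterFooServer accepts any grpc.ServiceRegistrar, so callers can pass
    // a *grpc.Server or any other registrar (for example a wrapper used in tests).
    func RegisterFooServer(s grpc.ServiceRegistrar, srv FooServer) {
        s.RegisterService(&_Foo_serviceDesc, srv)
    }
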
@@ -473,26 +638,26 @@
 	defer s.mu.Unlock()
 	s.printf("RegisterService(%q)", sd.ServiceName)
 	if s.serve {
-		grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
 	}
-	if _, ok := s.m[sd.ServiceName]; ok {
-		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	if _, ok := s.services[sd.ServiceName]; ok {
+		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
 	}
-	srv := &service{
-		server: ss,
-		md:     make(map[string]*MethodDesc),
-		sd:     make(map[string]*StreamDesc),
-		mdata:  sd.Metadata,
+	info := &serviceInfo{
+		serviceImpl: ss,
+		methods:     make(map[string]*MethodDesc),
+		streams:     make(map[string]*StreamDesc),
+		mdata:       sd.Metadata,
 	}
 	for i := range sd.Methods {
 		d := &sd.Methods[i]
-		srv.md[d.MethodName] = d
+		info.methods[d.MethodName] = d
 	}
 	for i := range sd.Streams {
 		d := &sd.Streams[i]
-		srv.sd[d.StreamName] = d
+		info.streams[d.StreamName] = d
 	}
-	s.m[sd.ServiceName] = srv
+	s.services[sd.ServiceName] = info
 }
 
 // MethodInfo contains the information of an RPC including its method name and type.
@@ -516,16 +681,16 @@
 // Service names include the package names, in the form of <package>.<service>.
 func (s *Server) GetServiceInfo() map[string]ServiceInfo {
 	ret := make(map[string]ServiceInfo)
-	for n, srv := range s.m {
-		methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
-		for m := range srv.md {
+	for n, srv := range s.services {
+		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+		for m := range srv.methods {
 			methods = append(methods, MethodInfo{
 				Name:           m,
 				IsClientStream: false,
 				IsServerStream: false,
 			})
 		}
-		for m, d := range srv.sd {
+		for m, d := range srv.streams {
 			methods = append(methods, MethodInfo{
 				Name:           m,
 				IsClientStream: d.ClientStreams,
@@ -545,13 +710,6 @@
 // the server being stopped.
 var ErrServerStopped = errors.New("grpc: the server has been stopped")
 
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
-	if s.opts.creds == nil {
-		return rawConn, nil, nil
-	}
-	return s.opts.creds.ServerHandshake(rawConn)
-}
-
 type listenSocket struct {
 	net.Listener
 	channelzID int64
@@ -660,7 +818,7 @@
 		// s.conns before this conn can be added.
 		s.serveWG.Add(1)
 		go func() {
-			s.handleRawConn(rawConn)
+			s.handleRawConn(lis.Addr().String(), rawConn)
 			s.serveWG.Done()
 		}()
 	}
@@ -668,49 +826,45 @@
 
 // handleRawConn forks a goroutine to handle a just-accepted connection that
 // has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
 	if s.quit.HasFired() {
 		rawConn.Close()
 		return
 	}
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
-	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
-	if err != nil {
-		// ErrConnDispatched means that the connection was dispatched away from
-		// gRPC; those connections should be left open.
-		if err != credentials.ErrConnDispatched {
-			s.mu.Lock()
-			s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
-			s.mu.Unlock()
-			channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
-			rawConn.Close()
-		}
-		rawConn.SetDeadline(time.Time{})
-		return
-	}
 
 	// Finish handshaking (HTTP2)
-	st := s.newHTTP2Transport(conn, authInfo)
+	st := s.newHTTP2Transport(rawConn)
+	rawConn.SetDeadline(time.Time{})
 	if st == nil {
 		return
 	}
 
-	rawConn.SetDeadline(time.Time{})
-	if !s.addConn(st) {
+	if !s.addConn(lisAddr, st) {
 		return
 	}
 	go func() {
 		s.serveStreams(st)
-		s.removeConn(st)
+		s.removeConn(lisAddr, st)
 	}()
 }
 
+func (s *Server) drainServerTransports(addr string) {
+	s.mu.Lock()
+	conns := s.conns[addr]
+	for st := range conns {
+		st.Drain()
+	}
+	s.mu.Unlock()
+}
+
 // newHTTP2Transport sets up a http/2 transport (using the
 // gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 	config := &transport.ServerConfig{
 		MaxStreams:            s.opts.maxConcurrentStreams,
-		AuthInfo:              authInfo,
+		ConnectionTimeout:     s.opts.connectionTimeout,
+		Credentials:           s.opts.creds,
 		InTapHandle:           s.opts.inTapHandle,
 		StatsHandler:          s.opts.statsHandler,
 		KeepaliveParams:       s.opts.keepaliveParams,
@@ -723,13 +877,20 @@
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 		HeaderTableSize:       s.opts.headerTableSize,
 	}
-	st, err := transport.NewServerTransport("http2", c, config)
+	st, err := transport.NewServerTransport(c, config)
 	if err != nil {
 		s.mu.Lock()
 		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
 		s.mu.Unlock()
-		c.Close()
-		channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+		// ErrConnDispatched means that the connection was dispatched away from
+		// gRPC; those connections should be left open.
+		if err != credentials.ErrConnDispatched {
+			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+			if err != io.EOF {
+				channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+			}
+			c.Close()
+		}
 		return nil
 	}
 
@@ -739,12 +900,27 @@
 func (s *Server) serveStreams(st transport.ServerTransport) {
 	defer st.Close()
 	var wg sync.WaitGroup
+
+	var roundRobinCounter uint32
 	st.HandleStreams(func(stream *transport.Stream) {
 		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
-		}()
+		if s.opts.numServerWorkers > 0 {
+			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
+			select {
+			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+			default:
+				// If all stream workers are busy, fallback to the default code path.
+				go func() {
+					s.handleStream(st, stream, s.traceInfo(st, stream))
+					wg.Done()
+				}()
+			}
+		} else {
+			go func() {
+				defer wg.Done()
+				s.handleStream(st, stream, s.traceInfo(st, stream))
+			}()
+		}
 	}, func(ctx context.Context, method string) context.Context {
 		if !EnableTracing {
 			return ctx
@@ -779,18 +955,22 @@
 // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
 // separate from grpc-go's HTTP/2 server. Performance and features may vary
 // between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
-// and subject to change.
+// available through grpc-go's HTTP/2 server.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	if !s.addConn(st) {
+	if !s.addConn(listenerAddressForServeHTTP, st) {
 		return
 	}
-	defer s.removeConn(st)
+	defer s.removeConn(listenerAddressForServeHTTP, st)
 	s.serveStreams(st)
 }
 
@@ -818,7 +998,7 @@
 	return trInfo
 }
 
-func (s *Server) addConn(st transport.ServerTransport) bool {
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.conns == nil {
@@ -830,15 +1010,28 @@
 		// immediately.
 		st.Drain()
 	}
-	s.conns[st] = true
+
+	if s.conns[addr] == nil {
+		// Create a map entry if this is the first connection on this listener.
+		s.conns[addr] = make(map[transport.ServerTransport]bool)
+	}
+	s.conns[addr][st] = true
 	return true
 }
 
-func (s *Server) removeConn(st transport.ServerTransport) {
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.conns != nil {
-		delete(s.conns, st)
+
+	conns := s.conns[addr]
+	if conns != nil {
+		delete(conns, st)
+		if len(conns) == 0 {
+			// If the last connection for this address is being removed, also
+			// remove the map entry corresponding to the address. This is used
+			// in GracefulStop() when waiting for all connections to be closed.
+			delete(s.conns, addr)
+		}
 		s.cv.Broadcast()
 	}
 }
@@ -868,12 +1061,12 @@
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
-		channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
+		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
 		return err
 	}
 	compData, err := compress(data, cp, comp)
 	if err != nil {
-		channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
+		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
 		return err
 	}
 	hdr, payload := msgHeader(data, compData)
@@ -903,26 +1096,33 @@
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
-			return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
-		}
+		chainedInt = chainUnaryInterceptors(interceptors)
 	}
 
 	s.opts.unaryInt = chainedInt
 }
 
-// getChainUnaryHandler recursively generate the chained UnaryHandler
-func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
-	if curr == len(interceptors)-1 {
-		return finalHandler
-	}
-
-	return func(ctx context.Context, req interface{}) (interface{}, error) {
-		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+		// the struct ensures the variables are allocated together, rather than separately, since we
+		// know they should be garbage collected together. This saves 1 allocation and decreases
+		// time/call by about 10% on the microbenchmark.
+		var state struct {
+			i    int
+			next UnaryHandler
+		}
+		state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
+			if state.i == len(interceptors)-1 {
+				return interceptors[state.i](ctx, req, info, handler)
+			}
+			state.i++
+			return interceptors[state.i-1](ctx, req, info, state.next)
+		}
+		return state.next(ctx, req)
 	}
 }
 
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	sh := s.opts.statsHandler
 	if sh != nil || trInfo != nil || channelz.IsOn() {
 		if channelz.IsOn() {
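
The rewritten chaining helper above keeps the documented ordering: the first interceptor passed is the outermost. A sketch with two placeholder interceptors:

    package example

    import (
        "context"
        "log"

        "google.golang.org/grpc"
    )

    func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        log.Printf("-> %s", info.FullMethod)
        resp, err := handler(ctx, req) // runs auth, then the method handler
        log.Printf("<- %s err=%v", info.FullMethod, err)
        return resp, err
    }

    func auth(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        // Placeholder check; real code would inspect incoming metadata.
        return handler(ctx, req)
    }

    func newServer() *grpc.Server {
        // logging wraps auth, which wraps the real method handler.
        return grpc.NewServer(grpc.ChainUnaryInterceptor(logging, auth))
    }
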
@@ -932,7 +1132,9 @@
 		if sh != nil {
 			beginTime := time.Now()
 			statsBegin = &stats.Begin{
-				BeginTime: beginTime,
+				BeginTime:      beginTime,
+				IsClientStream: false,
+				IsServerStream: false,
 			}
 			sh.HandleRPC(stream.Context(), statsBegin)
 		}
@@ -1045,10 +1247,8 @@
 	}
 	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
-		if st, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, st); e != nil {
-				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
-			}
+		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
 		}
 		return err
 	}
@@ -1063,7 +1263,7 @@
 			sh.HandleRPC(stream.Context(), &stats.InPayload{
 				RecvTime:   time.Now(),
 				Payload:    v,
-				WireLength: payInfo.wireLength,
+				WireLength: payInfo.wireLength + headerLen,
 				Data:       d,
 				Length:     len(d),
 			})
@@ -1079,7 +1279,7 @@
 		return nil
 	}
 	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
-	reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
+	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
 		if !ok {
@@ -1092,7 +1292,7 @@
 			trInfo.tr.SetError()
 		}
 		if e := t.WriteStatus(stream, appStatus); e != nil {
-			channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		if binlog != nil {
 			if h, _ := stream.Header(); h.Len() > 0 {
@@ -1121,7 +1321,7 @@
 		}
 		if sts, ok := status.FromError(err); ok {
 			if e := t.WriteStatus(stream, sts); e != nil {
-				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 			}
 		} else {
 			switch st := err.(type) {
@@ -1186,26 +1386,33 @@
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
-			return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
-		}
+		chainedInt = chainStreamInterceptors(interceptors)
 	}
 
 	s.opts.streamInt = chainedInt
 }
 
-// getChainStreamHandler recursively generate the chained StreamHandler
-func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
-	if curr == len(interceptors)-1 {
-		return finalHandler
-	}
-
-	return func(srv interface{}, ss ServerStream) error {
-		return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+		// the struct ensures the variables are allocated together, rather than separately, since we
+		// know they should be garbage collected together. This saves 1 allocation and decreases
+		// time/call by about 10% on the microbenchmark.
+		var state struct {
+			i    int
+			next StreamHandler
+		}
+		state.next = func(srv interface{}, ss ServerStream) error {
+			if state.i == len(interceptors)-1 {
+				return interceptors[state.i](srv, ss, info, handler)
+			}
+			state.i++
+			return interceptors[state.i-1](srv, ss, info, state.next)
+		}
+		return state.next(srv, ss)
 	}
 }
 
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
 	}
@@ -1214,7 +1421,9 @@
 	if sh != nil {
 		beginTime := time.Now()
 		statsBegin = &stats.Begin{
-			BeginTime: beginTime,
+			BeginTime:      beginTime,
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
 		}
 		sh.HandleRPC(stream.Context(), statsBegin)
 	}
@@ -1317,13 +1526,15 @@
 		}
 	}
 
+	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 	}
 	var appErr error
 	var server interface{}
-	if srv != nil {
-		server = srv.server
+	if info != nil {
+		server = info.serviceImpl
 	}
 	if s.opts.streamInt == nil {
 		appErr = sd.Handler(server, ss)
@@ -1384,12 +1595,12 @@
 			trInfo.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
-		if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
+		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 			if trInfo != nil {
 				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
 				trInfo.tr.SetError()
 			}
-			channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 		}
 		if trInfo != nil {
 			trInfo.tr.Finish()
@@ -1399,13 +1610,13 @@
 	service := sm[:pos]
 	method := sm[pos+1:]
 
-	srv, knownService := s.m[service]
+	srv, knownService := s.services[service]
 	if knownService {
-		if md, ok := srv.md[method]; ok {
+		if md, ok := srv.methods[method]; ok {
 			s.processUnaryRPC(t, stream, srv, md, trInfo)
 			return
 		}
-		if sd, ok := srv.sd[method]; ok {
+		if sd, ok := srv.streams[method]; ok {
 			s.processStreamingRPC(t, stream, srv, sd, trInfo)
 			return
 		}
@@ -1430,7 +1641,7 @@
 			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
 			trInfo.tr.SetError()
 		}
-		channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 	}
 	if trInfo != nil {
 		trInfo.tr.Finish()
@@ -1443,7 +1654,10 @@
 // NewContextWithServerTransportStream creates a new context from ctx and
 // attaches stream to it.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
 	return context.WithValue(ctx, streamKey{}, stream)
 }
@@ -1455,7 +1669,10 @@
 //
 // See also NewContextWithServerTransportStream.
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ServerTransportStream interface {
 	Method() string
 	SetHeader(md metadata.MD) error
@@ -1467,7 +1684,10 @@
 // ctx. Returns nil if the given context has no stream associated with it
 // (which implies it is not an RPC invocation context).
 //
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
 	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
 	return s
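
A sketch of the experimental accessor in use; setRequestID and the header name are placeholders:

    package example

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    // setRequestID sets a response header from any code that holds the RPC's
    // context, via the stream attached by NewContextWithServerTransportStream.
    func setRequestID(ctx context.Context, id string) {
        if sts := grpc.ServerTransportStreamFromContext(ctx); sts != nil {
            _ = sts.SetHeader(metadata.Pairs("x-request-id", id))
        }
    }
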
@@ -1495,7 +1715,7 @@
 	s.mu.Lock()
 	listeners := s.lis
 	s.lis = nil
-	st := s.conns
+	conns := s.conns
 	s.conns = nil
 	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
 	s.cv.Broadcast()
@@ -1504,8 +1724,13 @@
 	for lis := range listeners {
 		lis.Close()
 	}
-	for c := range st {
-		c.Close()
+	for _, cs := range conns {
+		for st := range cs {
+			st.Close()
+		}
+	}
+	if s.opts.numServerWorkers > 0 {
+		s.stopServerWorkers()
 	}
 
 	s.mu.Lock()
@@ -1539,8 +1764,10 @@
 	}
 	s.lis = nil
 	if !s.drain {
-		for st := range s.conns {
-			st.Drain()
+		for _, conns := range s.conns {
+			for st := range conns {
+				st.Drain()
+			}
 		}
 		s.drain = true
 	}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 5a80a57..22c4240 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -20,15 +20,16 @@
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
 
-	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -40,29 +41,7 @@
 // Deprecated: Users should not use this struct. Service config should be received
 // through name resolver, as specified here
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
-	// WaitForReady indicates whether RPCs sent to this method should wait until
-	// the connection is ready by default (!failfast). The value specified via the
-	// gRPC client API will override the value set here.
-	WaitForReady *bool
-	// Timeout is the default timeout for RPCs sent to this method. The actual
-	// deadline used will be the minimum of the value specified here and the value
-	// set by the application via the gRPC client API.  If either one is not set,
-	// then the other will be used.  If neither is set, then the RPC has no deadline.
-	Timeout *time.Duration
-	// MaxReqSize is the maximum allowed payload size for an individual request in a
-	// stream (client->server) in bytes. The size which is measured is the serialized
-	// payload after per-message compression (but before stream compression) in bytes.
-	// The actual value used is the minimum of the value specified here and the value set
-	// by the application via the gRPC client API. If either one is not set, then the other
-	// will be used.  If neither is set, then the built-in default is used.
-	MaxReqSize *int
-	// MaxRespSize is the maximum allowed payload size for an individual response in a
-	// stream (server->client) in bytes.
-	MaxRespSize *int
-	// RetryPolicy configures retry options for the method.
-	retryPolicy *retryPolicy
-}
+type MethodConfig = internalserviceconfig.MethodConfig
 
 type lbConfig struct {
 	name string
@@ -79,7 +58,7 @@
 	serviceconfig.Config
 
 	// LB is the load balancer the service providers recommends. The balancer
-	// specified via grpc.WithBalancer will override this.  This is deprecated;
+	// specified via grpc.WithBalancerName will override this.  This is deprecated;
 	// lbConfigs is preferred.  If lbConfig and LB are both present, lbConfig
 	// will be used.
 	LB *string
@@ -126,34 +105,6 @@
 	ServiceName string
 }
 
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
-	// MaxAttempts is the maximum number of attempts, including the original RPC.
-	//
-	// This field is required and must be two or greater.
-	maxAttempts int
-
-	// Exponential backoff parameters. The initial retry attempt will occur at
-	// random(0, initialBackoff). In general, the nth attempt will occur at
-	// random(0,
-	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
-	//
-	// These fields are required and must be greater than zero.
-	initialBackoff    time.Duration
-	maxBackoff        time.Duration
-	backoffMultiplier float64
-
-	// The set of status codes which may be retried.
-	//
-	// Status codes are specified as strings, e.g., "UNAVAILABLE".
-	//
-	// This field is required and must be non-empty.
-	// Note: a set is used to store this for easy lookup.
-	retryableStatusCodes map[codes.Code]bool
-}
-
 type jsonRetryPolicy struct {
 	MaxAttempts          int
 	InitialBackoff       string
@@ -224,19 +175,27 @@
 }
 
 type jsonName struct {
-	Service *string
-	Method  *string
+	Service string
+	Method  string
 }
 
-func (j jsonName) generatePath() (string, bool) {
-	if j.Service == nil {
-		return "", false
+var (
+	errDuplicatedName             = errors.New("duplicated name")
+	errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+	if j.Service == "" {
+		if j.Method != "" {
+			return "", errEmptyServiceNonEmptyMethod
+		}
+		return "", nil
 	}
-	res := "/" + *j.Service + "/"
-	if j.Method != nil {
-		res += *j.Method
+	res := "/" + j.Service + "/"
+	if j.Method != "" {
+		res += j.Method
 	}
-	return res, true
+	return res, nil
 }
 
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
@@ -249,12 +208,10 @@
 	RetryPolicy             *jsonRetryPolicy
 }
 
-type loadBalancingConfig map[string]json.RawMessage
-
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
 	LoadBalancingPolicy *string
-	LoadBalancingConfig *[]loadBalancingConfig
+	LoadBalancingConfig *internalserviceconfig.BalancerConfig
 	MethodConfig        *[]jsonMC
 	RetryThrottling     *retryThrottlingPolicy
 	HealthCheckConfig   *healthCheckConfig
@@ -270,7 +227,7 @@
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
-		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+		logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
 		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
@@ -280,53 +237,25 @@
 		healthCheckConfig: rsc.HealthCheckConfig,
 		rawJSONString:     js,
 	}
-	if rsc.LoadBalancingConfig != nil {
-		for i, lbcfg := range *rsc.LoadBalancingConfig {
-			if len(lbcfg) != 1 {
-				err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
-				grpclog.Warningf(err.Error())
-				return &serviceconfig.ParseResult{Err: err}
-			}
-			var name string
-			var jsonCfg json.RawMessage
-			for name, jsonCfg = range lbcfg {
-			}
-			builder := balancer.Get(name)
-			if builder == nil {
-				continue
-			}
-			sc.lbConfig = &lbConfig{name: name}
-			if parser, ok := builder.(balancer.ConfigParser); ok {
-				var err error
-				sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
-				if err != nil {
-					return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
-				}
-			} else if string(jsonCfg) != "{}" {
-				grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
-			}
-			break
-		}
-		if sc.lbConfig == nil {
-			// We had a loadBalancingConfig field but did not encounter a
-			// supported policy.  The config is considered invalid in this
-			// case.
-			err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
-			grpclog.Warningf(err.Error())
-			return &serviceconfig.ParseResult{Err: err}
+	if c := rsc.LoadBalancingConfig; c != nil {
+		sc.lbConfig = &lbConfig{
+			name: c.Name,
+			cfg:  c.Config,
 		}
 	}
 
 	if rsc.MethodConfig == nil {
 		return &serviceconfig.ParseResult{Config: &sc}
 	}
+
+	paths := map[string]struct{}{}
 	for _, m := range *rsc.MethodConfig {
 		if m.Name == nil {
 			continue
 		}
 		d, err := parseDuration(m.Timeout)
 		if err != nil {
-			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+			logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
 
@@ -334,8 +263,8 @@
 			WaitForReady: m.WaitForReady,
 			Timeout:      d,
 		}
-		if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
-			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+			logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
@@ -352,10 +281,20 @@
 				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
 			}
 		}
-		for _, n := range *m.Name {
-			if path, valid := n.generatePath(); valid {
-				sc.Methods[path] = mc
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
 			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
 		}
 	}
 
@@ -370,7 +309,7 @@
 	return &serviceconfig.ParseResult{Config: &sc}
 }
 
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
 	if jrp == nil {
 		return nil, nil
 	}
@@ -388,23 +327,23 @@
 		*mb <= 0 ||
 		jrp.BackoffMultiplier <= 0 ||
 		len(jrp.RetryableStatusCodes) == 0 {
-		grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
 		return nil, nil
 	}
 
-	rp := &retryPolicy{
-		maxAttempts:          jrp.MaxAttempts,
-		initialBackoff:       *ib,
-		maxBackoff:           *mb,
-		backoffMultiplier:    jrp.BackoffMultiplier,
-		retryableStatusCodes: make(map[codes.Code]bool),
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          jrp.MaxAttempts,
+		InitialBackoff:       *ib,
+		MaxBackoff:           *mb,
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
 	}
-	if rp.maxAttempts > 5 {
+	if rp.MaxAttempts > 5 {
 		// TODO(retry): Make the max maxAttempts configurable.
-		rp.maxAttempts = 5
+		rp.MaxAttempts = 5
 	}
 	for _, code := range jrp.RetryableStatusCodes {
-		rp.retryableStatusCodes[code] = true
+		rp.RetryableStatusCodes[code] = true
 	}
 	return rp, nil
 }
@@ -432,3 +371,34 @@
 func newInt(b int) *int {
 	return &b
 }
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because the raw JSON strings may differ only in whitespace.
+//
+// If any of them is NOT *ServiceConfig, return false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
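
A sketch of a service config JSON matching the shapes parsed above (plain-string name entries, a retry policy); the target, service and method names are placeholders, and actual retry behavior also depends on client settings:

    package example

    import "google.golang.org/grpc"

    // serviceConfigJSON sketches the JSON handled by parseServiceConfig: name
    // entries are plain strings, an empty service with a non-empty method is
    // rejected, and duplicate name entries are rejected.
    const serviceConfigJSON = `{
      "methodConfig": [{
        "name": [{"service": "example.Foo", "method": "Bar"}],
        "timeout": "1.5s",
        "retryPolicy": {
          "maxAttempts": 3,
          "initialBackoff": "0.1s",
          "maxBackoff": "1s",
          "backoffMultiplier": 2,
          "retryableStatusCodes": ["UNAVAILABLE"]
        }
      }]
    }`

    func dial(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target,
            grpc.WithInsecure(),
            grpc.WithDefaultServiceConfig(serviceConfigJSON))
    }
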
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 187c304..73a2f92 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -19,7 +19,10 @@
 // Package serviceconfig defines types and methods for operating on gRPC
 // service configs.
 //
-// This package is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package serviceconfig
 
 // Config represents an opaque data structure holding a service config.
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index a7970c7..0285dcc 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -16,8 +16,6 @@
  *
  */
 
-//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
-
 // Package stats is for collecting and reporting various network and RPC stats.
 // This package is for monitoring purpose only. All fields are read-only.
 // All APIs are experimental.
@@ -38,15 +36,22 @@
 	IsClient() bool
 }
 
-// Begin contains stats when an RPC begins.
+// Begin contains stats when an RPC attempt begins.
 // FailFast is only valid if this Begin is from client side.
 type Begin struct {
 	// Client is true if this Begin is from client side.
 	Client bool
-	// BeginTime is the time when the RPC begins.
+	// BeginTime is the time when the RPC attempt begins.
 	BeginTime time.Time
 	// FailFast indicates if this RPC is failfast.
 	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
 }
 
 // IsClient indicates if the stats information is from client side.
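
A sketch of a stats handler that reads the new Begin fields; rpcStatsHandler is a placeholder name, and it would be installed with grpc.StatsHandler on a server or grpc.WithStatsHandler on a client:

    package example

    import (
        "context"
        "log"

        "google.golang.org/grpc/stats"
    )

    type rpcStatsHandler struct{}

    func (rpcStatsHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
    func (rpcStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
    func (rpcStatsHandler) HandleConn(context.Context, stats.ConnStats)                       {}

    func (rpcStatsHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
        if b, ok := s.(*stats.Begin); ok {
            // New fields in this update: stream direction and transparent-retry marker.
            log.Printf("begin: client-stream=%v server-stream=%v transparent-retry=%v",
                b.IsClientStream, b.IsServerStream, b.IsTransparentRetryAttempt)
        }
    }
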
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 01e182c..6d163b6 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -29,6 +29,7 @@
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	spb "google.golang.org/genproto/googleapis/rpc/status"
@@ -73,9 +74,16 @@
 	return status.FromProto(s)
 }
 
-// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
+// FromError returns a Status representation of err.
+//
+// - If err was produced by this package or implements the method `GRPCStatus()
+//   *Status`, the appropriate Status is returned.
+//
+// - If err is nil, a Status is returned with codes.OK and no message.
+//
+// - Otherwise, err is an error not compatible with this package.  In this
+//   case, a Status is returned with codes.Unknown and err's Error() message,
+//   and ok is false.
 func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
 		return nil, true
@@ -110,18 +118,18 @@
 	return codes.Unknown
 }
 
-// FromContextError converts a context error into a Status.  It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
+// FromContextError converts a context error or wrapped context error into a
+// Status.  It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
 func FromContextError(err error) *Status {
-	switch err {
-	case nil:
+	if err == nil {
 		return nil
-	case context.DeadlineExceeded:
-		return New(codes.DeadlineExceeded, err.Error())
-	case context.Canceled:
-		return New(codes.Canceled, err.Error())
-	default:
-		return New(codes.Unknown, err.Error())
 	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return New(codes.DeadlineExceeded, err.Error())
+	}
+	if errors.Is(err, context.Canceled) {
+		return New(codes.Canceled, err.Error())
+	}
+	return New(codes.Unknown, err.Error())
 }
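
A short sketch of the FromContextError change: wrapped context errors now map to the matching codes because the comparison uses errors.Is.

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        wrapped := fmt.Errorf("loading profile: %w", context.DeadlineExceeded)

        // Previously a wrapped context error produced codes.Unknown; with
        // errors.Is it is recognized as codes.DeadlineExceeded.
        st := status.FromContextError(wrapped)
        fmt.Println(st.Code() == codes.DeadlineExceeded) // true
    }
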
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 934ef68..625d47b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -35,6 +35,9 @@
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcutil"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -49,14 +52,20 @@
 // of the RPC.
 type StreamHandler func(srv interface{}, stream ServerStream) error
 
-// StreamDesc represents a streaming RPC service's method specification.
+// StreamDesc represents a streaming RPC service's method specification.  Used
+// on the server when registering services and on the client when initiating
+// new streams.
 type StreamDesc struct {
-	StreamName string
-	Handler    StreamHandler
+	// StreamName and Handler are only used when registering handlers on a
+	// server.
+	StreamName string        // the name of the method excluding the service
+	Handler    StreamHandler // the handler called for the method
 
-	// At least one of these is true.
-	ServerStreams bool
-	ClientStreams bool
+	// ServerStreams and ClientStreams are used for registering handlers on a
+	// server as well as defining RPC behavior when passed to NewClientStream
+	// and ClientConn.NewStream.  At least one must be true.
+	ServerStreams bool // indicates the server can perform streaming sends
+	ClientStreams bool // indicates the client can perform streaming sends
 }
 
 // Stream defines the common interface a client or server stream has to satisfy.
@@ -163,13 +172,48 @@
 			}
 		}()
 	}
-	c := defaultCallInfo()
 	// Provide an opportunity for the first RPC to see the first service config
 	// provided by the resolver.
 	if err := cc.waitForResolvedAddrs(ctx); err != nil {
 		return nil, err
 	}
-	mc := cc.GetMethodConfig(method)
+
+	var mc serviceconfig.MethodConfig
+	var onCommit func()
+	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+	}
+
+	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+	if err != nil {
+		return nil, toRPCErr(err)
+	}
+
+	if rpcConfig != nil {
+		if rpcConfig.Context != nil {
+			ctx = rpcConfig.Context
+		}
+		mc = rpcConfig.MethodConfig
+		onCommit = rpcConfig.OnCommitted
+		if rpcConfig.Interceptor != nil {
+			rpcInfo.Context = nil
+			ns := newStream
+			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+				if err != nil {
+					return nil, toRPCErr(err)
+				}
+				return cs, nil
+			}
+		}
+	}
+
+	return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+	c := defaultCallInfo()
 	if mc.WaitForReady != nil {
 		c.failFast = !*mc.WaitForReady
 	}
@@ -206,6 +250,7 @@
 		Host:           cc.authority,
 		Method:         method,
 		ContentSubtype: c.contentSubtype,
+		DoneFunc:       doneFunc,
 	}
 
 	// Set our outgoing compression according to the UseCompressor CallOption, if
@@ -229,33 +274,6 @@
 	if c.creds != nil {
 		callHdr.Creds = c.creds
 	}
-	var trInfo *traceInfo
-	if EnableTracing {
-		trInfo = &traceInfo{
-			tr: trace.New("grpc.Sent."+methodFamily(method), method),
-			firstLine: firstLine{
-				client: true,
-			},
-		}
-		if deadline, ok := ctx.Deadline(); ok {
-			trInfo.firstLine.deadline = time.Until(deadline)
-		}
-		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		ctx = trace.NewContext(ctx, trInfo.tr)
-	}
-	ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
-	sh := cc.dopts.copts.StatsHandler
-	var beginTime time.Time
-	if sh != nil {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
-		beginTime = time.Now()
-		begin := &stats.Begin{
-			Client:    true,
-			BeginTime: beginTime,
-			FailFast:  c.failFast,
-		}
-		sh.HandleRPC(ctx, begin)
-	}
 
 	cs := &clientStream{
 		callHdr:      callHdr,
@@ -269,18 +287,15 @@
 		cp:           cp,
 		comp:         comp,
 		cancel:       cancel,
-		beginTime:    beginTime,
 		firstAttempt: true,
+		onCommit:     onCommit,
 	}
 	if !cc.dopts.disableRetry {
 		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
 	}
 	cs.binlog = binarylog.GetMethodLogger(method)
 
-	cs.callInfo.stream = cs
-	// Only this initial attempt has stats/tracing.
-	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
-	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
+	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
 		cs.finish(err)
 		return nil, err
 	}
@@ -328,8 +343,43 @@
 
 // newAttemptLocked creates a new attempt with a transport.
 // If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
+	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+	method := cs.callHdr.Method
+	sh := cs.cc.dopts.copts.StatsHandler
+	var beginTime time.Time
+	if sh != nil {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+		beginTime = time.Now()
+		begin := &stats.Begin{
+			Client:                    true,
+			BeginTime:                 beginTime,
+			FailFast:                  cs.callInfo.failFast,
+			IsClientStream:            cs.desc.ClientStreams,
+			IsServerStream:            cs.desc.ServerStreams,
+			IsTransparentRetryAttempt: isTransparent,
+		}
+		sh.HandleRPC(ctx, begin)
+	}
+
+	var trInfo *traceInfo
+	if EnableTracing {
+		trInfo = &traceInfo{
+			tr: trace.New("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = time.Until(deadline)
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = trace.NewContext(ctx, trInfo.tr)
+	}
+
 	newAttempt := &csAttempt{
+		ctx:          ctx,
+		beginTime:    beginTime,
 		cs:           cs,
 		dc:           cs.cc.dopts.dc,
 		statsHandler: sh,
@@ -344,10 +394,18 @@
 		}
 	}()
 
-	if err := cs.ctx.Err(); err != nil {
+	if err := ctx.Err(); err != nil {
 		return toRPCErr(err)
 	}
-	t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+
+	if cs.cc.parsedTarget.Scheme == "xds" {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
 	if err != nil {
 		return err
 	}
@@ -363,9 +421,11 @@
 func (a *csAttempt) newStream() error {
 	cs := a.cs
 	cs.callHdr.PreviousAttempts = cs.numRetries
-	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
 	if err != nil {
-		return toRPCErr(err)
+		// Return without converting to an RPC error so retry code can
+		// inspect.
+		return err
 	}
 	cs.attempt.s = s
 	cs.attempt.p = &parser{r: s}
@@ -386,8 +446,7 @@
 
 	cancel context.CancelFunc // cancels all attempts
 
-	sentLast  bool // sent an end stream
-	beginTime time.Time
+	sentLast bool // sent an end stream
 
 	methodConfig *MethodConfig
 
@@ -418,7 +477,8 @@
 	// place where we need to check if the attempt is nil.
 	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
-	committed  bool                       // active attempt committed for retry?
+	committed  bool // active attempt committed for retry?
+	onCommit   func()
 	buffer     []func(a *csAttempt) error // operations to replay on retry
 	bufferSize int                        // current size of buffer
 }
@@ -426,6 +486,7 @@
 // csAttempt implements a single transport stream attempt within a
 // clientStream.
 type csAttempt struct {
+	ctx  context.Context
 	cs   *clientStream
 	t    transport.ClientTransport
 	s    *transport.Stream
@@ -444,9 +505,13 @@
 	trInfo *traceInfo
 
 	statsHandler stats.Handler
+	beginTime    time.Time
 }
 
 func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
 	cs.committed = true
 	cs.buffer = nil
 }
@@ -458,37 +523,57 @@
 }
 
 // shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
-	if cs.attempt.s == nil && !cs.callInfo.failFast {
-		// In the event of any error from NewStream (attempt.s == nil), we
-		// never attempted to write anything to the wire, so we can retry
-		// indefinitely for non-fail-fast RPCs.
-		return nil
+// the error that should be returned by the operation.  If the RPC should be
+// retried, the bool indicates whether it is being retried transparently.
+func (cs *clientStream) shouldRetry(err error) (bool, error) {
+	if cs.attempt.s == nil {
+		// Error from NewClientStream.
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected, but assume no I/O was performed and the RPC is not
+			// fatal, so retry indefinitely.
+			return true, nil
+		}
+
+		// Unwrap and convert error.
+		err = toRPCErr(nse.Err)
+
+		// Never retry DoNotRetry errors, which indicate the RPC should not be
+		// retried due to max header list size violation, etc.
+		if nse.DoNotRetry {
+			return false, err
+		}
+
+		// In the event of a non-IO operation error from NewStream, we never
+		// attempted to write anything to the wire, so we can retry
+		// indefinitely.
+		if !nse.DoNotTransparentRetry {
+			return true, nil
+		}
 	}
 	if cs.finished || cs.committed {
 		// RPC is finished or committed; cannot retry.
-		return err
+		return false, err
 	}
 	// Wait for the trailers.
+	unprocessed := false
 	if cs.attempt.s != nil {
 		<-cs.attempt.s.Done()
+		unprocessed = cs.attempt.s.Unprocessed()
 	}
-	if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+	if cs.firstAttempt && unprocessed {
 		// First attempt, stream unprocessed: transparently retry.
-		cs.firstAttempt = false
-		return nil
+		return true, nil
 	}
-	cs.firstAttempt = false
 	if cs.cc.dopts.disableRetry {
-		return err
+		return false, err
 	}
 
 	pushback := 0
 	hasPushback := false
 	if cs.attempt.s != nil {
 		if !cs.attempt.s.TrailersOnly() {
-			return err
+			return false, err
 		}
 
 		// TODO(retry): Move down if the spec changes to not check server pushback
@@ -497,15 +582,15 @@
 		if len(sps) == 1 {
 			var e error
 			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
-				channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
+				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
 				cs.retryThrottler.throttle() // This counts as a failure for throttling.
-				return err
+				return false, err
 			}
 			hasPushback = true
 		} else if len(sps) > 1 {
-			channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
 			cs.retryThrottler.throttle() // This counts as a failure for throttling.
-			return err
+			return false, err
 		}
 	}
 
@@ -516,18 +601,18 @@
 		code = status.Convert(err).Code()
 	}
 
-	rp := cs.methodConfig.retryPolicy
-	if rp == nil || !rp.retryableStatusCodes[code] {
-		return err
+	rp := cs.methodConfig.RetryPolicy
+	if rp == nil || !rp.RetryableStatusCodes[code] {
+		return false, err
 	}
 
 	// Note: the ordering here is important; we count this as a failure
 	// only if the code matched a retryable code.
 	if cs.retryThrottler.throttle() {
-		return err
+		return false, err
 	}
-	if cs.numRetries+1 >= rp.maxAttempts {
-		return err
+	if cs.numRetries+1 >= rp.MaxAttempts {
+		return false, err
 	}
 
 	var dur time.Duration
@@ -535,9 +620,9 @@
 		dur = time.Millisecond * time.Duration(pushback)
 		cs.numRetriesSincePushback = 0
 	} else {
-		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
-		cur := float64(rp.initialBackoff) * fact
-		if max := float64(rp.maxBackoff); cur > max {
+		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.InitialBackoff) * fact
+		if max := float64(rp.MaxBackoff); cur > max {
 			cur = max
 		}
 		dur = time.Duration(grpcrand.Int63n(int64(cur)))
@@ -550,22 +635,24 @@
 	select {
 	case <-t.C:
 		cs.numRetries++
-		return nil
+		return false, nil
 	case <-cs.ctx.Done():
 		t.Stop()
-		return status.FromContextError(cs.ctx.Err()).Err()
+		return false, status.FromContextError(cs.ctx.Err()).Err()
 	}
 }
 
 // Returns nil if a retry was performed and succeeded; error otherwise.
 func (cs *clientStream) retryLocked(lastErr error) error {
 	for {
-		cs.attempt.finish(lastErr)
-		if err := cs.shouldRetry(lastErr); err != nil {
+		cs.attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := cs.shouldRetry(lastErr)
+		if err != nil {
 			cs.commitAttemptLocked()
 			return err
 		}
-		if err := cs.newAttemptLocked(nil, nil); err != nil {
+		cs.firstAttempt = false
+		if err := cs.newAttemptLocked(isTransparent); err != nil {
 			return err
 		}
 		if lastErr = cs.replayBufferLocked(); lastErr == nil {
@@ -586,7 +673,11 @@
 	for {
 		if cs.committed {
 			cs.mu.Unlock()
-			return op(cs.attempt)
+			// toRPCErr is used in case the error from the attempt comes from
+			// NewClientStream, which intentionally doesn't return a status
+			// error to allow for further inspection; all other errors should
+			// already be status errors.
+			return toRPCErr(op(cs.attempt))
 		}
 		a := cs.attempt
 		cs.mu.Unlock()
@@ -799,6 +890,15 @@
 	}
 	cs.finished = true
 	cs.commitAttemptLocked()
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+		// after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo, cs.attempt)
+			}
+		}
+	}
 	cs.mu.Unlock()
 	// For binary logging. only log cancel in finish (could be caused by RPC ctx
 	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
@@ -820,15 +920,6 @@
 			cs.cc.incrCallsSucceeded()
 		}
 	}
-	if cs.attempt != nil {
-		cs.attempt.finish(err)
-		// after functions all rely upon having a stream.
-		if cs.attempt.s != nil {
-			for _, o := range cs.opts {
-				o.after(cs.callInfo)
-			}
-		}
-	}
 	cs.cancel()
 }
 
@@ -851,7 +942,7 @@
 		return io.EOF
 	}
 	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
+		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
 	}
 	if channelz.IsOn() {
 		a.t.IncrMsgSent()
@@ -899,13 +990,13 @@
 		a.mu.Unlock()
 	}
 	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
+		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
 			Client:   true,
 			RecvTime: time.Now(),
 			Payload:  m,
 			// TODO truncate large payload.
 			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
+			WireLength: payInfo.wireLength + headerLen,
 			Length:     len(payInfo.uncompressedBytes),
 		})
 	}
@@ -961,12 +1052,12 @@
 	if a.statsHandler != nil {
 		end := &stats.End{
 			Client:    true,
-			BeginTime: a.cs.beginTime,
+			BeginTime: a.beginTime,
 			EndTime:   time.Now(),
 			Trailer:   tr,
 			Error:     err,
 		}
-		a.statsHandler.HandleRPC(a.cs.ctx, end)
+		a.statsHandler.HandleRPC(a.ctx, end)
 	}
 	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {
@@ -1066,7 +1157,6 @@
 		t:        t,
 	}
 
-	as.callInfo.stream = as
 	s, err := as.t.NewStream(as.ctx, as.callHdr)
 	if err != nil {
 		err = toRPCErr(err)
@@ -1488,7 +1578,7 @@
 			Payload:  m,
 			// TODO truncate large payload.
 			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
+			WireLength: payInfo.wireLength + headerLen,
 			Length:     len(payInfo.uncompressedBytes),
 		})
 	}
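
The stream.go hunks above change shouldRetry from returning a single error to returning (transparent bool, err error), so transparent retries (a failed NewStream that never wrote to the wire, or a first attempt the server left unprocessed) are handled separately from policy-driven retries. Below is a minimal sketch of that decision flow using hypothetical stand-in types, not the real clientStream/csAttempt structures:

package main

import (
	"errors"
	"fmt"
)

// attemptState stands in for the fields the real shouldRetry consults; the
// field names here are illustrative, not gRPC's.
type attemptState struct {
	newStreamErr          error // transport.NewStream failed
	doNotRetry            bool  // e.g. max header list size exceeded
	doNotTransparentRetry bool  // something may already be on the wire
	firstAttempt          bool
	unprocessed           bool // server never processed the stream
}

// shouldRetrySketch mirrors the new two-value contract:
//   (true,  nil) -> transparent retry, does not count against the retry policy
//   (false, nil) -> a policy-based retry was approved (policy, throttling, and
//                   backoff are omitted from this sketch)
//   (false, err) -> commit the attempt and fail the RPC with err
func shouldRetrySketch(a attemptState, lastErr error) (bool, error) {
	if a.newStreamErr != nil {
		if a.doNotRetry {
			return false, a.newStreamErr
		}
		if !a.doNotTransparentRetry {
			return true, nil // nothing reached the wire; safe to retry indefinitely
		}
	}
	if a.firstAttempt && a.unprocessed {
		return true, nil // stream unprocessed on the first attempt
	}
	// The real implementation consults the method's retry policy, the retry
	// throttler, and server pushback here; this sketch simply declines.
	return false, lastErr
}

func main() {
	fmt.Println(shouldRetrySketch(attemptState{firstAttempt: true, unprocessed: true}, errors.New("transport closing")))
}
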
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index 584360f..dbf34e6 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -17,7 +17,12 @@
  */
 
 // Package tap defines the function handles which are executed on the transport
-// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
+// layer of gRPC-Go and related information.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package tap
 
 import (
@@ -32,16 +37,16 @@
 	// TODO: More to be added.
 }
 
-// ServerInHandle defines the function which runs before a new stream is created
-// on the server side. If it returns a non-nil error, the stream will not be
-// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
-// The client will receive an RPC error "code = Unavailable, desc = stream
-// terminated by RST_STREAM with error code: REFUSED_STREAM".
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client.  If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
 //
 // It's intended to be used in situations where you don't want to waste the
-// resources to accept the new stream (e.g. rate-limiting). And the content of
-// the error will be ignored and won't be sent back to the client. For other
-// general usages, please use interceptors.
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
 //
 // Note that it is executed in the per-connection I/O goroutine(s) instead of
 // per-RPC goroutine. Therefore, users should NOT have any
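
Since the revised ServerInHandle documentation above now states that a returned status error is propagated to the client as-is (any other error is reported as PermissionDenied), a short usage sketch may help. Only tap.Info, tap.ServerInHandle, and grpc.InTapHandle below come from gRPC itself; the rate-limiting callback is hypothetical:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// rateLimitTap rejects a stream before it is created when the (hypothetical)
// allow callback says the method is over budget. Returning a status error
// means that exact code and message reach the client; a plain error would be
// surfaced as codes.PermissionDenied instead.
func rateLimitTap(allow func(method string) bool) tap.ServerInHandle {
	return func(ctx context.Context, info *tap.Info) (context.Context, error) {
		if !allow(info.FullMethodName) {
			return nil, status.Errorf(codes.ResourceExhausted, "rate limit exceeded for %s", info.FullMethodName)
		}
		return ctx, nil
	}
}

func main() {
	// The handle runs on the per-connection I/O goroutine, so it must not
	// block (no I/O, no waiting on locks held across RPCs).
	_ = grpc.NewServer(grpc.InTapHandle(rateLimitTap(func(string) bool { return true })))
}
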
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index ca5d55f..9d3fd73 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.29.1"
+const Version = "1.44.1-dev"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index e12024f..d923187 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -28,40 +28,35 @@
 }
 trap cleanup EXIT
 
-PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
+PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
+go version
 
 if [[ "$1" = "-install" ]]; then
-  # Check for module support
-  if go help mod >& /dev/null; then
-    # Install the pinned versions as defined in module tools.
-    pushd ./test/tools
-    go install \
-      golang.org/x/lint/golint \
-      golang.org/x/tools/cmd/goimports \
-      honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
-    popd
-  else
-    # Ye olde `go get` incantation.
-    # Note: this gets the latest version of all tools (vs. the pinned versions
-    # with Go modules).
-    go get -u \
-      golang.org/x/lint/golint \
-      golang.org/x/tools/cmd/goimports \
-      honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
-  fi
+  # Install the pinned versions as defined in module tools.
+  pushd ./test/tools
+  go install \
+    golang.org/x/lint/golint \
+    golang.org/x/tools/cmd/goimports \
+    honnef.co/go/tools/cmd/staticcheck \
+    github.com/client9/misspell/cmd/misspell
+  popd
   if [[ -z "${VET_SKIP_PROTO}" ]]; then
     if [[ "${TRAVIS}" = "true" ]]; then
-      PROTOBUF_VERSION=3.3.0
+      PROTOBUF_VERSION=3.14.0
       PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
       pushd /home/travis
       wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
       unzip ${PROTOC_FILENAME}
       bin/protoc --version
       popd
+    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/runner/go
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
     elif not which protoc > /dev/null; then
       die "Please install protoc into your path"
     fi
@@ -85,18 +80,14 @@
 #   thread safety.
 git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
 
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+
 # - Ensure all ptypes proto packages are renamed when importing.
 not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
 
-# - Check imports that are illegal in appengine (until Go 1.11).
-# TODO: Remove when we drop Go 1.10 support
-go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
-
-# - gofmt, goimports, golint (with exceptions for generated code), go vet.
-gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go"
-golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:"
-go vet -all ./...
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
 
 misspell -error .
 
@@ -107,12 +98,22 @@
     (git status; git --no-pager diff; exit 1)
 fi
 
-# - Check that our module is tidy.
-if go help mod >& /dev/null; then
-  go mod tidy && \
-    git status --porcelain 2>&1 | fail_on_output || \
+# - gofmt, goimports, golint (with exceptions for generated code), go vet,
+# go mod tidy.
+# Perform these checks on each module inside gRPC.
+for MOD_FILE in $(find . -name 'go.mod'); do
+  MOD_DIR=$(dirname ${MOD_FILE})
+  pushd ${MOD_DIR}
+  go vet -all ./... | fail_on_output
+  gofmt -s -d -l . 2>&1 | fail_on_output
+  goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+  golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:"
+
+  go mod tidy
+  git status --porcelain 2>&1 | fail_on_output || \
     (git status; git --no-pager diff; exit 1)
-fi
+  popd
+done
 
 # - Collection of static analysis checks
 #
@@ -123,18 +124,21 @@
 # Error if anything other than deprecation warnings are printed.
 not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
 # Only ignore the following deprecated types/fields/functions.
-not grep -Fv '.HandleResolvedAddrs
-.HandleSubConnStateChange
+not grep -Fv '.CredsBundle
 .HeaderMap
+.Metadata is deprecated: use Attributes
 .NewAddress
 .NewServiceConfig
-.Metadata is deprecated: use Attributes
 .Type is deprecated: use Attributes
-.UpdateBalancerState
+BuildVersion is deprecated
+balancer.ErrTransientFailure
 balancer.Picker
+extDesc.Filename is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
 grpc.CallCustomCodec
 grpc.Code
 grpc.Compressor
+grpc.CustomCodec
 grpc.Decompressor
 grpc.MaxMsgSize
 grpc.MethodConfig
@@ -142,9 +146,7 @@
 grpc.NewGZIPDecompressor
 grpc.RPCCompressor
 grpc.RPCDecompressor
-grpc.RoundRobin
 grpc.ServiceConfig
-grpc.WithBalancer
 grpc.WithBalancerName
 grpc.WithCompressor
 grpc.WithDecompressor
@@ -154,10 +156,56 @@
 grpc.WithTimeout
 http.CloseNotifier
 info.SecurityVersion
-naming.Resolver
-naming.Update
-naming.Watcher
+proto is deprecated
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated
 resolver.Backend
-resolver.GRPCLB' "${SC_OUT}"
+resolver.GRPCLB
+Target is deprecated: Use the Target field in the BuildOptions instead.
+xxx_messageInfo_
+' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+  # Number of files in this go package.
+  fileCount=$(go list -f '{{len .GoFiles}}' $1)
+  if [ ${fileCount} -eq 0 ]; then
+    return 0
+  fi
+  # Number of package errors generated by golint.
+  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+  # golint complains about every file that's missing the package comment. If the
+  # number of files for this package is greater than the number of errors, there's
+  # at least one file with package comment, good. Otherwise, fail.
+  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+    echo "Package $1 (with ${fileCount} files) is missing package comment"
+    return 1
+  fi
+}
+lint_package_comment() {
+  set +ex
+
+  count=0
+  for i in $(go list ./...); do
+    lint_package_comment_per_package "$i"
+    ((count += $?))
+  done
+
+  set -ex
+  return $count
+}
+lint_package_comment
 
 echo SUCCESS
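
As a footnote to the new vet rule above that forbids calling grpclog.Info/Warning/Error directly, and to the logger argument now threaded through the channelz.Infof/Warningf call sites earlier in this diff: the intended pattern is one grpclog.Component logger per package. A minimal sketch using only the public grpclog API (the component name is made up):

package main

import "google.golang.org/grpc/grpclog"

// One component logger per package; the component name is prefixed to each
// log line, and output is still governed by the usual
// GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL environment
// variables.
var logger = grpclog.Component("example")

func main() {
	logger.Infof("starting with %d workers", 4)
	if logger.V(2) { // emit only when verbosity >= 2
		logger.Info("verbose startup details")
	}
}
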