Import of https://github.com/ciena/voltctl at commit 40d61fbf3f910ed4017cf67c9c79e8e1f82a33a5
Change-Id: I8464c59e60d76cb8612891db3303878975b5416c
diff --git a/vendor/github.com/fullstorydev/grpcurl/.gitignore b/vendor/github.com/fullstorydev/grpcurl/.gitignore
new file mode 100644
index 0000000..849ddff
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.gitignore
@@ -0,0 +1 @@
+dist/
diff --git a/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
new file mode 100644
index 0000000..e7bfd3e
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
@@ -0,0 +1,24 @@
+builds:
+ - binary: grpcurl
+ main: ./cmd/grpcurl
+ goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - amd64
+ - 386
+ ldflags:
+ - -s -w -X main.version=v{{.Version}}
+
+archive:
+ format: tar.gz
+ format_overrides:
+ - goos: windows
+ format: zip
+ replacements:
+ amd64: x86_64
+ 386: x86_32
+ darwin: osx
+ files:
+ - LICENSE
diff --git a/vendor/github.com/fullstorydev/grpcurl/.travis.yml b/vendor/github.com/fullstorydev/grpcurl/.travis.yml
new file mode 100644
index 0000000..4b0d5eb
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: "1.9"
+ - go: "1.10"
+ - go: "1.11"
+ env:
+ - GO111MODULE=off
+ - VET=1
+ - go: "1.11"
+ env: GO111MODULE=on
+ - go: "1.12"
+ env: GO111MODULE=off
+ - go: "1.12"
+ env: GO111MODULE=on
+ - go: tip
+
+script:
+ - if [[ "$VET" = 1 ]]; then make ci; else make deps test; fi
diff --git a/vendor/github.com/fullstorydev/grpcurl/LICENSE b/vendor/github.com/fullstorydev/grpcurl/LICENSE
new file mode 100644
index 0000000..6b678c5
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 FullStory, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fullstorydev/grpcurl/Makefile b/vendor/github.com/fullstorydev/grpcurl/Makefile
new file mode 100644
index 0000000..982d043
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/Makefile
@@ -0,0 +1,76 @@
+dev_build_version=$(shell git describe --tags --always --dirty)
+
+# TODO: run golint and errcheck, but only to catch *new* violations and
+# decide whether to change code or not (e.g. we need to be able to whitelist
+# violations already in the code). They can be useful to catch errors, but
+# they are just too noisy to be a requirement for a CI -- we don't even *want*
+# to fix some of the things they consider to be violations.
+.PHONY: ci
+ci: deps checkgofmt vet staticcheck ineffassign predeclared test
+
+.PHONY: deps
+deps:
+ go get -d -v -t ./...
+
+.PHONY: updatedeps
+updatedeps:
+ go get -d -v -t -u -f ./...
+
+.PHONY: install
+install:
+ go install -ldflags '-X "main.version=dev build $(dev_build_version)"' ./...
+
+.PHONY: release
+release:
+ @GO111MODULE=off go get github.com/goreleaser/goreleaser
+ goreleaser --rm-dist
+
+.PHONY: checkgofmt
+checkgofmt:
+ gofmt -s -l .
+ @if [ -n "$$(gofmt -s -l .)" ]; then \
+ exit 1; \
+ fi
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+# TODO: remove the ignored check; need it for now because it
+# is complaining about a deprecated comment added to grpc,
+# but it's not yet released. Once the new (non-deprecated)
+# API is included in a release, we can move to that new
+# version and fix the call site to no longer use deprecated
+# method.
+# This all works fine with Go modules, but without modules,
+# CI is just getting latest master for dependencies like grpc.
+.PHONY: staticcheck
+staticcheck:
+ @go get honnef.co/go/tools/cmd/staticcheck
+ staticcheck ./...
+
+.PHONY: ineffassign
+ineffassign:
+ @go get github.com/gordonklaus/ineffassign
+ ineffassign .
+
+.PHONY: predeclared
+predeclared:
+ @go get github.com/nishanths/predeclared
+ predeclared .
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: golint
+golint:
+ @go get golang.org/x/lint/golint
+ golint -min_confidence 0.9 -set_exit_status ./...
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: errcheck
+errcheck:
+ @go get github.com/kisielk/errcheck
+ errcheck ./...
+
+.PHONY: test
+test:
+ go test -race ./...
diff --git a/vendor/github.com/fullstorydev/grpcurl/README.md b/vendor/github.com/fullstorydev/grpcurl/README.md
new file mode 100644
index 0000000..1713f2f
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/README.md
@@ -0,0 +1,214 @@
+# gRPCurl
+[![Build Status](https://travis-ci.org/fullstorydev/grpcurl.svg?branch=master)](https://travis-ci.org/fullstorydev/grpcurl/branches)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fullstorydev/grpcurl)](https://goreportcard.com/report/github.com/fullstorydev/grpcurl)
+
+`grpcurl` is a command-line tool that lets you interact with gRPC servers. It's
+basically `curl` for gRPC servers.
+
+The main purpose for this tool is to invoke RPC methods on a gRPC server from the
+command-line. gRPC servers use a binary encoding on the wire
+([protocol buffers](https://developers.google.com/protocol-buffers/), or "protobufs"
+for short). So they are basically impossible to interact with using regular `curl`
+(and older versions of `curl` that do not support HTTP/2 are of course non-starters).
+This program accepts messages using JSON encoding, which is much more friendly for both
+humans and scripts.
+
+With this tool you can also browse the schema for gRPC services, either by querying
+a server that supports [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto),
+by reading proto source files, or by loading in compiled "protoset" files (files that contain
+encoded file [descriptor protos](https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto)).
+In fact, the way the tool transforms JSON request data into a binary encoded protobuf
+is using that very same schema. So, if the server you interact with does not support
+reflection, you will either need the proto source files that define the service or need
+protoset files that `grpcurl` can use.
+
+This repo also provides a library package, `github.com/fullstorydev/grpcurl`, that has
+functions for simplifying the construction of other command-line tools that dynamically
+invoke gRPC endpoints. This code is a great example of how to use the various packages of
+the [protoreflect](https://godoc.org/github.com/jhump/protoreflect) library, and shows
+off what they can do.
+
+See also the [`grpcurl` talk at GopherCon 2018](https://www.youtube.com/watch?v=dDr-8kbMnaw).
+
+## Features
+`grpcurl` supports all kinds of RPC methods, including streaming methods. You can even
+operate bi-directional streaming methods interactively by running `grpcurl` from an
+interactive terminal and using stdin as the request body!
+
+`grpcurl` supports both plain-text and TLS servers and has numerous options for TLS
+configuration. It also supports mutual TLS, where the client is required to present a
+client certificate.
+
+As mentioned above, `grpcurl` works seamlessly if the server supports the reflection
+service. If not, you can supply the `.proto` source files or you can supply protoset
+files (containing compiled descriptors, produced by `protoc`) to `grpcurl`.
+
+## Installation
+
+### Binaries
+
+Download the binary from the [releases](https://github.com/fullstorydev/grpcurl/releases) page.
+
+On macOS, `grpcurl` is available via Homebrew:
+```shell
+brew install grpcurl
+```
+
+### From Source
+You can use the `go` tool to install `grpcurl`:
+```shell
+go get github.com/fullstorydev/grpcurl
+go install github.com/fullstorydev/grpcurl/cmd/grpcurl
+```
+
+This installs the command into the `bin` sub-folder of wherever your `$GOPATH`
+environment variable points. If this directory is already in your `$PATH`, then
+you should be good to go.
+
+If you have already pulled down this repo to a location that is not in your
+`$GOPATH` and want to build from the sources, you can `cd` into the repo and then
+run `make install`.
+
+If you encounter compile errors, you could have out-dated versions of `grpcurl`'s
+dependencies. You can update the dependencies by running `make updatedeps`. You can
+also use [`vgo`](https://github.com/golang/vgo) to install, which will use the right
+versions of dependencies. Or, if you are using Go 1.11, you can add `GO111MODULE=on`
+as a prefix to the commands above, which will also build using the right versions of
+dependencies (vs. whatever you may already in your `GOPATH`).
+
+## Usage
+The usage doc for the tool explains the numerous options:
+```shell
+grpcurl -help
+```
+
+In the sections below, you will find numerous examples demonstrating how to use
+`grpcurl`.
+
+### Invoking RPCs
+Invoking an RPC on a trusted server (e.g. TLS without self-signed key or custom CA)
+that requires no client certs and supports server reflection is the simplest thing to
+do with `grpcurl`. This minimal invocation sends an empty request body:
+```shell
+grpcurl grpc.server.com:443 my.custom.server.Service/Method
+```
+
+To send a non-empty request, use the `-d` argument. Note that all arguments must come
+*before* the server address and method name:
+```shell
+grpcurl -d '{"id": 1234, "tags": ["foo","bar"]}' \
+ grpc.server.com:443 my.custom.server.Service/Method
+```
+
+As can be seen in the example, the supplied body must be in JSON format. The body will
+be parsed and then transmitted to the server in the protobuf binary format.
+
+If you want to include `grpcurl` in a command pipeline, such as when using `jq` to
+create a request body, you can use `-d @`, which tells `grpcurl` to read the actual
+request body from stdin:
+```shell
+grpcurl -d @ grpc.server.com:443 my.custom.server.Service/Method <<EOM
+{
+ "id": 1234,
+ "tags": [
+ "foor",
+ "bar"
+ ]
+}
+EOM
+```
+
+### Listing Services
+To list all services exposed by a server, use the "list" verb. When using `.proto` source
+or protoset files instead of server reflection, this lists all services defined in the
+source or protoset files.
+```shell
+# Server supports reflection
+grpcurl localhost:8787 list
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin list
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto list
+```
+
+The "list" verb also lets you see all methods in a particular service:
+```shell
+grpcurl localhost:8787 list my.custom.server.Service
+```
+
+### Describing Elements
+The "describe" verb will print the type of any symbol that the server knows about
+or that is found in a given protoset file. It also prints a description of that
+symbol, in the form of snippets of proto source. It won't necessarily be the
+original source that defined the element, but it will be equivalent.
+
+```shell
+# Server supports reflection
+grpcurl localhost:8787 describe my.custom.server.Service.MethodOne
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin describe my.custom.server.Service.MethodOne
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto describe my.custom.server.Service.MethodOne
+```
+
+## Descriptor Sources
+The `grpcurl` tool can operate on a variety of sources for descriptors. The descriptors
+are required, in order for `grpcurl` to understand the RPC schema, translate inputs
+into the protobuf binary format as well as translate responses from the binary format
+into text. The sections below document the supported sources and what command-line flags
+are needed to use them.
+
+### Server Reflection
+
+Without any additional command-line flags, `grpcurl` will try to use [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto).
+
+Examples for how to set up server reflection can be found [here](https://github.com/grpc/grpc/blob/master/doc/server-reflection.md#known-implementations).
+
+When using reflection, the server address (host:port or path to Unix socket) is required
+even for "list" and "describe" operations, so that `grpcurl` can connect to the server
+and ask it for its descriptors.
+
+### Proto Source Files
+To use `grpcurl` on servers that do not support reflection, you can use `.proto` source
+files.
+
+In addition to using `-proto` flags to point `grpcurl` at the relevant proto source file(s),
+you may also need to supply `-import-path` flags to tell `grpcurl` the folders from which
+dependencies can be imported.
+
+Just like when compiling with `protoc`, you do *not* need to provide an import path for the
+location of the standard protos included with `protoc` (which contain various "well-known
+types" with a package definition of `google.protobuf`). These files are "known" by `grpcurl`
+as a snapshot of their descriptors is built into the `grpcurl` binary.
+
+When using proto sources, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the proto
+source files.
+
+### Protoset Files
+You can also use compiled protoset files with `grpcurl`. If you are scripting `grpcurl` and
+need to re-use the same proto sources for many invocations, you will see better performance
+by using protoset files (since it skips the parsing and compilation steps with each
+invocation).
+
+Protoset files contain binary encoded `google.protobuf.FileDescriptorSet` protos. To create
+a protoset file, invoke `protoc` with the `*.proto` files that define the service:
+```shell
+protoc --proto_path=. \
+ --descriptor_set_out=myservice.protoset \
+ --include_imports \
+ my/custom/server/service.proto
+```
+
+The `--descriptor_set_out` argument is what tells `protoc` to produce a protoset,
+and the `--include_imports` argument is necessary for the protoset to contain
+everything that `grpcurl` needs to process and understand the schema.
+
+When using protosets, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the
+protoset files.
+
diff --git a/vendor/github.com/fullstorydev/grpcurl/desc_source.go b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
new file mode 100644
index 0000000..c23ae3d
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
@@ -0,0 +1,253 @@
+package grpcurl
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/protoparse"
+ "github.com/jhump/protoreflect/dynamic"
+ "github.com/jhump/protoreflect/grpcreflect"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// ErrReflectionNotSupported is returned by DescriptorSource operations that
+// rely on interacting with the reflection service when the source does not
+// actually expose the reflection service. When this occurs, an alternate source
+// (like file descriptor sets) must be used.
+var ErrReflectionNotSupported = errors.New("server does not support the reflection API")
+
+// DescriptorSource is a source of protobuf descriptor information. It can be backed by a FileDescriptorSet
+// proto (like a file generated by protoc) or a remote server that supports the reflection API.
+type DescriptorSource interface {
+ // ListServices returns a list of fully-qualified service names. It will be all services in a set of
+ // descriptor files or the set of all services exposed by a gRPC server.
+ ListServices() ([]string, error)
+ // FindSymbol returns a descriptor for the given fully-qualified symbol name.
+ FindSymbol(fullyQualifiedName string) (desc.Descriptor, error)
+ // AllExtensionsForType returns all known extension fields that extend the given message type name.
+ AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error)
+}
+
+// DescriptorSourceFromProtoSets creates a DescriptorSource that is backed by the named files, whose contents
+// are encoded FileDescriptorSet protos.
+func DescriptorSourceFromProtoSets(fileNames ...string) (DescriptorSource, error) {
+ files := &descpb.FileDescriptorSet{}
+ for _, fileName := range fileNames {
+ b, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, fmt.Errorf("could not load protoset file %q: %v", fileName, err)
+ }
+ var fs descpb.FileDescriptorSet
+ err = proto.Unmarshal(b, &fs)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse contents of protoset file %q: %v", fileName, err)
+ }
+ files.File = append(files.File, fs.File...)
+ }
+ return DescriptorSourceFromFileDescriptorSet(files)
+}
+
+// DescriptorSourceFromProtoFiles creates a DescriptorSource that is backed by the named files,
+// whose contents are Protocol Buffer source files. The given importPaths are used to locate
+// any imported files.
+func DescriptorSourceFromProtoFiles(importPaths []string, fileNames ...string) (DescriptorSource, error) {
+ fileNames, err := protoparse.ResolveFilenames(importPaths, fileNames...)
+ if err != nil {
+ return nil, err
+ }
+ p := protoparse.Parser{
+ ImportPaths: importPaths,
+ InferImportPaths: len(importPaths) == 0,
+ IncludeSourceCodeInfo: true,
+ }
+ fds, err := p.ParseFiles(fileNames...)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse given files: %v", err)
+ }
+ return DescriptorSourceFromFileDescriptors(fds...)
+}
+
+// DescriptorSourceFromFileDescriptorSet creates a DescriptorSource that is backed by the FileDescriptorSet.
+func DescriptorSourceFromFileDescriptorSet(files *descpb.FileDescriptorSet) (DescriptorSource, error) {
+ unresolved := map[string]*descpb.FileDescriptorProto{}
+ for _, fd := range files.File {
+ unresolved[fd.GetName()] = fd
+ }
+ resolved := map[string]*desc.FileDescriptor{}
+ for _, fd := range files.File {
+ _, err := resolveFileDescriptor(unresolved, resolved, fd.GetName())
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &fileSource{files: resolved}, nil
+}
+
+func resolveFileDescriptor(unresolved map[string]*descpb.FileDescriptorProto, resolved map[string]*desc.FileDescriptor, filename string) (*desc.FileDescriptor, error) {
+ if r, ok := resolved[filename]; ok {
+ return r, nil
+ }
+ fd, ok := unresolved[filename]
+ if !ok {
+ return nil, fmt.Errorf("no descriptor found for %q", filename)
+ }
+ deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+ for _, dep := range fd.GetDependency() {
+ depFd, err := resolveFileDescriptor(unresolved, resolved, dep)
+ if err != nil {
+ return nil, err
+ }
+ deps = append(deps, depFd)
+ }
+ result, err := desc.CreateFileDescriptor(fd, deps...)
+ if err != nil {
+ return nil, err
+ }
+ resolved[filename] = result
+ return result, nil
+}
+
+// DescriptorSourceFromFileDescriptors creates a DescriptorSource that is backed by the given
+// file descriptors
+func DescriptorSourceFromFileDescriptors(files ...*desc.FileDescriptor) (DescriptorSource, error) {
+ fds := map[string]*desc.FileDescriptor{}
+ for _, fd := range files {
+ if err := addFile(fd, fds); err != nil {
+ return nil, err
+ }
+ }
+ return &fileSource{files: fds}, nil
+}
+
+func addFile(fd *desc.FileDescriptor, fds map[string]*desc.FileDescriptor) error {
+ name := fd.GetName()
+ if existing, ok := fds[name]; ok {
+ // already added this file
+ if existing != fd {
+ // doh! duplicate files provided
+ return fmt.Errorf("given files include multiple copies of %q", name)
+ }
+ return nil
+ }
+ fds[name] = fd
+ for _, dep := range fd.GetDependencies() {
+ if err := addFile(dep, fds); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type fileSource struct {
+ files map[string]*desc.FileDescriptor
+ er *dynamic.ExtensionRegistry
+ erInit sync.Once
+}
+
+func (fs *fileSource) ListServices() ([]string, error) {
+ set := map[string]bool{}
+ for _, fd := range fs.files {
+ for _, svc := range fd.GetServices() {
+ set[svc.GetFullyQualifiedName()] = true
+ }
+ }
+ sl := make([]string, 0, len(set))
+ for svc := range set {
+ sl = append(sl, svc)
+ }
+ return sl, nil
+}
+
+// GetAllFiles returns all of the underlying file descriptors. This is
+// more thorough and more efficient than the fallback strategy used by
+// the GetAllFiles package method, for enumerating all files from a
+// descriptor source.
+func (fs *fileSource) GetAllFiles() ([]*desc.FileDescriptor, error) {
+ files := make([]*desc.FileDescriptor, len(fs.files))
+ i := 0
+ for _, fd := range fs.files {
+ files[i] = fd
+ i++
+ }
+ return files, nil
+}
+
+func (fs *fileSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+ for _, fd := range fs.files {
+ if dsc := fd.FindSymbol(fullyQualifiedName); dsc != nil {
+ return dsc, nil
+ }
+ }
+ return nil, notFound("Symbol", fullyQualifiedName)
+}
+
+func (fs *fileSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+ fs.erInit.Do(func() {
+ fs.er = &dynamic.ExtensionRegistry{}
+ for _, fd := range fs.files {
+ fs.er.AddExtensionsFromFile(fd)
+ }
+ })
+ return fs.er.AllExtensionsForType(typeName), nil
+}
+
+// DescriptorSourceFromServer creates a DescriptorSource that uses the given gRPC reflection client
+// to interrogate a server for descriptor information. If the server does not support the reflection
+// API then the various DescriptorSource methods will return ErrReflectionNotSupported
+func DescriptorSourceFromServer(_ context.Context, refClient *grpcreflect.Client) DescriptorSource {
+ return serverSource{client: refClient}
+}
+
+type serverSource struct {
+ client *grpcreflect.Client
+}
+
+func (ss serverSource) ListServices() ([]string, error) {
+ svcs, err := ss.client.ListServices()
+ return svcs, reflectionSupport(err)
+}
+
+func (ss serverSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+ file, err := ss.client.FileContainingSymbol(fullyQualifiedName)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ d := file.FindSymbol(fullyQualifiedName)
+ if d == nil {
+ return nil, notFound("Symbol", fullyQualifiedName)
+ }
+ return d, nil
+}
+
+func (ss serverSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+ var exts []*desc.FieldDescriptor
+ nums, err := ss.client.AllExtensionNumbersForType(typeName)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ for _, fieldNum := range nums {
+ ext, err := ss.client.ResolveExtension(typeName, fieldNum)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ exts = append(exts, ext)
+ }
+ return exts, nil
+}
+
+func reflectionSupport(err error) error {
+ if err == nil {
+ return nil
+ }
+ if stat, ok := status.FromError(err); ok && stat.Code() == codes.Unimplemented {
+ return ErrReflectionNotSupported
+ }
+ return err
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/format.go b/vendor/github.com/fullstorydev/grpcurl/format.go
new file mode 100644
index 0000000..db93eb4
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/format.go
@@ -0,0 +1,469 @@
+package grpcurl
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/dynamic"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// RequestParser processes input into messages.
+type RequestParser interface {
+ // Next parses input data into the given request message. If called after
+ // input is exhausted, it returns io.EOF. If the caller re-uses the same
+ // instance in multiple calls to Next, it should call msg.Reset() in between
+ // each call.
+ Next(msg proto.Message) error
+ // NumRequests returns the number of messages that have been parsed and
+ // returned by a call to Next.
+ NumRequests() int
+}
+
+type jsonRequestParser struct {
+ dec *json.Decoder
+ unmarshaler jsonpb.Unmarshaler
+ requestCount int
+}
+
+// NewJSONRequestParser returns a RequestParser that reads data in JSON format
+// from the given reader. The given resolver is used to assist with decoding of
+// google.protobuf.Any messages.
+//
+// Input data that contains more than one message should just include all
+// messages concatenated (though whitespace is necessary to separate some kinds
+// of values in JSON).
+//
+// If the given reader has no data, the returned parser will return io.EOF on
+// the very first call.
+func NewJSONRequestParser(in io.Reader, resolver jsonpb.AnyResolver) RequestParser {
+ return &jsonRequestParser{
+ dec: json.NewDecoder(in),
+ unmarshaler: jsonpb.Unmarshaler{AnyResolver: resolver},
+ }
+}
+
+func (f *jsonRequestParser) Next(m proto.Message) error {
+ var msg json.RawMessage
+ if err := f.dec.Decode(&msg); err != nil {
+ return err
+ }
+ f.requestCount++
+ return f.unmarshaler.Unmarshal(bytes.NewReader(msg), m)
+}
+
+func (f *jsonRequestParser) NumRequests() int {
+ return f.requestCount
+}
+
+const (
+ textSeparatorChar = 0x1e
+)
+
+type textRequestParser struct {
+ r *bufio.Reader
+ err error
+ requestCount int
+}
+
+// NewTextRequestParser returns a RequestParser that reads data in the protobuf
+// text format from the given reader.
+//
+// Input data that contains more than one message should include an ASCII
+// 'Record Separator' character (0x1E) between each message.
+//
+// Empty text is a valid text format and represents an empty message. So if the
+// given reader has no data, the returned parser will yield an empty message
+// for the first call to Next and then return io.EOF thereafter. This also means
+// that if the input data ends with a record separator, then a final empty
+// message will be parsed *after* the separator.
+func NewTextRequestParser(in io.Reader) RequestParser {
+ return &textRequestParser{r: bufio.NewReader(in)}
+}
+
+func (f *textRequestParser) Next(m proto.Message) error {
+ if f.err != nil {
+ return f.err
+ }
+
+ var b []byte
+ b, f.err = f.r.ReadBytes(textSeparatorChar)
+ if f.err != nil && f.err != io.EOF {
+ return f.err
+ }
+ // remove delimiter
+ if len(b) > 0 && b[len(b)-1] == textSeparatorChar {
+ b = b[:len(b)-1]
+ }
+
+ f.requestCount++
+
+ return proto.UnmarshalText(string(b), m)
+}
+
+func (f *textRequestParser) NumRequests() int {
+ return f.requestCount
+}
+
+// Formatter translates messages into string representations.
+type Formatter func(proto.Message) (string, error)
+
+// NewJSONFormatter returns a formatter that returns JSON strings. The JSON will
+// include empty/default values (instead of just omitted them) if emitDefaults
+// is true. The given resolver is used to assist with encoding of
+// google.protobuf.Any messages.
+func NewJSONFormatter(emitDefaults bool, resolver jsonpb.AnyResolver) Formatter {
+ marshaler := jsonpb.Marshaler{
+ EmitDefaults: emitDefaults,
+ Indent: " ",
+ AnyResolver: resolver,
+ }
+ return marshaler.MarshalToString
+}
+
+// NewTextFormatter returns a formatter that returns strings in the protobuf
+// text format. If includeSeparator is true then, when invoked to format
+// multiple messages, all messages after the first one will be prefixed with the
+// ASCII 'Record Separator' character (0x1E).
+func NewTextFormatter(includeSeparator bool) Formatter {
+ tf := textFormatter{useSeparator: includeSeparator}
+ return tf.format
+}
+
+type textFormatter struct {
+ useSeparator bool
+ numFormatted int
+}
+
+var protoTextMarshaler = proto.TextMarshaler{ExpandAny: true}
+
+func (tf *textFormatter) format(m proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if tf.useSeparator && tf.numFormatted > 0 {
+ if err := buf.WriteByte(textSeparatorChar); err != nil {
+ return "", err
+ }
+ }
+
+ // If message implements MarshalText method (such as a *dynamic.Message),
+ // it won't get details about whether or not to format to text compactly
+ // or with indentation. So first see if the message also implements a
+ // MarshalTextIndent method and use that instead if available.
+ type indentMarshaler interface {
+ MarshalTextIndent() ([]byte, error)
+ }
+
+ if indenter, ok := m.(indentMarshaler); ok {
+ b, err := indenter.MarshalTextIndent()
+ if err != nil {
+ return "", err
+ }
+ if _, err := buf.Write(b); err != nil {
+ return "", err
+ }
+ } else if err := protoTextMarshaler.Marshal(&buf, m); err != nil {
+ return "", err
+ }
+
+ // no trailing newline needed
+ str := buf.String()
+ if str[len(str)-1] == '\n' {
+ str = str[:len(str)-1]
+ }
+
+ tf.numFormatted++
+
+ return str, nil
+}
+
+type Format string
+
+const (
+ FormatJSON = Format("json")
+ FormatText = Format("text")
+)
+
+// AnyResolverFromDescriptorSource returns an AnyResolver that will search for
+// types using the given descriptor source.
+func AnyResolverFromDescriptorSource(source DescriptorSource) jsonpb.AnyResolver {
+ return &anyResolver{source: source}
+}
+
+// AnyResolverFromDescriptorSourceWithFallback returns an AnyResolver that will
+// search for types using the given descriptor source and then fallback to a
+// special message if the type is not found. The fallback type will render to
+// JSON with a "@type" property, just like an Any message, but also with a
+// custom "@value" property that includes the binary encoded payload.
+func AnyResolverFromDescriptorSourceWithFallback(source DescriptorSource) jsonpb.AnyResolver {
+ res := anyResolver{source: source}
+ return &anyResolverWithFallback{AnyResolver: &res}
+}
+
+type anyResolver struct {
+ source DescriptorSource
+
+ er dynamic.ExtensionRegistry
+
+ mu sync.RWMutex
+ mf *dynamic.MessageFactory
+ resolved map[string]func() proto.Message
+}
+
+// Resolve implements jsonpb.AnyResolver. It strips any URL prefix from
+// typeUrl (everything up to and including the last '/'), then returns a new
+// instance of the named message type, consulting the descriptor source on
+// first use and caching a factory for subsequent calls.
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+
+	r.mu.RLock()
+	factory := r.resolved[mname]
+	r.mu.RUnlock()
+
+	// already resolved?
+	if factory != nil {
+		return factory(), nil
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	// double-check, in case we were racing with another goroutine
+	// that resolved this one
+	factory = r.resolved[mname]
+	if factory != nil {
+		return factory(), nil
+	}
+
+	// use descriptor source to resolve message type
+	d, err := r.source.FindSymbol(mname)
+	if err != nil {
+		return nil, err
+	}
+	md, ok := d.(*desc.MessageDescriptor)
+	if !ok {
+		// symbol exists but is not a message (e.g. a service or enum)
+		return nil, fmt.Errorf("unknown message: %s", typeUrl)
+	}
+	// populate any extensions for this message, too
+	if exts, err := r.source.AllExtensionsForType(mname); err != nil {
+		return nil, err
+	} else if err := r.er.AddExtension(exts...); err != nil {
+		return nil, err
+	}
+
+	// lazily create the factory, backed by the accumulated extension registry
+	if r.mf == nil {
+		r.mf = dynamic.NewMessageFactoryWithExtensionRegistry(&r.er)
+	}
+
+	factory = func() proto.Message {
+		return r.mf.NewMessage(md)
+	}
+	if r.resolved == nil {
+		r.resolved = map[string]func() proto.Message{}
+	}
+	r.resolved[mname] = factory
+	return factory(), nil
+}
+
+// anyResolverWithFallback can provide a fallback value for unknown
+// messages that will format itself to JSON using an "@value" field
+// that has the base64-encoded data for the unknown message value.
+// It decorates the embedded AnyResolver, which is tried first.
+type anyResolverWithFallback struct {
+	jsonpb.AnyResolver
+}
+
+// Resolve tries the wrapped resolver first, then the process-wide proto
+// registry, and finally returns an unknownAny placeholder so that callers can
+// still render the message (as base64 data) instead of failing.
+func (r anyResolverWithFallback) Resolve(typeUrl string) (proto.Message, error) {
+	msg, err := r.AnyResolver.Resolve(typeUrl)
+	if err == nil {
+		return msg, err
+	}
+
+	// Try "default" resolution logic. This mirrors the default behavior
+	// of jsonpb, which checks to see if the given message name is registered
+	// in the proto package.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt != nil {
+		return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+	}
+
+	// finally, fallback to a special placeholder that can marshal itself
+	// to JSON using a special "@value" property to show base64-encoded
+	// data for the embedded message
+	return &unknownAny{TypeUrl: typeUrl, Error: fmt.Sprintf("%s is not recognized; see @value for raw binary message data", mname)}, nil
+}
+
+// unknownAny is the placeholder returned when a type URL cannot be resolved.
+// It renders to JSON like an Any message but with extra properties: "@error"
+// explaining the failure and "@value" holding the base64-encoded payload.
+type unknownAny struct {
+	TypeUrl string `json:"@type"`
+	Error string `json:"@error"`
+	Value string `json:"@value"`
+}
+
+// MarshalJSONPB renders the placeholder via encoding/json, honoring the
+// marshaler's indentation setting.
+func (a *unknownAny) MarshalJSONPB(jsm *jsonpb.Marshaler) ([]byte, error) {
+	if jsm.Indent == "" {
+		return json.Marshal(a)
+	}
+	return json.MarshalIndent(a, "", jsm.Indent)
+}
+
+// Unmarshal implements proto unmarshaling by capturing the raw bytes as
+// base64 text rather than decoding them, since the real type is unknown.
+func (a *unknownAny) Unmarshal(b []byte) error {
+	a.Value = base64.StdEncoding.EncodeToString(b)
+	return nil
+}
+
+// Reset implements proto.Message by clearing the captured payload.
+func (a *unknownAny) Reset() {
+	a.Value = ""
+}
+
+func (a *unknownAny) String() string {
+ b, err := a.MarshalJSONPB(&jsonpb.Marshaler{})
+ if err != nil {
+ return fmt.Sprintf("ERROR: %v", err.Error())
+ }
+ return string(b)
+}
+
+// ProtoMessage marks unknownAny as a proto.Message implementation.
+func (a *unknownAny) ProtoMessage() {
+}
+
+// compile-time check that unknownAny satisfies proto.Message
+var _ proto.Message = (*unknownAny)(nil)
+
+// RequestParserAndFormatterFor returns a request parser and formatter for the
+// given format. The given descriptor source may be used for parsing message
+// data (if needed by the format). The flags emitJSONDefaultFields and
+// includeTextSeparator are options for JSON and protobuf text formats,
+// respectively. Requests will be parsed from the given in.
+func RequestParserAndFormatterFor(format Format, descSource DescriptorSource, emitJSONDefaultFields, includeTextSeparator bool, in io.Reader) (RequestParser, Formatter, error) {
+	switch format {
+	case FormatJSON:
+		// the JSON formatter gets a fallback-capable resolver so unknown Any
+		// payloads render as base64 instead of failing
+		resolver := AnyResolverFromDescriptorSource(descSource)
+		return NewJSONRequestParser(in, resolver), NewJSONFormatter(emitJSONDefaultFields, anyResolverWithFallback{AnyResolver: resolver}), nil
+	case FormatText:
+		return NewTextRequestParser(in), NewTextFormatter(includeTextSeparator), nil
+	default:
+		return nil, nil, fmt.Errorf("unknown format: %s", format)
+	}
+}
+
+// DefaultEventHandler logs events to a writer. This is not thread-safe, but is
+// safe for use with InvokeRPC as long as NumResponses and Status are not read
+// until the call to InvokeRPC completes.
+type DefaultEventHandler struct {
+	// out receives all logged output.
+	out io.Writer
+	// descSource is used to render method descriptors in verbose mode.
+	descSource DescriptorSource
+	// formatter converts response and detail messages to display strings.
+	formatter func(proto.Message) (string, error)
+	// verbose enables logging of metadata and descriptor events, not just responses.
+	verbose bool
+
+	// NumResponses is the number of responses that have been received.
+	NumResponses int
+	// Status is the status that was received at the end of an RPC. It is
+	// nil if the RPC is still in progress.
+	Status *status.Status
+}
+
+// NewDefaultEventHandler returns an InvocationEventHandler that logs events to
+// the given output. If verbose is true, all events are logged. Otherwise, only
+// response messages are logged.
+func NewDefaultEventHandler(out io.Writer, descSource DescriptorSource, formatter Formatter, verbose bool) *DefaultEventHandler {
+	h := &DefaultEventHandler{
+		out:        out,
+		descSource: descSource,
+		formatter:  formatter,
+		verbose:    verbose,
+	}
+	return h
+}
+
+// compile-time check that DefaultEventHandler implements InvocationEventHandler
+var _ InvocationEventHandler = (*DefaultEventHandler)(nil)
+
+// OnResolveMethod logs the resolved method descriptor, but only in verbose
+// mode. Errors rendering the descriptor are silently ignored (best-effort).
+func (h *DefaultEventHandler) OnResolveMethod(md *desc.MethodDescriptor) {
+	if h.verbose {
+		txt, err := GetDescriptorText(md, h.descSource)
+		if err == nil {
+			fmt.Fprintf(h.out, "\nResolved method descriptor:\n%s\n", txt)
+		}
+	}
+}
+
+// OnSendHeaders logs the outgoing request metadata in verbose mode.
+func (h *DefaultEventHandler) OnSendHeaders(md metadata.MD) {
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nRequest metadata to send:\n%s\n", MetadataToString(md))
+	}
+}
+
+// OnReceiveHeaders logs the response header metadata in verbose mode.
+func (h *DefaultEventHandler) OnReceiveHeaders(md metadata.MD) {
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nResponse headers received:\n%s\n", MetadataToString(md))
+	}
+}
+
+// OnReceiveResponse counts and logs each response message. Responses are
+// logged even when not in verbose mode; formatting failures are reported
+// inline rather than aborting.
+func (h *DefaultEventHandler) OnReceiveResponse(resp proto.Message) {
+	h.NumResponses++
+	if h.verbose {
+		fmt.Fprint(h.out, "\nResponse contents:\n")
+	}
+	if respStr, err := h.formatter(resp); err != nil {
+		fmt.Fprintf(h.out, "Failed to format response message %d: %v\n", h.NumResponses, err)
+	} else {
+		fmt.Fprintln(h.out, respStr)
+	}
+}
+
+// OnReceiveTrailers records the final RPC status and, in verbose mode, logs
+// the trailer metadata.
+func (h *DefaultEventHandler) OnReceiveTrailers(stat *status.Status, md metadata.MD) {
+	h.Status = stat
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nResponse trailers received:\n%s\n", MetadataToString(md))
+	}
+}
+
+// PrintStatus prints details about the given status to the given writer. The given
+// formatter is used to print any detail messages that may be included in the status.
+// If the given status has a code of OK, "OK" is printed and that is all. Otherwise,
+// "ERROR:" is printed along with a line showing the code, one showing the message
+// string, and each detail message if any are present. The detail messages will be
+// printed as proto text format or JSON, depending on the given formatter.
+func PrintStatus(w io.Writer, stat *status.Status, formatter Formatter) {
+	if stat.Code() == codes.OK {
+		fmt.Fprintln(w, "OK")
+		return
+	}
+	fmt.Fprintf(w, "ERROR:\n  Code: %s\n  Message: %s\n", stat.Code().String(), stat.Message())
+
+	statpb := stat.Proto()
+	if len(statpb.Details) > 0 {
+		fmt.Fprintf(w, "  Details:\n")
+		for i, det := range statpb.Details {
+			// each detail gets a numbered prefix; continuation lines are
+			// indented by a same-width blank prefix so they line up
+			prefix := fmt.Sprintf("  %d)", i+1)
+			fmt.Fprintf(w, "%s\t", prefix)
+			prefix = strings.Repeat(" ", len(prefix)) + "\t"
+
+			output, err := formatter(det)
+			if err != nil {
+				fmt.Fprintf(w, "Error parsing detail message: %v\n", err)
+			} else {
+				lines := strings.Split(output, "\n")
+				for i, line := range lines {
+					if i == 0 {
+						// first line is already indented
+						fmt.Fprintf(w, "%s\n", line)
+					} else {
+						fmt.Fprintf(w, "%s%s\n", prefix, line)
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/go.mod b/vendor/github.com/fullstorydev/grpcurl/go.mod
new file mode 100644
index 0000000..f6af37d
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/go.mod
@@ -0,0 +1,8 @@
+module github.com/fullstorydev/grpcurl
+
+require (
+ github.com/golang/protobuf v1.3.1
+ github.com/jhump/protoreflect v1.4.1
+ golang.org/x/net v0.0.0-20190311183353-d8887717615a
+ google.golang.org/grpc v1.21.0
+)
diff --git a/vendor/github.com/fullstorydev/grpcurl/go.sum b/vendor/github.com/fullstorydev/grpcurl/go.sum
new file mode 100644
index 0000000..466dbb1
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/go.sum
@@ -0,0 +1,30 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/jhump/protoreflect v1.4.1 h1:tgahjuElRiJthp9JfaMUFxabBVIytT/lnMSadY5kMjM=
+github.com/jhump/protoreflect v1.4.1/go.mod h1:gZ3i/BeD62fjlaIL0VW4UDMT70CTX+3m4pOnAlJ0BX8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20170818100345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/fullstorydev/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
new file mode 100644
index 0000000..64947de
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
@@ -0,0 +1,622 @@
+// Package grpcurl provides the core functionality exposed by the grpcurl command, for
+// dynamically connecting to a server, using the reflection service to inspect the server,
+// and invoking RPCs. The grpcurl command-line tool constructs a DescriptorSource, based
+// on the command-line parameters, and supplies an InvocationEventHandler to supply request
+// data (which can come from command-line args or the process's stdin) and to log the
+// events (to the process's stdout).
+package grpcurl
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/golang/protobuf/ptypes/empty"
+ "github.com/golang/protobuf/ptypes/struct"
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/protoprint"
+ "github.com/jhump/protoreflect/dynamic"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+)
+
+// ListServices uses the given descriptor source to return a sorted list of fully-qualified
+// service names.
+func ListServices(source DescriptorSource) ([]string, error) {
+	names, err := source.ListServices()
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// sourceWithFiles is an optional interface that a DescriptorSource may
+// implement to expose all of its file descriptors directly.
+type sourceWithFiles interface {
+	GetAllFiles() ([]*desc.FileDescriptor, error)
+}
+
+// compile-time check that fileSource implements sourceWithFiles
+var _ sourceWithFiles = (*fileSource)(nil)
+
+// GetAllFiles uses the given descriptor source to return a list of file
+// descriptors, sorted by file name. If the source cannot enumerate files
+// directly, files are discovered via ListServices and their transitive
+// dependencies. On error, the files gathered so far are still returned along
+// with the first error encountered, so the caller may choose to ignore it.
+func GetAllFiles(source DescriptorSource) ([]*desc.FileDescriptor, error) {
+	var files []*desc.FileDescriptor
+	srcFiles, ok := source.(sourceWithFiles)
+
+	// If an error occurs, we still try to load as many files as we can, so that
+	// caller can decide whether to ignore error or not.
+	var firstError error
+	if ok {
+		files, firstError = srcFiles.GetAllFiles()
+	} else {
+		// Source does not implement GetAllFiles method, so use ListServices
+		// and grab files from there.
+		svcNames, err := source.ListServices()
+		if err != nil {
+			firstError = err
+		} else {
+			// de-duplicate files across services via a set keyed by file name
+			allFiles := map[string]*desc.FileDescriptor{}
+			for _, name := range svcNames {
+				d, err := source.FindSymbol(name)
+				if err != nil {
+					if firstError == nil {
+						firstError = err
+					}
+				} else {
+					addAllFilesToSet(d.GetFile(), allFiles)
+				}
+			}
+			files = make([]*desc.FileDescriptor, len(allFiles))
+			i := 0
+			for _, fd := range allFiles {
+				files[i] = fd
+				i++
+			}
+		}
+	}
+
+	sort.Sort(filesByName(files))
+	return files, firstError
+}
+
+// filesByName implements sort.Interface, ordering file descriptors by name.
+type filesByName []*desc.FileDescriptor
+
+func (f filesByName) Len() int {
+	return len(f)
+}
+
+func (f filesByName) Less(i, j int) bool {
+	return f[i].GetName() < f[j].GetName()
+}
+
+func (f filesByName) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+// addAllFilesToSet records fd and, transitively, every file it depends on
+// into the given map, keyed by file name. Files already present are skipped,
+// which also terminates the recursion.
+func addAllFilesToSet(fd *desc.FileDescriptor, all map[string]*desc.FileDescriptor) {
+	name := fd.GetName()
+	if _, seen := all[name]; seen {
+		return
+	}
+	all[name] = fd
+	for _, dep := range fd.GetDependencies() {
+		addAllFilesToSet(dep, all)
+	}
+}
+
+// ListMethods uses the given descriptor source to return a sorted list of method names
+// for the specified fully-qualified service name. If the symbol exists but is
+// not a service, a not-found error is returned.
+func ListMethods(source DescriptorSource, serviceName string) ([]string, error) {
+	dsc, err := source.FindSymbol(serviceName)
+	if err != nil {
+		return nil, err
+	}
+	// guard-clause form: golint flags "if ... !ok { return } else { ... }"
+	sd, ok := dsc.(*desc.ServiceDescriptor)
+	if !ok {
+		return nil, notFound("Service", serviceName)
+	}
+	methods := make([]string, 0, len(sd.GetMethods()))
+	for _, method := range sd.GetMethods() {
+		methods = append(methods, method.GetFullyQualifiedName())
+	}
+	sort.Strings(methods)
+	return methods, nil
+}
+
+// MetadataFromHeaders converts a list of header strings (each string in
+// "Header-Name: Header-Value" form) into metadata. If a string has a header
+// name without a value (e.g. does not contain a colon), the value is assumed
+// to be blank. Binary headers (those whose names end in "-bin") should be
+// base64-encoded. But if they cannot be base64-decoded, they will be assumed to
+// be in raw form and used as is.
+func MetadataFromHeaders(headers []string) metadata.MD {
+	md := make(metadata.MD)
+	for _, part := range headers {
+		if part != "" {
+			// split on the first colon only, so values may contain colons
+			pieces := strings.SplitN(part, ":", 2)
+			if len(pieces) == 1 {
+				pieces = append(pieces, "") // if no value was specified, just make it "" (maybe the header value doesn't matter)
+			}
+			// gRPC metadata keys are lowercase by convention
+			headerName := strings.ToLower(strings.TrimSpace(pieces[0]))
+			val := strings.TrimSpace(pieces[1])
+			if strings.HasSuffix(headerName, "-bin") {
+				// best-effort decode; on failure the raw value is kept
+				if v, err := decode(val); err == nil {
+					val = v
+				}
+			}
+			md[headerName] = append(md[headerName], val)
+		}
+	}
+	return md
+}
+
+// base64Codecs lists all base64 variants accepted for binary header values.
+var base64Codecs = []*base64.Encoding{base64.StdEncoding, base64.URLEncoding, base64.RawStdEncoding, base64.RawURLEncoding}
+
+// decode attempts to base64-decode val, trying each supported variant in
+// turn. If none succeed, it returns the first decoding error encountered.
+func decode(val string) (string, error) {
+	var firstErr error
+	var b []byte
+	// we are lenient and can accept any of the flavors of base64 encoding
+	for _, d := range base64Codecs {
+		var err error
+		b, err = d.DecodeString(val)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+		return string(b), nil
+	}
+	return "", firstErr
+}
+
+// MetadataToString returns a string representation of the given metadata, for
+// displaying to users. Keys are sorted; each key/value pair becomes one
+// "key: value" line, with binary ("-bin") values shown base64-encoded.
+func MetadataToString(md metadata.MD) string {
+	if len(md) == 0 {
+		return "(empty)"
+	}
+
+	keys := make([]string, 0, len(md))
+	for k := range md {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	var lines []string
+	for _, k := range keys {
+		for _, v := range md[k] {
+			if strings.HasSuffix(k, "-bin") {
+				v = base64.StdEncoding.EncodeToString([]byte(v))
+			}
+			lines = append(lines, k+": "+v)
+		}
+	}
+	return strings.Join(lines, "\n")
+}
+
+// printer renders descriptors as compact proto source, with non-doc comments
+// omitted, elements sorted, and type references fully qualified for clarity.
+var printer = &protoprint.Printer{
+	Compact:                  true,
+	OmitComments:             protoprint.CommentsNonDoc,
+	SortElements:             true,
+	ForceFullyQualifiedNames: true,
+}
+
+// GetDescriptorText returns a string representation of the given descriptor.
+// This returns a snippet of proto source that describes the given element.
+func GetDescriptorText(dsc desc.Descriptor, _ DescriptorSource) (string, error) {
+	// Note: DescriptorSource is not used, but remains an argument for backwards
+	// compatibility with previous implementation.
+	txt, err := printer.PrintProtoToString(dsc)
+	if err != nil {
+		return "", err
+	}
+	// callers don't expect trailing newlines; TrimSuffix is also safe when the
+	// printer returns an empty string (indexing txt[len(txt)-1] would panic)
+	return strings.TrimSuffix(txt, "\n"), nil
+}
+
+// EnsureExtensions uses the given descriptor source to download extensions for
+// the given message. It returns a copy of the given message, but as a dynamic
+// message that knows about all extensions known to the given descriptor source.
+// On any failure, the original message is returned unchanged (best-effort).
+func EnsureExtensions(source DescriptorSource, msg proto.Message) proto.Message {
+	// load any server extensions so we can properly describe custom options
+	dsc, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return msg
+	}
+
+	var ext dynamic.ExtensionRegistry
+	if err = fetchAllExtensions(source, &ext, dsc, map[string]bool{}); err != nil {
+		return msg
+	}
+
+	// convert message into dynamic message that knows about applicable extensions
+	// (that way we can show meaningful info for custom options instead of printing as unknown)
+	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
+	dm, err := fullyConvertToDynamic(msgFactory, msg)
+	if err != nil {
+		return msg
+	}
+	return dm
+}
+
+// fetchAllExtensions recursively fetches from the server extensions for the given message type as well as
+// for all message types of nested fields. The extensions are added to the given dynamic registry of extensions
+// so that all server-known extensions can be correctly parsed by grpcurl.
+// alreadyFetched guards against revisiting types (and infinite recursion on
+// cyclic message graphs).
+func fetchAllExtensions(source DescriptorSource, ext *dynamic.ExtensionRegistry, md *desc.MessageDescriptor, alreadyFetched map[string]bool) error {
+	msgTypeName := md.GetFullyQualifiedName()
+	if alreadyFetched[msgTypeName] {
+		return nil
+	}
+	alreadyFetched[msgTypeName] = true
+	// only types that declare extension ranges can have extensions
+	if len(md.GetExtensionRanges()) > 0 {
+		fds, err := source.AllExtensionsForType(msgTypeName)
+		if err != nil {
+			return fmt.Errorf("failed to query for extensions of type %s: %v", msgTypeName, err)
+		}
+		for _, fd := range fds {
+			if err := ext.AddExtension(fd); err != nil {
+				return fmt.Errorf("could not register extension %s of type %s: %v", fd.GetFullyQualifiedName(), msgTypeName, err)
+			}
+		}
+	}
+	// recursively fetch extensions for the types of any message fields
+	for _, fd := range md.GetFields() {
+		if fd.GetMessageType() != nil {
+			err := fetchAllExtensions(source, ext, fd.GetMessageType(), alreadyFetched)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// fullyConvertToDynamic attempts to convert the given message to a dynamic message as well
+// as any nested messages it may contain as field values. If the given message factory has
+// extensions registered that were not known when the given message was parsed, this effectively
+// allows re-parsing to identify those extensions.
+func fullyConvertToDynamic(msgFact *dynamic.MessageFactory, msg proto.Message) (proto.Message, error) {
+	if _, ok := msg.(*dynamic.Message); ok {
+		return msg, nil // already a dynamic message
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	newMsg := msgFact.NewMessage(md)
+	dm, ok := newMsg.(*dynamic.Message)
+	if !ok {
+		// if message factory didn't produce a dynamic message, then we should leave msg as is
+		return msg, nil
+	}
+
+	if err := dm.ConvertFrom(msg); err != nil {
+		return nil, err
+	}
+
+	// recursively convert all field values, too
+	for _, fd := range md.GetFields() {
+		if fd.IsMap() {
+			if fd.GetMapValueType().GetMessageType() != nil {
+				m := dm.GetField(fd).(map[interface{}]interface{})
+				for k, v := range m {
+					// keys can't be nested messages; so we only need to recurse through map values, not keys
+					newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
+					if err != nil {
+						return nil, err
+					}
+					dm.PutMapField(fd, k, newVal)
+				}
+			}
+		} else if fd.IsRepeated() {
+			// convert each element of a repeated message field in place
+			if fd.GetMessageType() != nil {
+				s := dm.GetField(fd).([]interface{})
+				for i, e := range s {
+					newVal, err := fullyConvertToDynamic(msgFact, e.(proto.Message))
+					if err != nil {
+						return nil, err
+					}
+					dm.SetRepeatedField(fd, i, newVal)
+				}
+			}
+		} else {
+			// singular message field
+			if fd.GetMessageType() != nil {
+				v := dm.GetField(fd)
+				newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
+				if err != nil {
+					return nil, err
+				}
+				dm.SetField(fd, newVal)
+			}
+		}
+	}
+	return dm, nil
+}
+
+// MakeTemplate returns a message instance for the given descriptor that is a
+// suitable template for creating an instance of that message in JSON. In
+// particular, it ensures that any repeated fields (which include map fields)
+// are not empty, so they will render with a single element (to show the types
+// and optionally nested fields). It also ensures that nested messages are not
+// nil by setting them to a message that is also fleshed out as a template
+// message.
+func MakeTemplate(md *desc.MessageDescriptor) proto.Message {
+	// nil path starts a fresh recursion-tracking walk
+	return makeTemplate(md, nil)
+}
+
+// makeTemplate builds the template message for md. path lists the message
+// descriptors already being expanded on the current recursion branch, used to
+// stop when a type recursively contains itself. Well-known wrapper types get
+// hand-built placeholder values since their zero forms are not valid JSON.
+func makeTemplate(md *desc.MessageDescriptor, path []*desc.MessageDescriptor) proto.Message {
+	switch md.GetFullyQualifiedName() {
+	case "google.protobuf.Any":
+		// empty type URL is not allowed by JSON representation
+		// so we must give it a dummy type
+		msg, _ := ptypes.MarshalAny(&empty.Empty{})
+		return msg
+	case "google.protobuf.Value":
+		// unset kind is not allowed by JSON representation
+		// so we must give it something
+		return &structpb.Value{
+			Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+				Fields: map[string]*structpb.Value{
+					"google.protobuf.Value": {Kind: &structpb.Value_StringValue{
+						StringValue: "supports arbitrary JSON",
+					}},
+				},
+			}},
+		}
+	case "google.protobuf.ListValue":
+		return &structpb.ListValue{
+			Values: []*structpb.Value{
+				{
+					Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+						Fields: map[string]*structpb.Value{
+							"google.protobuf.ListValue": {Kind: &structpb.Value_StringValue{
+								StringValue: "is an array of arbitrary JSON values",
+							}},
+						},
+					}},
+				},
+			},
+		}
+	case "google.protobuf.Struct":
+		return &structpb.Struct{
+			Fields: map[string]*structpb.Value{
+				"google.protobuf.Struct": {Kind: &structpb.Value_StringValue{
+					StringValue: "supports arbitrary JSON objects",
+				}},
+			},
+		}
+	}
+
+	dm := dynamic.NewMessage(md)
+
+	// if the message is a recursive structure, we don't want to blow the stack
+	for _, seen := range path {
+		if seen == md {
+			// already visited this type; avoid infinite recursion
+			return dm
+		}
+	}
+	path = append(path, dm.GetMessageDescriptor())
+
+	// for repeated fields, add a single element with default value
+	// and for message fields, add a message with all default fields
+	// that also has non-nil message and non-empty repeated fields
+
+	for _, fd := range dm.GetMessageDescriptor().GetFields() {
+		if fd.IsRepeated() {
+			// the Go value type passed to AddRepeatedField must match the
+			// field's wire type, hence the explicit conversions per case
+			switch fd.GetType() {
+			case descpb.FieldDescriptorProto_TYPE_FIXED32,
+				descpb.FieldDescriptorProto_TYPE_UINT32:
+				dm.AddRepeatedField(fd, uint32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_SFIXED32,
+				descpb.FieldDescriptorProto_TYPE_SINT32,
+				descpb.FieldDescriptorProto_TYPE_INT32,
+				descpb.FieldDescriptorProto_TYPE_ENUM:
+				dm.AddRepeatedField(fd, int32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_FIXED64,
+				descpb.FieldDescriptorProto_TYPE_UINT64:
+				dm.AddRepeatedField(fd, uint64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_SFIXED64,
+				descpb.FieldDescriptorProto_TYPE_SINT64,
+				descpb.FieldDescriptorProto_TYPE_INT64:
+				dm.AddRepeatedField(fd, int64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_STRING:
+				dm.AddRepeatedField(fd, "")
+
+			case descpb.FieldDescriptorProto_TYPE_BYTES:
+				dm.AddRepeatedField(fd, []byte{})
+
+			case descpb.FieldDescriptorProto_TYPE_BOOL:
+				dm.AddRepeatedField(fd, false)
+
+			case descpb.FieldDescriptorProto_TYPE_FLOAT:
+				dm.AddRepeatedField(fd, float32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_DOUBLE:
+				dm.AddRepeatedField(fd, float64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_MESSAGE,
+				descpb.FieldDescriptorProto_TYPE_GROUP:
+				dm.AddRepeatedField(fd, makeTemplate(fd.GetMessageType(), path))
+			}
+		} else if fd.GetMessageType() != nil {
+			dm.SetField(fd, makeTemplate(fd.GetMessageType(), path))
+		}
+	}
+	return dm
+}
+
+// ClientTransportCredentials builds transport credentials for a gRPC client using the
+// given properties. If cacertFile is blank, only standard trusted certs are used to
+// verify the server certs. If clientCertFile is blank, the client will not use a client
+// certificate. If clientCertFile is not blank then clientKeyFile must not be blank.
+// If insecureSkipVerify is true, server certificate verification is disabled
+// entirely and cacertFile is ignored.
+func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (credentials.TransportCredentials, error) {
+	var tlsConf tls.Config
+
+	if clientCertFile != "" {
+		// Load the client certificates from disk
+		certificate, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not load client key pair: %v", err)
+		}
+		tlsConf.Certificates = []tls.Certificate{certificate}
+	}
+
+	if insecureSkipVerify {
+		tlsConf.InsecureSkipVerify = true
+	} else if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := ioutil.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.RootCAs = certPool
+	}
+
+	return credentials.NewTLS(&tlsConf), nil
+}
+
+// ServerTransportCredentials builds transport credentials for a gRPC server using the
+// given properties. If cacertFile is blank, the server will not request client certs
+// unless requireClientCerts is true. When requireClientCerts is false and cacertFile is
+// not blank, the server will verify client certs when presented, but will not require
+// client certs. The serverCertFile and serverKeyFile must both not be blank.
+func ServerTransportCredentials(cacertFile, serverCertFile, serverKeyFile string, requireClientCerts bool) (credentials.TransportCredentials, error) {
+	var tlsConf tls.Config
+	// TODO(jh): Remove this line once https://github.com/golang/go/issues/28779 is fixed
+	// in Go tip. Until then, the recently merged TLS 1.3 support breaks the TLS tests.
+	tlsConf.MaxVersion = tls.VersionTLS12
+
+	// Load the server certificates from disk
+	certificate, err := tls.LoadX509KeyPair(serverCertFile, serverKeyFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not load key pair: %v", err)
+	}
+	tlsConf.Certificates = []tls.Certificate{certificate}
+
+	if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := ioutil.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.ClientCAs = certPool
+	}
+
+	// choose the client-auth policy implied by the arguments (see doc comment)
+	if requireClientCerts {
+		tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+	} else if cacertFile != "" {
+		tlsConf.ClientAuth = tls.VerifyClientCertIfGiven
+	} else {
+		tlsConf.ClientAuth = tls.NoClientCert
+	}
+
+	return credentials.NewTLS(&tlsConf), nil
+}
+
+// BlockingDial is a helper method to dial the given address, using optional TLS credentials,
+// and blocking until the returned connection is ready. If the given credentials are nil, the
+// connection will be insecure (plain-text). It returns either the connection,
+// the first dial/handshake error observed, or ctx.Err() on cancellation.
+func BlockingDial(ctx context.Context, network, address string, creds credentials.TransportCredentials, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	// grpc.Dial doesn't provide any information on permanent connection errors (like
+	// TLS handshake failures). So in order to provide good error messages, we need a
+	// custom dialer that can provide that info. That means we manage the TLS handshake.
+	result := make(chan interface{}, 1)
+
+	writeResult := func(res interface{}) {
+		// non-blocking write: we only need the first result
+		select {
+		case result <- res:
+		default:
+		}
+	}
+
+	// dialer does the TCP connect and, when creds are given, the TLS handshake,
+	// reporting the first failure through the result channel
+	dialer := func(ctx context.Context, address string) (net.Conn, error) {
+		conn, err := (&net.Dialer{}).DialContext(ctx, network, address)
+		if err != nil {
+			writeResult(err)
+			return nil, err
+		}
+		if creds != nil {
+			conn, _, err = creds.ClientHandshake(ctx, address, conn)
+			if err != nil {
+				writeResult(err)
+				return nil, err
+			}
+		}
+		return conn, nil
+	}
+
+	// Even with grpc.FailOnNonTempDialError, this call will usually timeout in
+	// the face of TLS handshake errors. So we can't rely on grpc.WithBlock() to
+	// know when we're done. So we run it in a goroutine and then use result
+	// channel to either get the channel or fail-fast.
+	go func() {
+		opts = append(opts,
+			grpc.WithBlock(),
+			grpc.FailOnNonTempDialError(true),
+			grpc.WithContextDialer(dialer),
+			grpc.WithInsecure(), // we are handling TLS, so tell grpc not to
+		)
+		conn, err := grpc.DialContext(ctx, address, opts...)
+		var res interface{}
+		if err != nil {
+			res = err
+		} else {
+			res = conn
+		}
+		writeResult(res)
+	}()
+
+	select {
+	case res := <-result:
+		// res is either a *grpc.ClientConn (success) or an error
+		if conn, ok := res.(*grpc.ClientConn); ok {
+			return conn, nil
+		}
+		return nil, res.(error)
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/invoke.go b/vendor/github.com/fullstorydev/grpcurl/invoke.go
new file mode 100644
index 0000000..d2f16cb
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/invoke.go
@@ -0,0 +1,389 @@
+package grpcurl
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/dynamic"
+ "github.com/jhump/protoreflect/dynamic/grpcdynamic"
+ "github.com/jhump/protoreflect/grpcreflect"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// InvocationEventHandler is a bag of callbacks for handling events that occur in the course
+// of invoking an RPC. The handler also provides request data that is sent. The callbacks are
+// generally called in the order they are listed below.
+type InvocationEventHandler interface {
+	// OnResolveMethod is called with a descriptor of the method that is being invoked.
+	OnResolveMethod(*desc.MethodDescriptor)
+	// OnSendHeaders is called with the request metadata that is being sent.
+	OnSendHeaders(metadata.MD)
+	// OnReceiveHeaders is called when response headers have been received.
+	OnReceiveHeaders(metadata.MD)
+	// OnReceiveResponse is called for each response message received. For
+	// streaming RPCs it may be invoked zero or more times.
+	OnReceiveResponse(proto.Message)
+	// OnReceiveTrailers is called when response trailers and final RPC status have been received.
+	OnReceiveTrailers(*status.Status, metadata.MD)
+}
+
+// RequestMessageSupplier is a function that is called to retrieve request
+// messages for a GRPC operation. It returns the raw (JSON) bytes of the next
+// message, or an error. This type is deprecated and will be removed in
+// a future release.
+//
+// Deprecated: This is only used with the deprecated InvokeRpc. Instead, use
+// RequestSupplier with InvokeRPC.
+type RequestMessageSupplier func() ([]byte, error)
+
+// InvokeRpc uses the given gRPC connection to invoke the given method. This function is deprecated
+// and will be removed in a future release. It just delegates to the similarly named InvokeRPC
+// method, whose signature is only slightly different.
+//
+// Deprecated: use InvokeRPC instead.
+func InvokeRpc(ctx context.Context, source DescriptorSource, cc *grpc.ClientConn, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestMessageSupplier) error {
+
+	return InvokeRPC(ctx, source, cc, methodName, headers, handler, func(m proto.Message) error {
+		// New function is almost identical, but the request supplier function works differently.
+		// So we adapt the logic here to maintain compatibility: pull the raw JSON
+		// bytes from the old-style supplier and unmarshal them into the message
+		// the new-style supplier is asked to populate.
+		data, err := requestData()
+		if err != nil {
+			return err
+		}
+		return jsonpb.Unmarshal(bytes.NewReader(data), m)
+	})
+}
+
+// RequestSupplier is a function that is called to populate messages for a gRPC operation. The
+// function should populate the given message or return a non-nil error. If the supplier has no
+// more messages, it should return io.EOF. When it returns io.EOF, it should not in any way
+// modify the given message argument.
+type RequestSupplier func(proto.Message) error
+
+// InvokeRPC uses the given gRPC channel to invoke the given method. The given descriptor source
+// is used to determine the type of method and the type of request and response message. The given
+// headers are sent as request metadata. Methods on the given event handler are called as the
+// invocation proceeds.
+//
+// The given requestData function supplies the actual data to send. It should return io.EOF when
+// there is no more request data. If the method being invoked is a unary or server-streaming RPC
+// (e.g. exactly one request message) and there is no request data (e.g. the first invocation of
+// the function returns io.EOF), then an empty request message is sent.
+//
+// If the requestData function and the given event handler coordinate or share any state, they should
+// be thread-safe. This is because the requestData function may be called from a different goroutine
+// than the one invoking event callbacks. (This only happens for bi-directional streaming RPCs, where
+// one goroutine sends request messages and another consumes the response messages).
+func InvokeRPC(ctx context.Context, source DescriptorSource, ch grpcdynamic.Channel, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestSupplier) error {
+
+	md := MetadataFromHeaders(headers)
+
+	// Accepts either "service/method" or "service.method" forms.
+	svc, mth := parseSymbol(methodName)
+	if svc == "" || mth == "" {
+		return fmt.Errorf("given method name %q is not in expected format: 'service/method' or 'service.method'", methodName)
+	}
+	dsc, err := source.FindSymbol(svc)
+	if err != nil {
+		// Distinguish "service doesn't exist" from other lookup failures.
+		if isNotFoundError(err) {
+			return fmt.Errorf("target server does not expose service %q", svc)
+		}
+		return fmt.Errorf("failed to query for service descriptor %q: %v", svc, err)
+	}
+	sd, ok := dsc.(*desc.ServiceDescriptor)
+	if !ok {
+		// The symbol resolved to something other than a service (e.g. a message type).
+		return fmt.Errorf("target server does not expose service %q", svc)
+	}
+	mtd := sd.FindMethodByName(mth)
+	if mtd == nil {
+		return fmt.Errorf("service %q does not include a method named %q", svc, mth)
+	}
+
+	handler.OnResolveMethod(mtd)
+
+	// we also download any applicable extensions so we can provide full support for parsing user-provided data
+	var ext dynamic.ExtensionRegistry
+	alreadyFetched := map[string]bool{}
+	if err = fetchAllExtensions(source, &ext, mtd.GetInputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetInputType().GetFullyQualifiedName(), err)
+	}
+	if err = fetchAllExtensions(source, &ext, mtd.GetOutputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetOutputType().GetFullyQualifiedName(), err)
+	}
+
+	// The message factory knows about the downloaded extensions, so the dynamic
+	// messages it creates can carry them.
+	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
+	req := msgFactory.NewMessage(mtd.GetInputType())
+
+	handler.OnSendHeaders(md)
+	ctx = metadata.NewOutgoingContext(ctx, md)
+
+	stub := grpcdynamic.NewStubWithMessageFactory(ch, msgFactory)
+	// Ensure the RPC is torn down if we return early for any reason.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Dispatch based on the method's streaming characteristics.
+	if mtd.IsClientStreaming() && mtd.IsServerStreaming() {
+		return invokeBidi(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsClientStreaming() {
+		return invokeClientStream(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsServerStreaming() {
+		return invokeServerStream(ctx, stub, mtd, handler, requestData, req)
+	} else {
+		return invokeUnary(ctx, stub, mtd, handler, requestData, req)
+	}
+}
+
+// invokeUnary executes a unary RPC: exactly one request message in, at most one
+// response message out. If the supplier yields no message at all (immediate
+// io.EOF), the empty req message is sent as-is.
+func invokeUnary(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a unary RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	var respHeaders metadata.MD
+	var respTrailers metadata.MD
+	resp, err := stub.InvokeRpc(ctx, md, req, grpc.Trailer(&respTrailers), grpc.Header(&respHeaders))
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveHeaders(respHeaders)
+
+	// Only deliver a response message if the call actually succeeded.
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	handler.OnReceiveTrailers(stat, respTrailers)
+
+	return nil
+}
+
+// invokeClientStream executes a client-streaming RPC: zero or more request
+// messages in, a single response message out. Request messages are pulled from
+// requestData until it returns io.EOF, at which point the stream is closed and
+// the single response is received.
+func invokeClientStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcClientStream(ctx, md)
+	if err != nil {
+		// The stream could not be created at all (e.g. connection failure). In
+		// that case str is nil, so bail out now rather than fall through and
+		// dereference it in str.Header()/str.Trailer() below.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	// Upload each request message in the stream
+	var resp proto.Message
+	for err == nil {
+		err = requestData(req)
+		if err == io.EOF {
+			// No more requests: close our side and wait for the response.
+			resp, err = str.CloseAndReceive()
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+
+		err = str.SendMsg(req)
+		if err == io.EOF {
+			// We get EOF on send if the server says "go away"
+			// We have to use CloseAndReceive to get the actual code
+			resp, err = str.CloseAndReceive()
+			break
+		}
+
+		// Re-use the same message instance for the next request.
+		req.Reset()
+	}
+
+	// finally, process response data
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	// Only deliver a response message if the call actually succeeded.
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+// invokeServerStream executes a server-streaming RPC: exactly one request
+// message in, zero or more response messages out. If the supplier yields no
+// message at all (immediate io.EOF), the empty req message is sent as-is.
+func invokeServerStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a server-streaming RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	str, err := stub.InvokeRpcServerStream(ctx, md, req)
+	if err != nil {
+		// The stream could not be created at all (e.g. connection failure). In
+		// that case str is nil, so return now instead of panicking on
+		// str.Header() below.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				// Clean end of stream; the final status comes from the trailers.
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+// invokeBidi executes a bi-directional streaming RPC. Request messages are
+// uploaded from a separate goroutine while this goroutine consumes responses;
+// see the thread-safety note on InvokeRPC.
+func invokeBidi(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcBidiStream(ctx, md)
+	if err != nil {
+		// The stream could not be created at all (e.g. connection failure). In
+		// that case str is nil, so bail out now rather than dereference it in
+		// str.Header()/str.Trailer() below.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	var wg sync.WaitGroup
+	var sendErr atomic.Value
+
+	// Make sure the sending goroutine has finished before we return.
+	defer wg.Wait()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		// Concurrently upload each request message in the stream
+		var err error
+		for err == nil {
+			err = requestData(req)
+
+			if err == io.EOF {
+				err = str.CloseSend()
+				break
+			}
+			if err != nil {
+				err = fmt.Errorf("error getting request data: %v", err)
+				// Tear down the whole RPC so the receive loop unblocks.
+				cancel()
+				break
+			}
+
+			err = str.SendMsg(req)
+
+			// Re-use the same message instance for the next request.
+			req.Reset()
+		}
+
+		if err != nil {
+			sendErr.Store(err)
+		}
+	}()
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				// Clean end of stream; the final status comes from the trailers.
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	// A send-side failure (other than the benign io.EOF on SendMsg) takes
+	// precedence over whatever the receive loop saw.
+	if se, ok := sendErr.Load().(error); ok && se != io.EOF {
+		err = se
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+// notFoundError is a string-backed error type used to signal that a queried
+// element (service, symbol, etc.) does not exist.
+type notFoundError string
+
+// notFound constructs a notFoundError describing the missing element's kind and name.
+func notFound(kind, name string) error {
+	return notFoundError(fmt.Sprintf("%s not found: %s", kind, name))
+}
+
+// Error implements the error interface.
+func (e notFoundError) Error() string {
+	return string(e)
+}
+
+// isNotFoundError reports whether err indicates a missing element, either as
+// reported by the server's reflection service or as a local notFoundError.
+func isNotFoundError(err error) bool {
+	if grpcreflect.IsElementNotFoundError(err) {
+		return true
+	}
+	_, ok := err.(notFoundError)
+	return ok
+}
+
+// parseSymbol splits a fully-qualified method name into its service and method
+// parts. Both "service/method" and "service.method" forms are accepted; the
+// separator is the last '/' if one is present, otherwise the last '.'. When no
+// separator exists, two empty strings are returned.
+func parseSymbol(svcAndMethod string) (string, string) {
+	sep := strings.LastIndex(svcAndMethod, "/")
+	if sep < 0 {
+		sep = strings.LastIndex(svcAndMethod, ".")
+	}
+	if sep < 0 {
+		return "", ""
+	}
+	return svcAndMethod[:sep], svcAndMethod[sep+1:]
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
new file mode 100755
index 0000000..407f7dc
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+set -e
+
+# Work from the directory containing this script so relative paths resolve.
+# Note: the inner "$0" is quoted too, so paths containing spaces work.
+cd "$(dirname "$0")"
+
+# Run this script to generate files used by tests.
+
+echo "Creating protosets..."
+protoc testing/test.proto \
+	--include_imports \
+	--descriptor_set_out=testing/test.protoset
+
+protoc testing/example.proto \
+	--include_imports \
+	--descriptor_set_out=testing/example.protoset
+
+echo "Creating certs for TLS testing..."
+if ! hash certstrap 2>/dev/null; then
+	# certstrap not found: try to install it
+	go get github.com/square/certstrap
+	go install github.com/square/certstrap
+fi
+
+# Helper: run certstrap against the test depot with an empty passphrase.
+function cs() {
+	certstrap --depot-path testing/tls "$@" --passphrase ""
+}
+
+rm -rf testing/tls
+
+# Create CA
+cs init --years 10 --common-name ca
+
+# Create client cert
+cs request-cert --common-name client
+cs sign client --years 10 --CA ca
+
+# Create server cert
+cs request-cert --common-name server --ip 127.0.0.1 --domain localhost
+cs sign server --years 10 --CA ca
+
+# Create another server cert for error testing
+cs request-cert --common-name other --ip 1.2.3.4 --domain foobar.com
+cs sign other --years 10 --CA ca
+
+# Create another CA and client cert for more
+# error testing
+cs init --years 10 --common-name wrong-ca
+cs request-cert --common-name wrong-client
+cs sign wrong-client --years 10 --CA wrong-ca
+
+# Create expired cert
+cs request-cert --common-name expired --ip 127.0.0.1 --domain localhost
+cs sign expired --years 0 --CA ca