[VOL-4442] gRPC streaming connection monitoring

Change-Id: I435a03fdc0ac2b549dc4512220148cb19c16db19
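This change replaces the unary GetHealthStatus probe with bidirectional gRPC streaming keep-alives, so either side can detect a lost connection. The OLT adapter now serves a streaming GetHealthStatus on the AdapterService and on a new OltInterAdapterService handler (OpenOLTInterAdapter), and each per-OLT device handler monitors its connection to the onu inter adapter service the same way. The monitoring loop itself lives in voltha-lib-go's vgrpc client (hence the bump to v7.1.5 and the extra service-name argument to vgrpc.NewClient); the handlers passed to Client.Start now simply return the generated service client instead of issuing a one-shot health check.

For orientation only, below is a minimal sketch of the client-side keep-alive exchange the new server handlers expect. The real loop is inside vgrpc; the package name, function name, ContextInfo value and 10-second interval are assumptions, not part of this change.

// Illustrative client-side keep-alive loop (sketch, not part of this change).
package healthmon

import (
	"context"
	"time"

	"github.com/opencord/voltha-protos/v5/go/adapter_service"
	"github.com/opencord/voltha-protos/v5/go/common"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// monitorOltAdapter opens the bidirectional GetHealthStatus stream and
// exchanges keep-alives until the stream breaks or the context is cancelled.
func monitorOltAdapter(ctx context.Context, endpoint, contextInfo string) error {
	conn, err := grpc.Dial(endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	stream, err := adapter_service.NewAdapterServiceClient(conn).GetHealthStatus(ctx)
	if err != nil {
		return err
	}
	for {
		// Identify this client; ContextInfo can carry extra data such as the
		// parent device id (see vgrpc.ClientContextData in device_handler.go).
		if err := stream.Send(&common.Connection{ContextInfo: contextInfo}); err != nil {
			return err // send failed: treat the connection as down
		}
		if _, err := stream.Recv(); err != nil {
			return err // no HealthStatus reply: the server is gone
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second): // assumed keep-alive interval
		}
	}
}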
diff --git a/cmd/openolt-adapter/main.go b/cmd/openolt-adapter/main.go
index 434fc83..cffd2ab 100644
--- a/cmd/openolt-adapter/main.go
+++ b/cmd/openolt-adapter/main.go
@@ -38,10 +38,8 @@
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
 	ac "github.com/opencord/voltha-openolt-adapter/internal/pkg/core"
 	"github.com/opencord/voltha-protos/v5/go/adapter_service"
-	"github.com/opencord/voltha-protos/v5/go/common"
 	ca "github.com/opencord/voltha-protos/v5/go/core_adapter"
 	"github.com/opencord/voltha-protos/v5/go/core_service"
-	"github.com/opencord/voltha-protos/v5/go/health"
 	"github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service"
 	"github.com/opencord/voltha-protos/v5/go/voltha"
 	"google.golang.org/grpc"
@@ -55,16 +53,17 @@
 )
 
 type adapter struct {
-	instanceID  string
-	config      *config.AdapterFlags
-	grpcServer  *vgrpc.GrpcServer
-	oltAdapter  *ac.OpenOLT
-	kafkaClient kafka.Client
-	kvClient    kvstore.Client
-	coreClient  *vgrpc.Client
-	eventProxy  eventif.EventProxy
-	halted      bool
-	exitChannel chan int
+	instanceID      string
+	config          *config.AdapterFlags
+	grpcServer      *vgrpc.GrpcServer
+	oltAdapter      *ac.OpenOLT
+	oltInterAdapter *ac.OpenOLTInterAdapter
+	kafkaClient     kafka.Client
+	kvClient        kvstore.Client
+	coreClient      *vgrpc.Client
+	eventProxy      eventif.EventProxy
+	halted          bool
+	exitChannel     chan int
 }
 
 func newAdapter(cf *config.AdapterFlags) *adapter {
@@ -133,17 +132,23 @@
 	if a.coreClient, err = vgrpc.NewClient(
 		a.config.AdapterEndpoint,
 		a.config.CoreEndpoint,
+		"core_service.CoreService",
 		a.coreRestarted); err != nil {
 		logger.Fatal(ctx, "grpc-client-not-created")
 	}
 	// Start the core grpc client
-	go a.coreClient.Start(ctx, setAndTestCoreServiceHandler)
+	go a.coreClient.Start(ctx, getCoreServiceClientHandler)
 
 	// Create the open OLT adapter
 	if a.oltAdapter, err = a.startOpenOLT(ctx, a.coreClient, a.eventProxy, a.config, cm); err != nil {
 		logger.Fatalw(ctx, "error-starting-openolt", log.Fields{"error": err})
 	}
 
+	// Create the open OLT inter adapter service handler
+	if a.oltInterAdapter, err = a.startOpenOLTInterAdapter(ctx, a.oltAdapter); err != nil {
+		logger.Fatalw(ctx, "error-starting-openolt-inter-adapter", log.Fields{"error": err})
+	}
+
 	// Create and start the grpc server
 	a.grpcServer = vgrpc.NewGrpcServer(a.config.GrpcAddress, nil, false, p)
 
@@ -151,7 +156,7 @@
 	a.addAdapterService(ctx, a.grpcServer, a.oltAdapter)
 
 	//Register the olt inter-adapter  service
-	a.addOltInterAdapterService(ctx, a.grpcServer, a.oltAdapter)
+	a.addOltInterAdapterService(ctx, a.grpcServer, a.oltInterAdapter)
 
 	// Start the grpc server
 	go a.startGRPCService(ctx, a.grpcServer, oltAdapterService)
@@ -171,13 +176,12 @@
 	return nil
 }
 
-// setAndTestCoreServiceHandler is used to test whether the remote gRPC service is up
-func setAndTestCoreServiceHandler(ctx context.Context, conn *grpc.ClientConn, clientConn *common.Connection) interface{} {
-	svc := core_service.NewCoreServiceClient(conn)
-	if h, err := svc.GetHealthStatus(ctx, clientConn); err != nil || h.State != health.HealthStatus_HEALTHY {
+// getCoreServiceClientHandler returns a core service client for the given connection, or nil if the connection is not set
+func getCoreServiceClientHandler(ctx context.Context, conn *grpc.ClientConn) interface{} {
+	if conn == nil {
 		return nil
 	}
-	return svc
+	return core_service.NewCoreServiceClient(conn)
 }
 
 /**
@@ -239,6 +243,14 @@
 	// send exit signal
 	a.exitChannel <- 0
 
+	// Stop all grpc processing
+	if err := a.oltAdapter.Stop(ctx); err != nil {
+		logger.Errorw(ctx, "failure-stopping-olt-adapter-service", log.Fields{"error": err, "adapter": a.config.AdapterName})
+	}
+	if err := a.oltInterAdapter.Stop(ctx); err != nil {
+		logger.Errorw(ctx, "failure-stopping-olt-inter-adapter-service", log.Fields{"error": err, "adapter": a.config.AdapterName})
+	}
+
 	// Cleanup - applies only if we had a kvClient
 	if a.kvClient != nil {
 		// Release all reservations
@@ -346,6 +358,19 @@
 	return sOLT, nil
 }
 
+func (a *adapter) startOpenOLTInterAdapter(ctx context.Context, oo *ac.OpenOLT) (*ac.OpenOLTInterAdapter, error) {
+	logger.Info(ctx, "starting-open-olt-inter-adapter")
+	var err error
+	sOLTInterAdapter := ac.NewOpenOLTInterAdapter(oo)
+
+	if err = sOLTInterAdapter.Start(ctx); err != nil {
+		return nil, err
+	}
+
+	logger.Info(ctx, "open-olt-inter-adapter-started")
+	return sOLTInterAdapter, nil
+}
+
 func (a *adapter) registerWithCore(ctx context.Context, serviceName string, retries int) error {
 	adapterID := fmt.Sprintf("openolt_%d", a.config.CurrentReplica)
 	logger.Infow(ctx, "registering-with-core", log.Fields{
diff --git a/cmd/openolt-adapter/main_test.go b/cmd/openolt-adapter/main_test.go
index a70949d..5c470ac 100644
--- a/cmd/openolt-adapter/main_test.go
+++ b/cmd/openolt-adapter/main_test.go
@@ -86,11 +86,12 @@
 	if ad.coreClient, err = vgrpc.NewClient(
 		"olt-endpoint",
 		ms.ApiEndpoint,
+		"core_service.CoreService",
 		ad.coreRestarted); err != nil {
 		t.Errorf("grpc client: expected error:nil, got error: %v", err)
 	}
 	// Start the core grpc client
-	go ad.coreClient.Start(ctx, setAndTestCoreServiceHandler)
+	go ad.coreClient.Start(ctx, getCoreServiceClientHandler)
 	defer ad.coreClient.Stop(ctx)
 	err = ad.registerWithCore(ctx, coreService, 1)
 	if err != nil {
diff --git a/go.mod b/go.mod
index f5922e6..9ca9f25 100644
--- a/go.mod
+++ b/go.mod
@@ -13,9 +13,9 @@
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/protobuf v1.5.2
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
-	github.com/opencord/voltha-lib-go/v7 v7.1.3
-	github.com/opencord/voltha-protos/v5 v5.2.1
+	github.com/opencord/voltha-lib-go/v7 v7.1.5
+	github.com/opencord/voltha-protos/v5 v5.2.2
 	github.com/stretchr/testify v1.7.0
 	go.etcd.io/etcd v3.3.25+incompatible
-	google.golang.org/grpc v1.42.0
+	google.golang.org/grpc v1.44.0
 )
diff --git a/go.sum b/go.sum
index a947090..6e28928 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,6 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I=
 github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
@@ -92,6 +93,7 @@
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -105,12 +107,15 @@
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
 github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
 github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
@@ -136,6 +141,8 @@
 github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
 github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
 github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jhump/protoreflect v1.10.2 h1:Qc3VJs63tedVbKAI+nT56poeo81VC/Qz1erp5No5ROU=
+github.com/jhump/protoreflect v1.10.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
 github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
 github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -172,6 +179,7 @@
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -183,11 +191,10 @@
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
 github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/opencord/voltha-lib-go/v7 v7.1.3 h1:K6bnHg9N/Eg+P+qLqtuZ4GhOOVxZJ14qj9RZYujjEBw=
-github.com/opencord/voltha-lib-go/v7 v7.1.3/go.mod h1:tdAFZ7N/rRg3ScnVM/15KkUZN60kTYPGU+uiRMpjTfY=
-github.com/opencord/voltha-protos/v5 v5.1.2/go.mod h1:dvz08S+JLdfjhFtd3Rf38DLeD9jQ6JO1+0K3NTdheRo=
-github.com/opencord/voltha-protos/v5 v5.2.1 h1:ijIn+7Jl7vJ/m/f1mtlGvXAN7gps2k2DroeCN2gXUSM=
-github.com/opencord/voltha-protos/v5 v5.2.1/go.mod h1:dvz08S+JLdfjhFtd3Rf38DLeD9jQ6JO1+0K3NTdheRo=
+github.com/opencord/voltha-lib-go/v7 v7.1.5 h1:IdNDzcA8V8LXHVm/2dG9QJ1IqFO+kM1aCAC4By+3MEU=
+github.com/opencord/voltha-lib-go/v7 v7.1.5/go.mod h1:lnwlFfhDVMBg2siCv1CajB1fvfAU9Cs8VbB64LQ8zVg=
+github.com/opencord/voltha-protos/v5 v5.2.2 h1:1Bcgl+Fmp00ZxlDrHZdcbjpMgOwX6TnZmOTrYm9SbR8=
+github.com/opencord/voltha-protos/v5 v5.2.2/go.mod h1:ZGcyW79kQKIo7AySo1LRu613E6uiozixrCF0yNB/4x8=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -223,6 +230,7 @@
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
@@ -250,6 +258,7 @@
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
@@ -289,6 +298,7 @@
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -322,6 +332,7 @@
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -368,7 +379,10 @@
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
@@ -386,8 +400,9 @@
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20211207154714-918901c715cf h1:PSEM+IQFb9xdsj2CGhfqUTfsZvF8DScCVP1QZb2IiTQ=
-google.golang.org/genproto v0.0.0-20211207154714-918901c715cf/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068 h1:pwzFiZfBTH/GjBWz1BcDwMBaHBo8mZvpLa7eBKJpFAk=
+google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -395,7 +410,10 @@
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
@@ -407,6 +425,7 @@
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -424,6 +443,7 @@
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/internal/pkg/core/device_handler.go b/internal/pkg/core/device_handler.go
index 0e46594..8248278 100644
--- a/internal/pkg/core/device_handler.go
+++ b/internal/pkg/core/device_handler.go
@@ -51,7 +51,6 @@
 	"github.com/opencord/voltha-protos/v5/go/common"
 	ca "github.com/opencord/voltha-protos/v5/go/core_adapter"
 	"github.com/opencord/voltha-protos/v5/go/extension"
-	"github.com/opencord/voltha-protos/v5/go/health"
 	ia "github.com/opencord/voltha-protos/v5/go/inter_adapter"
 	"github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service"
 	of "github.com/opencord/voltha-protos/v5/go/openflow_13"
@@ -85,7 +84,7 @@
 	lockChildAdapterClients sync.RWMutex
 	EventProxy              eventif.EventProxy
 	openOLT                 *OpenOLT
-	exitChannel             chan int
+	exitChannel             chan struct{}
 	lockDevice              sync.RWMutex
 	Client                  oop.OpenoltClient
 	transitionMap           *TransitionMap
@@ -192,7 +191,7 @@
 	cloned := (proto.Clone(device)).(*voltha.Device)
 	dh.device = cloned
 	dh.openOLT = adapter
-	dh.exitChannel = make(chan int, 1) // TODO: Why buffered?
+	dh.exitChannel = make(chan struct{})
 	dh.lockDevice = sync.RWMutex{}
 	dh.stopCollector = make(chan bool, 2)      // TODO: Why buffered?
 	dh.stopHeartbeatCheck = make(chan bool, 2) // TODO: Why buffered?
@@ -229,13 +228,15 @@
 	logger.Debug(ctx, "device-agent-started")
 }
 
-// stop stops the device dh.  Not much to do for now
-func (dh *DeviceHandler) stop(ctx context.Context) {
+// Stop stops the device handler
+func (dh *DeviceHandler) Stop(ctx context.Context) {
 	dh.lockDevice.Lock()
 	defer dh.lockDevice.Unlock()
 	logger.Debug(ctx, "stopping-device-agent")
-	dh.exitChannel <- 1
+	close(dh.exitChannel)
 
+	// Delete (and thereby stop) all grpc connections to the child adapters
+	dh.deleteAdapterClients(ctx)
 	logger.Debug(ctx, "device-agent-stopped")
 }
 
@@ -3219,11 +3220,13 @@
 	if dh.childAdapterClients[endpoint], err = vgrpc.NewClient(
 		dh.cfg.AdapterEndpoint,
 		endpoint,
-		dh.onuAdapterRestarted); err != nil {
+		"onu_inter_adapter_service.OnuInterAdapterService",
+		dh.onuInterAdapterRestarted,
+		vgrpc.ClientContextData(dh.device.Id)); err != nil {
 		logger.Errorw(ctx, "grpc-client-not-created", log.Fields{"error": err, "endpoint": endpoint})
 		return err
 	}
-	go dh.childAdapterClients[endpoint].Start(log.WithSpanFromContext(context.TODO(), ctx), dh.setAndTestOnuInterAdapterServiceHandler)
+	go dh.childAdapterClients[endpoint].Start(log.WithSpanFromContext(context.TODO(), ctx), dh.getOnuInterAdapterServiceClientHandler)
 
 	// Wait until we have a connection to the child adapter.
 	// Unlimited retries or until context expires
@@ -3281,30 +3284,18 @@
 	}
 }
 
-// TODO:  Any action the adapter needs to do following a onu adapter restart?
-func (dh *DeviceHandler) onuAdapterRestarted(ctx context.Context, endPoint string) error {
-	logger.Warnw(ctx, "onu-adapter-reconnected", log.Fields{"endpoint": endPoint})
+// TODO: Any action the adapter needs to take following an onu inter adapter service restart?
+func (dh *DeviceHandler) onuInterAdapterRestarted(ctx context.Context, endPoint string) error {
+	logger.Warnw(ctx, "onu-inter-adapter-service-reconnected", log.Fields{"endpoint": endPoint})
 	return nil
 }
 
-// setAndTestOnuInterAdapterServiceHandler is used to test whether the remote gRPC service is up
-func (dh *DeviceHandler) setAndTestOnuInterAdapterServiceHandler(ctx context.Context, conn *grpc.ClientConn, clientConn *common.Connection) interface{} {
-	// The onu adapter needs to know whether the olt adapter can connect to it.   Since the olt adapter
-	// has a grpc client connection per device handler (i.e. per olt device) to the onu adapter
-	// then the onu adapter needs to know whether that specific client can connect to it. Because the
-	// client uses a polling mechanism then not all grpc clients could be connected at the same time,
-	// a maximum difference of 5 sec.  We therefore add the parent device as additional contextual information
-	// to this request.
-	dh.lockDevice.RLock()
-	if dh.device != nil {
-		clientConn.ContextInfo = dh.device.Id
-	}
-	dh.lockDevice.RUnlock()
-	svc := onu_inter_adapter_service.NewOnuInterAdapterServiceClient(conn)
-	if h, err := svc.GetHealthStatus(ctx, clientConn); err != nil || h.State != health.HealthStatus_HEALTHY {
+// getOnuInterAdapterServiceClientHandler returns an onu inter adapter service client for the given connection, or nil if the connection is not set
+func (dh *DeviceHandler) getOnuInterAdapterServiceClientHandler(ctx context.Context, conn *grpc.ClientConn) interface{} {
+	if conn == nil {
 		return nil
 	}
-	return svc
+	return onu_inter_adapter_service.NewOnuInterAdapterServiceClient(conn)
 }
 
 func (dh *DeviceHandler) setDeviceDeletionInProgressFlag(flag bool) {
diff --git a/internal/pkg/core/device_handler_test.go b/internal/pkg/core/device_handler_test.go
index ea40a5c..8737ee2 100644
--- a/internal/pkg/core/device_handler_test.go
+++ b/internal/pkg/core/device_handler_test.go
@@ -1100,10 +1100,10 @@
 	dh := newMockDeviceHandler()
 	dh1 := negativeDeviceHandler()
 	dh.start(context.Background())
-	dh.stop(context.Background())
+	dh.Stop(context.Background())
 
 	dh1.start(context.Background())
-	dh1.stop(context.Background())
+	dh1.Stop(context.Background())
 
 }
 
diff --git a/internal/pkg/core/openolt.go b/internal/pkg/core/openolt.go
index 3665cbd..a483111 100644
--- a/internal/pkg/core/openolt.go
+++ b/internal/pkg/core/openolt.go
@@ -20,6 +20,7 @@
 import (
 	"context"
 	"errors"
+	"fmt"
 	"sync"
 	"time"
 
@@ -30,6 +31,7 @@
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
 	"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
+	"github.com/opencord/voltha-protos/v5/go/adapter_service"
 	"github.com/opencord/voltha-protos/v5/go/common"
 	ca "github.com/opencord/voltha-protos/v5/go/core_adapter"
 	"github.com/opencord/voltha-protos/v5/go/extension"
@@ -49,7 +51,7 @@
 	numOnus                     int
 	KVStoreAddress              string
 	KVStoreType                 string
-	exitChannel                 chan int
+	exitChannel                 chan struct{}
 	HeartbeatCheckInterval      time.Duration
 	HeartbeatFailReportInterval time.Duration
 	GrpcTimeoutInterval         time.Duration
@@ -64,7 +66,7 @@
 	coreClient *vgrpc.Client,
 	eventProxy eventif.EventProxy, cfg *config.AdapterFlags, cm *conf.ConfigManager) *OpenOLT {
 	var openOLT OpenOLT
-	openOLT.exitChannel = make(chan int, 1)
+	openOLT.exitChannel = make(chan struct{})
 	openOLT.deviceHandlers = make(map[string]*DeviceHandler)
 	openOLT.config = cfg
 	openOLT.numOnus = cfg.OnuNumber
@@ -93,7 +95,15 @@
 //Stop terminates the session
 func (oo *OpenOLT) Stop(ctx context.Context) error {
 	logger.Info(ctx, "stopping-device-manager")
-	oo.exitChannel <- 1
+	close(oo.exitChannel)
+	// Stop the device handlers
+	oo.stopAllDeviceHandlers(ctx)
+
+	// Stop the core grpc client connection
+	if oo.coreClient != nil {
+		oo.coreClient.Stop(ctx)
+	}
+
 	logger.Info(ctx, "device-manager-stopped")
 	return nil
 }
@@ -121,9 +131,12 @@
 	return nil
 }
 
-// GetHealthStatus is used as a service readiness validation as a grpc connection
-func (oo *OpenOLT) GetHealthStatus(ctx context.Context, clientConn *common.Connection) (*health.HealthStatus, error) {
-	return &health.HealthStatus{State: health.HealthStatus_HEALTHY}, nil
+func (oo *OpenOLT) stopAllDeviceHandlers(ctx context.Context) {
+	oo.lockDeviceHandlersMap.Lock()
+	defer oo.lockDeviceHandlersMap.Unlock()
+	for _, handler := range oo.deviceHandlers {
+		handler.Stop(ctx)
+	}
 }
 
 // AdoptDevice creates a new device handler if not present already and then adopts the device
@@ -413,6 +426,50 @@
 
 }
 
+// GetHealthStatus is used by an OltAdapterService client to detect a lost
+// connection with the gRPC server hosting the OltAdapterService
+func (oo *OpenOLT) GetHealthStatus(stream adapter_service.AdapterService_GetHealthStatusServer) error {
+	ctx := context.Background()
+	logger.Debugw(ctx, "receive-stream-connection", log.Fields{"stream": stream})
+
+	if stream == nil {
+		return fmt.Errorf("conn-is-nil %v", stream)
+	}
+	initialRequestTime := time.Now()
+	var remoteClient *common.Connection
+	var tempClient *common.Connection
+	var err error
+loop:
+	for {
+		tempClient, err = stream.Recv()
+		if err != nil {
+			logger.Warnw(ctx, "received-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+		// Send a response back
+		err = stream.Send(&health.HealthStatus{State: health.HealthStatus_HEALTHY})
+		if err != nil {
+			logger.Warnw(ctx, "sending-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+
+		remoteClient = tempClient
+		logger.Debugw(ctx, "received-keep-alive", log.Fields{"remote-client": remoteClient})
+
+		select {
+		case <-stream.Context().Done():
+			logger.Infow(ctx, "stream-keep-alive-context-done", log.Fields{"remote-client": remoteClient, "error": stream.Context().Err()})
+			break loop
+		case <-oo.exitChannel:
+			logger.Warnw(ctx, "received-stop", log.Fields{"remote-client": remoteClient, "initial-conn-time": initialRequestTime})
+			break loop
+		default:
+		}
+	}
+	logger.Errorw(ctx, "connection-down", log.Fields{"remote-client": remoteClient, "error": err, "initial-conn-time": initialRequestTime})
+	return err
+}
+
 /*
  *
  * Unimplemented APIs
diff --git a/internal/pkg/core/openoltInterAdapter.go b/internal/pkg/core/openoltInterAdapter.go
new file mode 100644
index 0000000..c8fb8bd
--- /dev/null
+++ b/internal/pkg/core/openoltInterAdapter.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2022-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package core provides utilities for olt devices, flows and statistics
+package core
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-protos/v5/go/common"
+	"github.com/opencord/voltha-protos/v5/go/health"
+	ia "github.com/opencord/voltha-protos/v5/go/inter_adapter"
+	oltia "github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service"
+)
+
+//OpenOLTInterAdapter structure holds a reference to the oltAdapter
+type OpenOLTInterAdapter struct {
+	oltAdapter  *OpenOLT
+	exitChannel chan struct{}
+}
+
+//NewOpenOLTInterAdapter returns a new instance of OpenOLTInterAdapter
+func NewOpenOLTInterAdapter(oltAdapter *OpenOLT) *OpenOLTInterAdapter {
+	return &OpenOLTInterAdapter{oltAdapter: oltAdapter, exitChannel: make(chan struct{})}
+}
+
+//Start starts the inter-adapter service
+func (oo *OpenOLTInterAdapter) Start(ctx context.Context) error {
+	return nil
+}
+
+//Stop stops the inter-adapter service
+func (oo *OpenOLTInterAdapter) Stop(ctx context.Context) error {
+	close(oo.exitChannel)
+	return nil
+}
+
+// ProxyOmciRequest proxies an OMCI request from the child adapter
+func (oo *OpenOLTInterAdapter) ProxyOmciRequest(ctx context.Context, request *ia.OmciMessage) (*empty.Empty, error) {
+	return oo.oltAdapter.ProxyOmciRequest(ctx, request)
+}
+
+// ProxyOmciRequests proxies a batch of OMCI requests from the child adapter
+func (oo *OpenOLTInterAdapter) ProxyOmciRequests(ctx context.Context, request *ia.OmciMessages) (*empty.Empty, error) {
+	return oo.oltAdapter.ProxyOmciRequests(ctx, request)
+}
+
+// GetTechProfileInstance returns an instance of a tech profile
+func (oo *OpenOLTInterAdapter) GetTechProfileInstance(ctx context.Context, request *ia.TechProfileInstanceRequestMessage) (*ia.TechProfileDownloadMessage, error) {
+	return oo.oltAdapter.GetTechProfileInstance(ctx, request)
+}
+
+// GetHealthStatus is used by an OltInterAdapterService client to detect a lost
+// connection with the gRPC server hosting the OltInterAdapterService
+func (oo *OpenOLTInterAdapter) GetHealthStatus(stream oltia.OltInterAdapterService_GetHealthStatusServer) error {
+	ctx := context.Background()
+	logger.Debugw(ctx, "receive-stream-connection", log.Fields{"stream": stream})
+
+	if stream == nil {
+		return fmt.Errorf("conn-is-nil %v", stream)
+	}
+	initialRequestTime := time.Now()
+	var remoteClient *common.Connection
+	var tempClient *common.Connection
+	var err error
+loop:
+	for {
+		tempClient, err = stream.Recv()
+		if err != nil {
+			logger.Warnw(ctx, "received-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+		err = stream.Send(&health.HealthStatus{State: health.HealthStatus_HEALTHY})
+		if err != nil {
+			logger.Warnw(ctx, "sending-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+
+		remoteClient = tempClient
+		logger.Debugw(ctx, "received-keep-alive", log.Fields{"remote-client": remoteClient})
+
+		select {
+		case <-stream.Context().Done():
+			logger.Infow(ctx, "stream-keep-alive-context-done", log.Fields{"remote-client": remoteClient, "error": stream.Context().Err()})
+			break loop
+		case <-oo.exitChannel:
+			logger.Warnw(ctx, "received-stop", log.Fields{"remote-client": remoteClient, "initial-conn-time": initialRequestTime})
+			break loop
+		default:
+		}
+	}
+	logger.Errorw(ctx, "connection-down", log.Fields{"remote-client": remoteClient, "error": err, "initial-conn-time": initialRequestTime})
+	return err
+}
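Note: the keep-alive loop above duplicates the one added to OpenOLT.GetHealthStatus in openolt.go, differing only in the generated stream type. Both generated streams expose the same Recv/Send/Context surface, so the loop could be shared behind a small interface, roughly as sketched below. This is only a possible follow-up; healthStream and serveKeepAlive are hypothetical names, not part of this change.

// Hypothetical refactor sketch: one keep-alive helper for both services.
package core

import (
	"context"

	"github.com/opencord/voltha-protos/v5/go/common"
	"github.com/opencord/voltha-protos/v5/go/health"
)

// healthStream is the surface shared by
// adapter_service.AdapterService_GetHealthStatusServer and
// olt_inter_adapter_service.OltInterAdapterService_GetHealthStatusServer.
type healthStream interface {
	Recv() (*common.Connection, error)
	Send(*health.HealthStatus) error
	Context() context.Context
}

// serveKeepAlive answers keep-alives until the stream breaks, its context is
// cancelled, or exit is closed.
func serveKeepAlive(stream healthStream, exit <-chan struct{}) error {
	for {
		if _, err := stream.Recv(); err != nil {
			return err
		}
		if err := stream.Send(&health.HealthStatus{State: health.HealthStatus_HEALTHY}); err != nil {
			return err
		}
		select {
		case <-stream.Context().Done():
			return stream.Context().Err()
		case <-exit:
			return nil
		default:
		}
	}
}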
diff --git a/internal/pkg/core/openolt_test.go b/internal/pkg/core/openolt_test.go
index c549d44..f909128 100644
--- a/internal/pkg/core/openolt_test.go
+++ b/internal/pkg/core/openolt_test.go
@@ -50,7 +50,7 @@
 	numOnus        int
 	KVStoreAddress string
 	KVStoreType    string
-	exitChannel    chan int
+	exitChannel    chan struct{}
 	ctx            context.Context
 }
 
@@ -543,7 +543,7 @@
 		args    args
 		wantErr error
 	}{
-		{"stop-1", &fields{exitChannel: make(chan int, 1)}, args{}, errors.New("stop error")},
+		{"stop-1", &fields{exitChannel: make(chan struct{})}, args{}, errors.New("stop error")},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/mocks/mockCoreClient.go b/pkg/mocks/mockCoreClient.go
index 28a4324..1825f5e 100644
--- a/pkg/mocks/mockCoreClient.go
+++ b/pkg/mocks/mockCoreClient.go
@@ -27,14 +27,14 @@
 	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
 	"github.com/opencord/voltha-protos/v5/go/common"
 	ca "github.com/opencord/voltha-protos/v5/go/core_adapter"
-	"github.com/opencord/voltha-protos/v5/go/health"
+	"github.com/opencord/voltha-protos/v5/go/core_service"
 	"github.com/opencord/voltha-protos/v5/go/voltha"
 	"google.golang.org/grpc"
 )
 
 // NewMockCoreClient creates a new mock core client for a given core service
 func NewMockCoreClient(coreService *MockCoreService) *vgrpc.Client {
-	cc, _ := vgrpc.NewClient("mock-local-endpoint", "mock-remote-endpoint", nil)
+	cc, _ := vgrpc.NewClient("mock-core-endpoint", "mock-server-endpoint", "core_service.CoreService", nil)
 	cc.SetService(coreService)
 	return cc
 }
@@ -47,11 +47,6 @@
 	DevicePorts map[string][]*voltha.Port
 }
 
-// GetHealthStatus implements mock GetHealthStatus
-func (mcs MockCoreService) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	return &health.HealthStatus{State: health.HealthStatus_HEALTHY}, nil
-}
-
 // RegisterAdapter implements mock RegisterAdapter
 func (mcs MockCoreService) RegisterAdapter(ctx context.Context, in *ca.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error) {
 	if ctx == nil || in.Adapter == nil || in.DTypes == nil {
@@ -232,6 +227,11 @@
 	return &empty.Empty{}, nil
 }
 
+// GetHealthStatus implements mock GetHealthStatus
+func (mcs MockCoreService) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (core_service.CoreService_GetHealthStatusClient, error) {
+	return nil, nil
+}
+
 // Additional API found in the Core - unused?
 
 // ReconcileChildDevices implements mock ReconcileChildDevices
diff --git a/pkg/mocks/mockOnuInterAdapterClient.go b/pkg/mocks/mockOnuInterAdapterClient.go
index 2a751d7..dbd6e7d 100644
--- a/pkg/mocks/mockOnuInterAdapterClient.go
+++ b/pkg/mocks/mockOnuInterAdapterClient.go
@@ -22,15 +22,14 @@
 
 	"github.com/golang/protobuf/ptypes/empty"
 	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
-	"github.com/opencord/voltha-protos/v5/go/common"
-	"github.com/opencord/voltha-protos/v5/go/health"
 	ia "github.com/opencord/voltha-protos/v5/go/inter_adapter"
+	"github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service"
 	"google.golang.org/grpc"
 )
 
 // NewMockChildAdapterClient create a mock child adapter client
 func NewMockChildAdapterClient(srv *MockOnuInterAdapterService) *vgrpc.Client {
-	cc, _ := vgrpc.NewClient("mock-local-endpoint", "mock-remote-endpoint", nil)
+	cc, _ := vgrpc.NewClient("mock-child-endpoint", "mock-server-endpoint", "onu_inter_adapter_service.OnuInterAdapterService", nil)
 	cc.SetService(srv)
 	return cc
 }
@@ -39,11 +38,6 @@
 type MockOnuInterAdapterService struct {
 }
 
-// GetHealthStatus implements mock GetHealthStatus
-func (mos MockOnuInterAdapterService) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	return &health.HealthStatus{State: health.HealthStatus_HEALTHY}, nil
-}
-
 // OnuIndication implements mock OnuIndication
 func (mos *MockOnuInterAdapterService) OnuIndication(ctx context.Context, in *ia.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
 	return &empty.Empty{}, nil
@@ -68,3 +62,8 @@
 func (mos *MockOnuInterAdapterService) DeleteTCont(ctx context.Context, in *ia.DeleteTcontMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
 	return &empty.Empty{}, nil
 }
+
+// GetHealthStatus implements mock GetHealthStatus
+func (mos MockOnuInterAdapterService) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (onu_inter_adapter_service.OnuInterAdapterService_GetHealthStatusClient, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..8d82abe
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,78 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/struct/struct.proto
+
+package structpb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/struct.proto.
+
+type NullValue = structpb.NullValue
+
+const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE
+
+var NullValue_name = structpb.NullValue_name
+var NullValue_value = structpb.NullValue_value
+
+type Struct = structpb.Struct
+type Value = structpb.Value
+type Value_NullValue = structpb.Value_NullValue
+type Value_NumberValue = structpb.Value_NumberValue
+type Value_StringValue = structpb.Value_StringValue
+type Value_BoolValue = structpb.Value_BoolValue
+type Value_StructValue = structpb.Value_StructValue
+type Value_ListValue = structpb.Value_ListValue
+type ListValue = structpb.ListValue
+
+var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{
+	0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+	0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() }
+func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File
+	file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..cc40f27
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,71 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+
+package wrappers
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/wrappers.proto.
+
+type DoubleValue = wrapperspb.DoubleValue
+type FloatValue = wrapperspb.FloatValue
+type Int64Value = wrapperspb.Int64Value
+type UInt64Value = wrapperspb.UInt64Value
+type Int32Value = wrapperspb.Int32Value
+type UInt32Value = wrapperspb.UInt32Value
+type BoolValue = wrapperspb.BoolValue
+type StringValue = wrapperspb.StringValue
+type BytesValue = wrapperspb.BytesValue
+
+var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
+	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
+	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+	0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
+func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
+	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go
new file mode 100644
index 0000000..b6f4ed0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/codec.go
@@ -0,0 +1,217 @@
+package codec
+
+import (
+	"io"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/jhump/protoreflect/internal/codec"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = codec.ErrOverflow
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = codec.ErrBadWireType
+
+// NB: much of the implementation is in an internal package, to avoid an import
+// cycle between this codec package and the desc package. We export it from
+// this package, but we can't use a type alias because we also need to add
+// methods to it, to broaden the exposed API.
+
+// Buffer is a reader and a writer that wraps a slice of bytes and also
+// provides API for decoding and encoding the protobuf binary format.
+//
+// Its operation is similar to that of a bytes.Buffer: writing pushes
+// data to the end of the buffer while reading pops data from the head
+// of the buffer. So the same buffer can be used to both read and write.
+type Buffer codec.Buffer
+
+// NewBuffer creates a new buffer with the given slice of bytes as the
+// buffer's initial contents.
+func NewBuffer(buf []byte) *Buffer {
+	return (*Buffer)(codec.NewBuffer(buf))
+}
+
+// SetDeterministic sets this buffer to encode messages deterministically. This
+// is useful for tests, but the overhead is non-zero, so it is generally not
+// worth using outside of tests. When true, map fields in a message must have their
+// keys sorted before serialization to ensure deterministic output. Otherwise,
+// values in a map field will be serialized in map iteration order.
+func (cb *Buffer) SetDeterministic(deterministic bool) {
+	(*codec.Buffer)(cb).SetDeterministic(deterministic)
+}
+
+// IsDeterministic returns whether or not this buffer is configured to encode
+// messages deterministically.
+func (cb *Buffer) IsDeterministic() bool {
+	return (*codec.Buffer)(cb).IsDeterministic()
+}
+
+// Reset resets this buffer back to empty. Any subsequent writes/encodes
+// to the buffer will allocate a new backing slice of bytes.
+func (cb *Buffer) Reset() {
+	(*codec.Buffer)(cb).Reset()
+}
+
+// Bytes returns the slice of bytes remaining in the buffer. Note that
+// this does not perform a copy: if the contents of the returned slice
+// are modified, the modifications will be visible to subsequent reads
+// via the buffer.
+func (cb *Buffer) Bytes() []byte {
+	return (*codec.Buffer)(cb).Bytes()
+}
+
+// String returns the remaining bytes in the buffer as a string.
+func (cb *Buffer) String() string {
+	return (*codec.Buffer)(cb).String()
+}
+
+// EOF returns true if there are no more bytes remaining to read.
+func (cb *Buffer) EOF() bool {
+	return (*codec.Buffer)(cb).EOF()
+}
+
+// Skip attempts to skip the given number of bytes in the input. If
+// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
+// is returned and the buffer is unchanged. Otherwise, the given number
+// of bytes are skipped and nil is returned.
+func (cb *Buffer) Skip(count int) error {
+	return (*codec.Buffer)(cb).Skip(count)
+
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+	return (*codec.Buffer)(cb).Len()
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads min(len(dest), cb.Len()) bytes from input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+	return (*codec.Buffer)(cb).Read(dest)
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+	return (*codec.Buffer)(cb).Write(data)
+}
+
+var _ io.Writer = (*Buffer)(nil)
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) DecodeVarint() (uint64, error) {
+	return (*codec.Buffer)(cb).DecodeVarint()
+}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+	return (*codec.Buffer)(cb).DecodeTagAndWireType()
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+	return (*codec.Buffer)(cb).DecodeFixed64()
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+	return (*codec.Buffer)(cb).DecodeFixed32()
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	return (*codec.Buffer)(cb).DecodeRawBytes(alloc)
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+	return (*codec.Buffer)(cb).ReadGroup(alloc)
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+	return (*codec.Buffer)(cb).SkipGroup()
+}
+
+// SkipField attempts to skip the value of a field with the given wire
+// type. When consuming a protobuf-encoded stream, it can be called immediately
+// after DecodeTagAndWireType to discard the subsequent data for the field.
+func (cb *Buffer) SkipField(wireType int8) error {
+	return (*codec.Buffer)(cb).SkipField(wireType)
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+	return (*codec.Buffer)(cb).EncodeVarint(x)
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+	return (*codec.Buffer)(cb).EncodeTagAndWireType(tag, wireType)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+	return (*codec.Buffer)(cb).EncodeFixed64(x)
+
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+	return (*codec.Buffer)(cb).EncodeFixed32(x)
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+	return (*codec.Buffer)(cb).EncodeRawBytes(b)
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+	return (*codec.Buffer)(cb).EncodeMessage(pm)
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+	return (*codec.Buffer)(cb).EncodeDelimitedMessage(pm)
+}
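
As an illustrative sketch of how the Buffer wrapper above is typically driven (the field number 1 and the value 150 are arbitrary examples, not anything mandated by this patch), the following round-trips a tag and a varint:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

func main() {
	// Write field 1 as a varint (wire type 0), then read it back.
	var buf codec.Buffer
	if err := buf.EncodeTagAndWireType(1, proto.WireVarint); err != nil {
		panic(err)
	}
	if err := buf.EncodeVarint(150); err != nil {
		panic(err)
	}

	rd := codec.NewBuffer(buf.Bytes())
	tag, wireType, err := rd.DecodeTagAndWireType()
	if err != nil {
		panic(err)
	}
	val, err := rd.DecodeVarint()
	if err != nil {
		panic(err)
	}
	fmt.Println(tag, wireType, val) // 1 0 150
}
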
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
new file mode 100644
index 0000000..02f8a32
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -0,0 +1,318 @@
+package codec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+
+func init() {
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
+// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
+// it reads indicates an end-group marker.
+var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group")
+
+// MessageFactory is used to instantiate messages when DecodeFieldValue needs to
+// decode a message value.
+//
+// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which
+// implements this interface.
+type MessageFactory interface {
+	NewMessage(md *desc.MessageDescriptor) proto.Message
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// The tag number for the unrecognized field.
+	Tag int32
+
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// DecodeZigZag32 decodes a signed 32-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag32(v uint64) int32 {
+	return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31))
+}
+
+// DecodeZigZag64 decodes a signed 64-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag64(v uint64) int64 {
+	return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63))
+}
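
A few concrete values make the zig-zag mapping above tangible; this short sketch is purely illustrative (the encode-side counterparts, EncodeZigZag32 and EncodeZigZag64, live in encode_fields.go later in this patch):

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/codec"
)

func main() {
	// Zig-zag interleaves signed values so small magnitudes stay small when
	// varint-encoded: 0<->0, -1<->1, 1<->2, -2<->3, 2<->4, ...
	fmt.Println(codec.DecodeZigZag32(1))  // -1
	fmt.Println(codec.DecodeZigZag32(4))  // 2
	fmt.Println(codec.DecodeZigZag64(3))  // -2
	fmt.Println(codec.EncodeZigZag32(-2)) // 3 (defined in encode_fields.go)
}
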
+
+// DecodeFieldValue will read a field value from the buffer and return its
+// value and the corresponding field descriptor. The given function is used
+// to lookup a field descriptor by tag number. The given factory is used to
+// instantiate a message if the field value is (or contains) a message value.
+//
+// On error, the field descriptor and value are typically nil. However, if the
+// error returned is ErrWireTypeEndGroup, the returned value will indicate any
+// tag number encoded in the end-group marker.
+//
+// If the field descriptor returned is nil, that means that the given function
+// returned nil. This is expected to happen for unrecognized tag numbers. In
+// that case, no error is returned, and the value will be an UnknownField.
+func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) {
+	if cb.EOF() {
+		return nil, nil, io.EOF
+	}
+	tagNumber, wireType, err := cb.DecodeTagAndWireType()
+	if err != nil {
+		return nil, nil, err
+	}
+	if wireType == proto.WireEndGroup {
+		return nil, tagNumber, ErrWireTypeEndGroup
+	}
+	fd := fieldFinder(tagNumber)
+	if fd == nil {
+		val, err := cb.decodeUnknownField(tagNumber, wireType)
+		return nil, val, err
+	}
+	val, err := cb.decodeKnownField(fd, wireType, fact)
+	return fd, val, err
+}
+
+// DecodeScalarField extracts a properly-typed value from v. The returned value's
+// type depends on the given field descriptor type. It will be the same type as
+// generated structs use for the field descriptor's type. Enum types will return
+// an int32. If the given field type uses length-delimited encoding (nested
+// messages, bytes, and strings), an error is returned.
+func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return v != 0, nil
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return uint32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		s := int64(v)
+		if s > math.MaxInt32 || s < math.MinInt32 {
+			return nil, ErrOverflow
+		}
+		return int32(s), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return int32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return DecodeZigZag32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return v, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return int64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return DecodeZigZag64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return math.Float32frombits(uint32(v)), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return math.Float64frombits(v), nil
+
+	default:
+		// bytes, string, message, and group cannot be represented as a simple numeric value
+		return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
+	}
+}
+
+// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The
+// returned value's type will usually be []byte, string, or, for nested messages,
+// the type returned from the given message factory. However, since repeated
+// scalar fields can be length-delimited, when they used packed encoding, it can
+// also return an []interface{}, where each element is a scalar value. Furthermore,
+// it could return a scalar type, not in a slice, if the given field descriptor is
+// not repeated. This is to support cases where a field is changed from optional
+// to repeated. New code may emit a packed repeated representation, but old code
+// still expects a single scalar value. In this case, if the actual data in bytes
+// contains multiple values, only the last value is returned.
+func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
+	switch {
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return bytes, nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+		return string(bytes), nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+		msg := mf.NewMessage(fd.GetMessageType())
+		err := proto.Unmarshal(bytes, msg)
+		if err != nil {
+			return nil, err
+		} else {
+			return msg, nil
+		}
+
+	default:
+		// even if the field is not repeated or not packed, we still parse it as such for
+		// backwards compatibility (e.g. message we are de-serializing could have been both
+		// repeated and packed at the time of serialization)
+		packedBuf := NewBuffer(bytes)
+		var slice []interface{}
+		var val interface{}
+		for !packedBuf.EOF() {
+			var v uint64
+			var err error
+			if varintTypes[fd.GetType()] {
+				v, err = packedBuf.DecodeVarint()
+			} else if fixed32Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed32()
+			} else if fixed64Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed64()
+			} else {
+				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
+			}
+			if err != nil {
+				return nil, err
+			}
+			val, err = DecodeScalarField(fd, v)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() {
+				slice = append(slice, val)
+			}
+		}
+		if fd.IsRepeated() {
+			return slice, nil
+		} else {
+			// if not a repeated field, last value wins
+			return val, nil
+		}
+	}
+}
+
+func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) {
+	var val interface{}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		var num uint64
+		num, err = b.DecodeFixed32()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireFixed64:
+		var num uint64
+		num, err = b.DecodeFixed64()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireVarint:
+		var num uint64
+		num, err = b.DecodeVarint()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+
+	case proto.WireBytes:
+		alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+		var raw []byte
+		raw, err = b.DecodeRawBytes(alloc)
+		if err == nil {
+			val, err = DecodeLengthDelimitedField(fd, raw, fact)
+		}
+
+	case proto.WireStartGroup:
+		if fd.GetMessageType() == nil {
+			return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
+		}
+		msg := fact.NewMessage(fd.GetMessageType())
+		var data []byte
+		data, err = b.ReadGroup(false)
+		if err == nil {
+			err = proto.Unmarshal(data, msg)
+			if err == nil {
+				val = msg
+			}
+		}
+
+	default:
+		return nil, ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return val, nil
+}
+
+func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) {
+	u := UnknownField{Tag: tagNumber, Encoding: encoding}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		u.Value, err = b.DecodeFixed32()
+	case proto.WireFixed64:
+		u.Value, err = b.DecodeFixed64()
+	case proto.WireVarint:
+		u.Value, err = b.DecodeVarint()
+	case proto.WireBytes:
+		u.Contents, err = b.DecodeRawBytes(true)
+	case proto.WireStartGroup:
+		u.Contents, err = b.ReadGroup(true)
+	default:
+		err = ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}
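
A rough, illustrative sketch of how DecodeFieldValue is meant to be driven: a *desc.MessageDescriptor's FindFieldByNumber method matches the fieldFinder signature, and the MessageFactory may be nil so long as no message-typed fields are encountered. The helper name dumpFields and the scalar-only assumption are this sketch's own, not part of the library.

package example

import (
	"fmt"
	"io"

	"github.com/jhump/protoreflect/codec"
	"github.com/jhump/protoreflect/desc"
)

// dumpFields walks raw protobuf bytes for the given message descriptor and
// prints each field it recognizes. It assumes md describes a message with only
// scalar fields, since no MessageFactory is supplied.
func dumpFields(md *desc.MessageDescriptor, data []byte) error {
	buf := codec.NewBuffer(data)
	for {
		fd, val, err := buf.DecodeFieldValue(md.FindFieldByNumber, nil)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if fd == nil {
			// Unrecognized tag number: val is a codec.UnknownField.
			fmt.Printf("unknown field: %+v\n", val)
			continue
		}
		fmt.Printf("%s = %v\n", fd.GetName(), val)
	}
}
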
diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go
new file mode 100644
index 0000000..f76499f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/doc.go
@@ -0,0 +1,7 @@
+// Package codec contains a reader/writer type that assists with encoding
+// and decoding protobuf's binary representation.
+//
+// The code in this package began as a fork of proto.Buffer but provides
+// additional API to make it more useful to code that needs to dynamically
+// process or produce the protobuf binary format.
+package codec
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
new file mode 100644
index 0000000..499aa95
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -0,0 +1,288 @@
+package codec
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// EncodeZigZag64 does zig-zag encoding to convert the given
+// signed 64-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag64(v int64) uint64 {
+	return (uint64(v) << 1) ^ uint64(v>>63)
+}
+
+// EncodeZigZag32 does zig-zag encoding to convert the given
+// signed 32-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag32(v int32) uint64 {
+	return uint64((uint32(v) << 1) ^ uint32((v >> 31)))
+}
+
+func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+	if fd.IsMap() {
+		mp := val.(map[interface{}]interface{})
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valType := entryType.FindFieldByNumber(2)
+		var entryBuffer Buffer
+		if cb.IsDeterministic() {
+			entryBuffer.SetDeterministic(true)
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, k := range keys {
+				v := mp[k]
+				entryBuffer.Reset()
+				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+					return err
+				}
+				rv := reflect.ValueOf(v)
+				if rv.Kind() != reflect.Ptr || !rv.IsNil() {
+					if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+						return err
+					}
+				}
+				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+					return err
+				}
+				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+					return err
+				}
+			}
+		} else {
+			for k, v := range mp {
+				entryBuffer.Reset()
+				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+					return err
+				}
+				rv := reflect.ValueOf(v)
+				if rv.Kind() != reflect.Ptr || !rv.IsNil() {
+					if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+						return err
+					}
+				}
+				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+					return err
+				}
+				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	} else if fd.IsRepeated() {
+		sl := val.([]interface{})
+		wt, err := getWireType(fd.GetType())
+		if err != nil {
+			return err
+		}
+		if isPacked(fd) && len(sl) > 0 &&
+			(wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
+			// packed repeated field
+			var packedBuffer Buffer
+			for _, v := range sl {
+				if err := packedBuffer.encodeFieldValue(fd, v); err != nil {
+					return err
+				}
+			}
+			if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+				return err
+			}
+			return cb.EncodeRawBytes(packedBuffer.Bytes())
+		} else {
+			// non-packed repeated field
+			for _, v := range sl {
+				if err := cb.encodeFieldElement(fd, v); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	} else {
+		return cb.encodeFieldElement(fd, val)
+	}
+}
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+	opts := fd.AsFieldDescriptorProto().GetOptions()
+	// if set, use that value
+	if opts != nil && opts.Packed != nil {
+		return opts.GetPacked()
+	}
+	// if unset: proto2 defaults to false, proto3 to true
+	return fd.GetFile().IsProto3()
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+	return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+	vi := s[i]
+	vj := s[j]
+	switch reflect.TypeOf(vi).Kind() {
+	case reflect.Int32:
+		return vi.(int32) < vj.(int32)
+	case reflect.Int64:
+		return vi.(int64) < vj.(int64)
+	case reflect.Uint32:
+		return vi.(uint32) < vj.(uint32)
+	case reflect.Uint64:
+		return vi.(uint64) < vj.(uint64)
+	case reflect.String:
+		return vi.(string) < vj.(string)
+	case reflect.Bool:
+		return !vi.(bool) && vj.(bool)
+	default:
+		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+	}
+}
+
+func (s sortable) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error {
+	wt, err := getWireType(fd.GetType())
+	if err != nil {
+		return err
+	}
+	if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil {
+		return err
+	}
+	if err := b.encodeFieldValue(fd, val); err != nil {
+		return err
+	}
+	if wt == proto.WireStartGroup {
+		return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup)
+	}
+	return nil
+}
+
+func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		v := val.(bool)
+		if v {
+			return b.EncodeVarint(1)
+		}
+		return b.EncodeVarint(0)
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_INT32:
+		v := val.(int32)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		v := val.(int32)
+		return b.EncodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		v := val.(int32)
+		return b.EncodeVarint(EncodeZigZag32(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		v := val.(uint32)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		v := val.(uint32)
+		return b.EncodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		v := val.(int64)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		v := val.(int64)
+		return b.EncodeFixed64(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		v := val.(int64)
+		return b.EncodeVarint(EncodeZigZag64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		v := val.(uint64)
+		return b.EncodeVarint(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		v := val.(uint64)
+		return b.EncodeFixed64(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		v := val.(float64)
+		return b.EncodeFixed64(math.Float64bits(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		v := val.(float32)
+		return b.EncodeFixed32(uint64(math.Float32bits(v)))
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		v := val.([]byte)
+		return b.EncodeRawBytes(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		v := val.(string)
+		return b.EncodeRawBytes(([]byte)(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return b.EncodeDelimitedMessage(val.(proto.Message))
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		// just append the nested message to this buffer
+		return b.EncodeMessage(val.(proto.Message))
+		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
+
+	default:
+		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return proto.WireVarint, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return proto.WireFixed32, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return proto.WireFixed64, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES,
+		descriptor.FieldDescriptorProto_TYPE_STRING,
+		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return proto.WireBytes, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return proto.WireStartGroup, nil
+
+	default:
+		return 0, ErrBadWireType
+	}
+}
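
To make the packed branch of EncodeFieldValue above more concrete, here is an illustrative sketch that hand-rolls the same layout for a packed repeated varint field: the element encodings are concatenated and emitted as one length-delimited value under the field's tag. The field number 4 and the element values are arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

func main() {
	// Concatenate the varint encodings of the elements...
	var packed codec.Buffer
	for _, v := range []uint64{1, 2, 300} {
		if err := packed.EncodeVarint(v); err != nil {
			panic(err)
		}
	}

	// ...then write them as a single length-delimited value for field 4.
	var out codec.Buffer
	if err := out.EncodeTagAndWireType(4, proto.WireBytes); err != nil {
		panic(err)
	}
	if err := out.EncodeRawBytes(packed.Bytes()); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out.Bytes()) // 22 04 01 02 ac 02
}
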
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..538820c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,231 @@
+package desc
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+	intn "github.com/jhump/protoreflect/internal"
+)
+
+// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
+// The file's direct dependencies must be provided. If the given dependencies do not include
+// all of the file's dependencies or if the contents of the descriptors are internally
+// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
+func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fd, deps, nil)
+}
+
+func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+	ret := &FileDescriptor{
+		proto:      fd,
+		symbols:    map[string]Descriptor{},
+		fieldIndex: map[string]map[int32]*FieldDescriptor{},
+	}
+	pkg := fd.GetPackage()
+
+	// populate references to file descriptor dependencies
+	files := map[string]*FileDescriptor{}
+	for _, f := range deps {
+		files[f.proto.GetName()] = f
+	}
+	ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, d := range fd.GetDependency() {
+		resolved := r.ResolveImport(fd.GetName(), d)
+		ret.deps[i] = files[resolved]
+		if ret.deps[i] == nil {
+			if resolved != d {
+				ret.deps[i] = files[d]
+			}
+			if ret.deps[i] == nil {
+				return nil, intn.ErrNoSuchFile(d)
+			}
+		}
+	}
+	ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+	for i, pd := range fd.GetPublicDependency() {
+		ret.publicDeps[i] = ret.deps[pd]
+	}
+	ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+	for i, wd := range fd.GetWeakDependency() {
+		ret.weakDeps[i] = ret.deps[wd]
+	}
+	ret.isProto3 = fd.GetSyntax() == "proto3"
+
+	// populate all tables of child descriptors
+	for _, m := range fd.GetMessageType() {
+		md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
+		ret.symbols[n] = md
+		ret.messages = append(ret.messages, md)
+	}
+	for _, e := range fd.GetEnumType() {
+		ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
+		ret.symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range fd.GetExtension() {
+		exd, n := createFieldDescriptor(ret, ret, pkg, ex)
+		ret.symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for _, s := range fd.GetService() {
+		sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
+		ret.symbols[n] = sd
+		ret.services = append(ret.services, sd)
+	}
+
+	ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+	ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+	// now we can resolve all type references and source code info
+	scopes := []scope{fileScope(ret)}
+	path := make([]int32, 1, 8)
+	path[0] = internal.File_messagesTag
+	for i, md := range ret.messages {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_enumsTag
+	for i, ed := range ret.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[0] = internal.File_extensionsTag
+	for i, exd := range ret.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_servicesTag
+	for i, sd := range ret.services {
+		if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+
+	return ret, nil
+}
+
+// CreateFileDescriptors constructs a set of descriptors, one for each of the
+// given descriptor protos. The given set of descriptor protos must include all
+// transitive dependencies for every file.
+func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	if len(fds) == 0 {
+		return nil, nil
+	}
+	files := map[string]*dpb.FileDescriptorProto{}
+	resolved := map[string]*FileDescriptor{}
+	var name string
+	for _, fd := range fds {
+		name = fd.GetName()
+		files[name] = fd
+	}
+	for _, fd := range fds {
+		_, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return resolved, nil
+}
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
+	var fdps []*dpb.FileDescriptorProto
+	addAllFiles(fds, &fdps, map[string]struct{}{})
+	return &dpb.FileDescriptorSet{File: fdps}
+}
+
+func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+	for _, fd := range src {
+		if _, ok := seen[fd.GetName()]; ok {
+			continue
+		}
+		seen[fd.GetName()] = struct{}{}
+		addAllFiles(fd.GetDependencies(), results, seen)
+		*results = append(*results, fd.AsFileDescriptorProto())
+	}
+}
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, nil)
+}
+
+func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+	result, err := createFileDescriptorsFromSet(fds, r)
+	if err != nil {
+		return nil, err
+	}
+	files := fds.GetFile()
+	lastFilename := files[len(files)-1].GetName()
+	return result[lastFilename], nil
+}
+
+// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set.
+// The returned map includes all files in the set, keyed by name. The set must include the
+// full set of transitive dependencies for all files therein or else a link error will occur
+// and be returned instead of the map of descriptors. This is the same format used by
+// protoc when emitting a FileDescriptorSet file with an invocation like so:
+//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+	return createFileDescriptorsFromSet(fds, nil)
+}
+
+func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	files := fds.GetFile()
+	if len(files) == 0 {
+		return nil, errors.New("file descriptor set is empty")
+	}
+	return createFileDescriptors(files, r)
+}
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+	for _, s := range seen {
+		if filename == s {
+			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
+		}
+	}
+	seen = append(seen, filename)
+
+	if d, ok := resolved[filename]; ok {
+		return d, nil
+	}
+	fdp := files[filename]
+	if fdp == nil {
+		return nil, intn.ErrNoSuchFile(filename)
+	}
+	deps := make([]*FileDescriptor, len(fdp.GetDependency()))
+	for i, depName := range fdp.GetDependency() {
+		resolvedDep := r.ResolveImport(filename, depName)
+		dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
+		if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
+			dep, err = createFromSet(depName, r, seen, files, resolved)
+		}
+		if err != nil {
+			return nil, err
+		}
+		deps[i] = dep
+	}
+	d, err := createFileDescriptor(fdp, deps, r)
+	if err != nil {
+		return nil, err
+	}
+	resolved[filename] = d
+	return d, nil
+}
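
As a usage sketch for CreateFileDescriptorFromSet: load a protoset produced by the protoc invocation quoted in the doc comment above, unmarshal it, and turn its last file into a linked descriptor. The file name test.protoset simply mirrors that example and is not required by the API.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"

	"github.com/jhump/protoreflect/desc"
)

func main() {
	// Produced by: protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
	data, err := ioutil.ReadFile("test.protoset")
	if err != nil {
		panic(err)
	}

	var fds dpb.FileDescriptorSet
	if err := proto.Unmarshal(data, &fds); err != nil {
		panic(err)
	}

	fd, err := desc.CreateFileDescriptorFromSet(&fds)
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName(), fd.GetPackage())
}
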
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..42f0f8e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1723 @@
+package desc
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+	// GetName returns the name of the object described by the descriptor. This will
+	// be a base name that does not include enclosing message names or the package name.
+	// For file descriptors, this indicates the path and name to the described file.
+	GetName() string
+	// GetFullyQualifiedName returns the fully-qualified name of the object described by
+	// the descriptor. This will include the package name and any enclosing message names.
+	// For file descriptors, this returns the path and name to the described file (same as
+	// GetName).
+	GetFullyQualifiedName() string
+	// GetParent returns the enclosing element in a proto source file. If the described
+	// object is a top-level object, this returns the file descriptor. Otherwise, it returns
+	// the element in which the described object was declared. File descriptors have no
+	// parent and return nil.
+	GetParent() Descriptor
+	// GetFile returns the file descriptor in which this element was declared. File
+	// descriptors return themselves.
+	GetFile() *FileDescriptor
+	// GetOptions returns the options proto containing options for the described element.
+	GetOptions() proto.Message
+	// GetSourceInfo returns any source code information that was present in the file
+	// descriptor. Source code info is optional. If no source code info is available for
+	// the element (including if there is none at all in the file descriptor) then this
+	// returns nil
+	GetSourceInfo() *dpb.SourceCodeInfo_Location
+	// AsProto returns the underlying descriptor proto for this descriptor.
+	AsProto() proto.Message
+}
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+	proto      *dpb.FileDescriptorProto
+	symbols    map[string]Descriptor
+	deps       []*FileDescriptor
+	publicDeps []*FileDescriptor
+	weakDeps   []*FileDescriptor
+	messages   []*MessageDescriptor
+	enums      []*EnumDescriptor
+	extensions []*FieldDescriptor
+	services   []*ServiceDescriptor
+	fieldIndex map[string]map[int32]*FieldDescriptor
+	isProto3   bool
+	sourceInfo internal.SourceInfoMap
+	sourceInfoRecomputeFunc
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+	fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+	if fields == nil {
+		fields = map[int32]*FieldDescriptor{}
+		fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+	}
+	fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+	return fd.proto.GetName()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+	return fd.proto.GetPackage()
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// It is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+	return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+	return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+func (fd *FileDescriptor) IsProto3() bool {
+	return fd.isProto3
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+	return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+	return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+	return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+	return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+	return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+	return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+	return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if symbol[0] == '.' {
+		symbol = symbol[1:]
+	}
+	if ret := fd.symbols[symbol]; ret != nil {
+		return ret
+	}
+
+	// allow accessing symbols through public imports, too
+	for _, dep := range fd.GetPublicDependencies() {
+		if ret := dep.FindSymbol(symbol); ret != nil {
+			return ret
+		}
+	}
+
+	// not found
+	return nil
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+	if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+	if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+		return ed
+	} else {
+		return nil
+	}
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+	if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+		return sd
+	} else {
+		return nil
+	}
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+	if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+	if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+	proto          *dpb.DescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	fields         []*FieldDescriptor
+	nested         []*MessageDescriptor
+	enums          []*EnumDescriptor
+	extensions     []*FieldDescriptor
+	oneOfs         []*OneOfDescriptor
+	extRanges      extRanges
+	fqn            string
+	sourceInfoPath []int32
+	jsonNames      jsonNameMap
+	isProto3       bool
+	isMapEntry     bool
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
+	msgName := merge(enclosing, md.GetName())
+	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
+	for _, f := range md.GetField() {
+		fld, n := createFieldDescriptor(fd, ret, msgName, f)
+		symbols[n] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	for _, nm := range md.NestedType {
+		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
+		symbols[n] = nmd
+		ret.nested = append(ret.nested, nmd)
+	}
+	for _, e := range md.EnumType {
+		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
+		symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range md.GetExtension() {
+		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
+		symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for i, o := range md.GetOneofDecl() {
+		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
+		symbols[n] = od
+		ret.oneOfs = append(ret.oneOfs, od)
+	}
+	for _, r := range md.GetExtensionRange() {
+		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code),
+		// but protoc converts the range to an exclusive end in the descriptor, so we must convert back
+		end := r.GetEnd() - 1
+		ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
+			Start: r.GetStart(),
+			End:   end})
+	}
+	sort.Sort(ret.extRanges)
+	ret.isProto3 = fd.isProto3
+	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
+		len(ret.fields) == 2 &&
+		ret.fields[0].GetNumber() == 1 &&
+		ret.fields[1].GetNumber() == 2
+
+	return ret, msgName
+}
+
+func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Message_nestedMessagesTag)
+	scopes = append(scopes, messageScope(md))
+	for i, nmd := range md.nested {
+		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_enumsTag
+	for i, ed := range md.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i, fld := range md.fields {
+		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i, exd := range md.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i, od := range md.oneOfs {
+		od.resolve(append(path, int32(i)))
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the message.
+func (md *MessageDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the message. This
+// includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (md *MessageDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the message's enclosing descriptor. For top-level messages,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (md *MessageDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this message is defined.
+func (md *MessageDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the message's options. Most usages will be more interested
+// in GetMessageOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMessageOptions returns the message's options.
+func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the message, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// message was defined and also contains comments associated with the message
+// definition.
+func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsDescriptorProto returns the underlying descriptor proto.
+func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MessageDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsMapEntry returns true if this is a synthetic message type that represents an entry
+// in a map field.
+func (md *MessageDescriptor) IsMapEntry() bool {
+	return md.isMapEntry
+}
+
+// GetFields returns all of the fields for this message.
+func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
+	return md.fields
+}
+
+// GetNestedMessageTypes returns all of the message types declared inside this message.
+func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
+	return md.nested
+}
+
+// GetNestedEnumTypes returns all of the enums declared inside this message.
+func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
+	return md.enums
+}
+
+// GetNestedExtensions returns all of the extensions declared inside this message.
+func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
+	return md.extensions
+}
+
+// GetOneOfs returns all of the one-of field sets declared inside this message.
+func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
+	return md.oneOfs
+}
+
+// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
+func (md *MessageDescriptor) IsProto3() bool {
+	return md.isProto3
+}
+
+// GetExtensionRanges returns the ranges of extension field numbers for this message.
+func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
+	return md.extRanges
+}
+
+// IsExtendable returns true if this message has any extension ranges.
+func (md *MessageDescriptor) IsExtendable() bool {
+	return len(md.extRanges) > 0
+}
+
+// IsExtension returns true if the given tag number is within any of this message's
+// extension ranges.
+func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
+	return md.extRanges.IsExtension(tagNumber)
+}
+
+type extRanges []proto.ExtensionRange
+
+func (er extRanges) String() string {
+	var buf bytes.Buffer
+	first := true
+	for _, r := range er {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(",")
+		}
+		fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
+	}
+	return buf.String()
+}
+
+func (er extRanges) IsExtension(tagNumber int32) bool {
+	i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
+	return i < len(er) && tagNumber >= er[i].Start
+}
+
+func (er extRanges) Len() int {
+	return len(er)
+}
+
+func (er extRanges) Less(i, j int) bool {
+	return er[i].Start < er[j].Start
+}
+
+func (er extRanges) Swap(i, j int) {
+	er[i], er[j] = er[j], er[i]
+}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+	fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+	if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+	if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
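
The lookup helpers above compose naturally; the following illustrative sketch resolves a message by fully-qualified name and then two of its fields. The names mypkg.MyMessage and id are placeholders for whatever the loaded file actually declares.

package example

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
)

// lookupField finds a message in the given file and prints a couple of its
// fields, returning silently when the placeholder names are not present.
func lookupField(fd *desc.FileDescriptor) {
	md := fd.FindMessage("mypkg.MyMessage")
	if md == nil {
		return
	}
	if fld := md.FindFieldByName("id"); fld != nil {
		fmt.Println(fld.GetFullyQualifiedName(), fld.GetNumber())
	}
	if fld := md.FindFieldByNumber(1); fld != nil {
		fmt.Println(fld.GetName())
	}
}
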
+
+// FieldDescriptor describes a field of a protocol buffer message.
+type FieldDescriptor struct {
+	proto          *dpb.FieldDescriptorProto
+	parent         Descriptor
+	owner          *MessageDescriptor
+	file           *FileDescriptor
+	oneOf          *OneOfDescriptor
+	msgType        *MessageDescriptor
+	enumType       *EnumDescriptor
+	fqn            string
+	sourceInfoPath []int32
+	def            memoizedDefault
+	isMap          bool
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
+	fldName := merge(enclosing, fld.GetName())
+	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
+	if fld.GetExtendee() == "" {
+		ret.owner = parent.(*MessageDescriptor)
+	}
+	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
+	return ret, fldName
+}
+
+func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+	}
+	fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.enumType = desc.(*EnumDescriptor)
+		}
+	}
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.msgType = desc.(*MessageDescriptor)
+		}
+	}
+	if fd.proto.GetExtendee() != "" {
+		if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+			return err
+		} else {
+			fd.owner = desc.(*MessageDescriptor)
+		}
+	}
+	fd.file.registerField(fd)
+	fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
+		fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().IsMapEntry()
+	return nil
+}
+
+func (fd *FieldDescriptor) determineDefault() interface{} {
+	if fd.IsMap() {
+		return map[interface{}]interface{}(nil)
+	} else if fd.IsRepeated() {
+		return []interface{}(nil)
+	} else if fd.msgType != nil {
+		return nil
+	}
+
+	proto3 := fd.file.isProto3
+	if !proto3 {
+		def := fd.AsFieldDescriptorProto().GetDefaultValue()
+		if def != "" {
+			ret := parseDefaultValue(fd, def)
+			if ret != nil {
+				return ret
+			}
+			// if we can't parse the default value, fall through to return the normal default...
+		}
+	}
+
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_FIXED32,
+		dpb.FieldDescriptorProto_TYPE_UINT32:
+		return uint32(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
+		dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32:
+		return int32(0)
+	case dpb.FieldDescriptorProto_TYPE_FIXED64,
+		dpb.FieldDescriptorProto_TYPE_UINT64:
+		return uint64(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
+		dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64:
+		return int64(0)
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		return float32(0.0)
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return float64(0.0)
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		return false
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(nil)
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return ""
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		if proto3 {
+			return int32(0)
+		}
+		enumVals := fd.GetEnumType().GetValues()
+		if len(enumVals) > 0 {
+			return enumVals[0].GetNumber()
+		} else {
+			return int32(0) // should not happen: a valid enum declares at least one value
+		}
+	default:
+		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
+	}
+}
+
+func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		vd := fd.GetEnumType().FindValueByName(val)
+		if vd != nil {
+			return vd.GetNumber()
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		if val == "true" {
+			return true
+		} else if val == "false" {
+			return false
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(unescape(val))
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return val
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := strconv.ParseFloat(val, 32); err == nil {
+			return float32(f)
+		} else {
+			return float32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		if f, err := strconv.ParseFloat(val, 64); err == nil {
+			return f
+		} else {
+			return float64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32,
+		dpb.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
+			return int32(i)
+		} else {
+			return int32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT32,
+		dpb.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
+			return uint32(i)
+		} else {
+			return uint32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64,
+		dpb.FieldDescriptorProto_TYPE_SFIXED64:
+		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
+			return i
+		} else {
+			return int64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT64,
+		dpb.FieldDescriptorProto_TYPE_FIXED64:
+		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
+			return i
+		} else {
+			return uint64(0)
+		}
+	default:
+		return nil
+	}
+}
+
+func unescape(s string) string {
+	// protoc encodes default values for 'bytes' fields using C escaping,
+	// so this function reverses that escaping
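+	// For example, a declared default of \x41\n\101 decodes to the three
+	// bytes 'A', '\n', 'A'.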
+	out := make([]byte, 0, len(s))
+	var buf [4]byte
+	for len(s) > 0 {
+		if s[0] != '\\' || len(s) < 2 {
+			// not escape sequence, or too short to be well-formed escape
+			out = append(out, s[0])
+			s = s[1:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			n := matchPrefix(s[2:], 2, isHex)
+			if n == 0 {
+				// bad escape
+				out = append(out, s[:2]...)
+				s = s[2:]
+			} else {
+				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
+				if err != nil {
+					// shouldn't really happen...
+					out = append(out, s[:2+n]...)
+				} else {
+					out = append(out, byte(c))
+				}
+				s = s[2+n:]
+			}
+		} else if s[1] >= '0' && s[1] <= '7' {
+			n := 1 + matchPrefix(s[2:], 2, isOctal)
+			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil || c > 0xff {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(c))
+			}
+			s = s[1+n:]
+		} else if s[1] == 'u' {
+			if len(s) < 6 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:6], 16, 16)
+				if err != nil {
+					// bad escape
+					out = append(out, s[:6]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[6:]
+			}
+		} else if s[1] == 'U' {
+			if len(s) < 10 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:10], 16, 32)
+				if err != nil || c > 0x10ffff {
+					// bad escape
+					out = append(out, s[:10]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[10:]
+			}
+		} else {
+			switch s[1] {
+			case 'a':
+				out = append(out, '\a')
+			case 'b':
+				out = append(out, '\b')
+			case 'f':
+				out = append(out, '\f')
+			case 'n':
+				out = append(out, '\n')
+			case 'r':
+				out = append(out, '\r')
+			case 't':
+				out = append(out, '\t')
+			case 'v':
+				out = append(out, '\v')
+			case '\\':
+				out = append(out, '\\')
+			case '\'':
+				out = append(out, '\'')
+			case '"':
+				out = append(out, '"')
+			case '?':
+				out = append(out, '?')
+			default:
+				// invalid escape, just copy it as-is
+				out = append(out, s[:2]...)
+			}
+			s = s[2:]
+		}
+	}
+	return string(out)
+}
+
+func isOctal(b byte) bool { return b >= '0' && b <= '7' }
+func isHex(b byte) bool {
+	return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
+}
+func matchPrefix(s string, limit int, fn func(byte) bool) int {
+	l := len(s)
+	if l > limit {
+		l = limit
+	}
+	i := 0
+	for ; i < l; i++ {
+		if !fn(s[i]) {
+			return i
+		}
+	}
+	return i
+}
+
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+	return fd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes the fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+	return fd.fqn
+}
+
+// GetParent returns the field's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+	return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+	return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+	if jsonName := fd.proto.JsonName; jsonName != nil {
+		// if json name is present, use its value
+		return *jsonName
+	}
+	// otherwise, compute the proper JSON name from the field name
+	return jsonCamelCase(fd.proto.GetName())
+}
+
+func jsonCamelCase(s string) string {
+	// This mirrors the implementation in protoc/C++ runtime and in the Java runtime:
+	//   https://github.com/protocolbuffers/protobuf/blob/a104dffcb6b1958a424f5fa6f9e6bdc0ab9b6f9e/src/google/protobuf/descriptor.cc#L276
+	//   https://github.com/protocolbuffers/protobuf/blob/a1c886834425abb64a966231dd2c9dd84fb289b3/java/core/src/main/java/com/google/protobuf/Descriptors.java#L1286
+	var buf bytes.Buffer
+	prevWasUnderscore := false
+	for _, r := range s {
+		if r == '_' {
+			prevWasUnderscore = true
+			continue
+		}
+		if prevWasUnderscore {
+			r = unicode.ToUpper(r)
+			prevWasUnderscore = false
+		}
+		buf.WriteRune(r)
+	}
+	return buf.String()
+}
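A small sketch of the JSON-name logic above, reusing the descriptor.proto registration and imports from the earlier sketches (google.protobuf.FileOptions has underscored field names):

    md, err := desc.LoadMessageDescriptor("google.protobuf.FileOptions")
    if err != nil {
    	panic(err)
    }
    fmt.Println(md.FindFieldByName("java_outer_classname").GetJSONName()) // javaOuterClassname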
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+	parent := fd.GetParent()
+	switch parent := parent.(type) {
+	case *FileDescriptor:
+		pkg := parent.GetPackage()
+		if pkg == "" {
+			return fd.GetJSONName()
+		}
+		return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+	default:
+		return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+	}
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+	return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+	return fd.proto.GetExtendee() != ""
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+	return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+	return fd.proto.GetType()
+}
+
+// GetLabel returns the label for this field. The label can be required (proto2-only),
+// optional (the default in proto3), or repeated.
+func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+	return fd.proto.GetLabel()
+}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// IsProto3Optional returns true if this field has an explicit "optional" label
+// and is in a "proto3" syntax file. Such fields, if they are normal fields (not
+// extensions), will be nested in synthetic oneofs that contain only the single
+// field.
+func (fd *FieldDescriptor) IsProto3Optional() bool {
+	return internal.GetProto3Optional(fd.proto)
+}
+
+// HasPresence returns true if this field can distinguish when a value is
+// present or not. Scalar fields in "proto3" syntax files, for example, return
+// false since absent values are indistinguishable from zero values.
+func (fd *FieldDescriptor) HasPresence() bool {
+	if !fd.file.isProto3 {
+		return true
+	}
+	return fd.msgType != nil || fd.oneOf != nil
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label and its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+	return fd.isMap
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(1))
+	}
+	return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(2))
+	}
+	return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+	return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+	return fd.enumType
+}
+
+// GetDefaultValue returns the default value for this field.
+//
+// If this field represents a message type, this method always returns nil (even though
+// for proto2 files, the default value should be a default instance of the message type).
+// If the field represents an enum type, this method returns an int32 corresponding to the
+// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
+// this field is repeated (and not a map), it returns a nil []interface{}.
+//
+// Otherwise, it returns the declared default value for the field or a zero value, if no
+// default is declared or if the file is proto3. The type of said return value corresponds
+// to the type of the field:
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+func (fd *FieldDescriptor) GetDefaultValue() interface{} {
+	return fd.getDefaultValue()
+}
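A short illustration of the type mapping above, reusing the google.protobuf.Duration descriptor (and imports) from the earlier sketch:

    md, err := desc.LoadMessageDescriptor("google.protobuf.Duration")
    if err != nil {
    	panic(err)
    }
    secs := md.FindFieldByName("seconds").GetDefaultValue() // int64(0)
    nanos := md.FindFieldByName("nanos").GetDefaultValue()  // int32(0)
    fmt.Printf("%T %T\n", secs, nanos)                      // int64 int32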
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+	proto          *dpb.EnumDescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	values         []*EnumValueDescriptor
+	valuesByNum    sortedValues
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
+	enumName := merge(enclosing, ed.GetName())
+	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
+	for _, ev := range ed.GetValue() {
+		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
+		symbols[n] = evd
+		ret.values = append(ret.values, evd)
+	}
+	if len(ret.values) > 0 {
+		ret.valuesByNum = make(sortedValues, len(ret.values))
+		copy(ret.valuesByNum, ret.values)
+		sort.Stable(ret.valuesByNum)
+	}
+	return ret, enumName
+}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+	return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+	return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+	sv[i], sv[j] = sv[j], sv[i]
+}
+
+func (ed *EnumDescriptor) resolve(path []int32) {
+	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Enum_valuesTag)
+	for i, evd := range ed.values {
+		evd.resolve(append(path, int32(i)))
+	}
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+	return ed.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+	return ed.fqn
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+	return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+	return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+	return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+	return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+	return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+	return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+	return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+	return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+	fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+		return vd
+	} else {
+		return nil
+	}
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+	index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+	if index < len(ed.valuesByNum) {
+		vd := ed.valuesByNum[index]
+		if vd.GetNumber() == num {
+			return vd
+		}
+	}
+	return nil
+}
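FindValueByName and FindValueByNumber in use, again assuming the generated descriptor.proto code is linked in (same setup and imports as the FieldOptions sketch earlier):

    md, err := desc.LoadMessageDescriptor("google.protobuf.FieldDescriptorProto")
    if err != nil {
    	panic(err)
    }
    ed := md.FindFieldByName("type").GetEnumType()
    fmt.Println(ed.FindValueByName("TYPE_BOOL").GetNumber()) // 8
    fmt.Println(ed.FindValueByNumber(9).GetName())           // TYPE_STRING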
+
+// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
+type EnumValueDescriptor struct {
+	proto          *dpb.EnumValueDescriptorProto
+	parent         *EnumDescriptor
+	file           *FileDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
+	valName := merge(enclosing, evd.GetName())
+	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+	vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+	return vd.proto.GetName()
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+	return vd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes the fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+	return vd.fqn
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+	return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+	return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+	return vd.file
+}
+
+// GetOptions returns the enum value's options. Most usages will be more
+// interested in GetEnumValueOptions, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetOptions() proto.Message {
+	return vd.proto.GetOptions()
+}
+
+// GetEnumValueOptions returns the enum value's options.
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+	return vd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum value, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum value was defined and also contains comments associated with the enum
+// value definition.
+func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumValueDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) AsProto() proto.Message {
+	return vd.proto
+}
+
+// AsEnumValueDescriptorProto returns the underlying descriptor proto.
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+	return vd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (vd *EnumValueDescriptor) String() string {
+	return vd.proto.String()
+}
+
+// ServiceDescriptor describes an RPC service declared in a proto file.
+type ServiceDescriptor struct {
+	proto          *dpb.ServiceDescriptorProto
+	file           *FileDescriptor
+	methods        []*MethodDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
+	serviceName := merge(enclosing, sd.GetName())
+	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
+	for _, m := range sd.GetMethod() {
+		md, n := createMethodDescriptor(fd, ret, serviceName, m)
+		symbols[n] = md
+		ret.methods = append(ret.methods, md)
+	}
+	return ret, serviceName
+}
+
+func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
+	sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Service_methodsTag)
+	for i, md := range sd.methods {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+	return sd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+	return sd.fqn
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+	return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+	return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+	return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+	return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+	return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+	return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+	return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+	return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+	fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+	if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+	proto          *dpb.MethodDescriptorProto
+	parent         *ServiceDescriptor
+	file           *FileDescriptor
+	inType         *MessageDescriptor
+	outType        *MessageDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
+	// request and response types get resolved later
+	methodName := merge(enclosing, md.GetName())
+	return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+}
+
+func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
+		return err
+	} else {
+		md.inType = desc.(*MessageDescriptor)
+	}
+	if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+		return err
+	} else {
+		md.outType = desc.(*MessageDescriptor)
+	}
+	return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes the fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+	return md.proto.GetServerStreaming()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+	return md.proto.GetClientStreaming()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+	return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+	return md.outType
+}
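A sketch of how the method accessors are typically consumed. The file path and service below are placeholders, not part of this change; the sketch assumes some generated file registered as "mypkg/my_service.proto" is linked into the program:

    package main

    import (
    	"fmt"

    	"github.com/jhump/protoreflect/desc"
    )

    func main() {
    	fd, err := desc.LoadFileDescriptor("mypkg/my_service.proto") // hypothetical path
    	if err != nil {
    		panic(err)
    	}
    	for _, sd := range fd.GetServices() {
    		for _, md := range sd.GetMethods() {
    			fmt.Printf("%s: client-streaming=%v server-streaming=%v %s -> %s\n",
    				md.GetFullyQualifiedName(),
    				md.IsClientStreaming(), md.IsServerStreaming(),
    				md.GetInputType().GetName(), md.GetOutputType().GetName())
    		}
    	}
    }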
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+	proto          *dpb.OneofDescriptorProto
+	parent         *MessageDescriptor
+	file           *FileDescriptor
+	choices        []*FieldDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
+	oneOfName := merge(enclosing, od.GetName())
+	ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+	for _, f := range parent.fields {
+		oi := f.proto.OneofIndex
+		if oi != nil && *oi == int32(index) {
+			f.oneOf = ret
+			ret.choices = append(ret.choices, f)
+		}
+	}
+	return ret, oneOfName
+}
+
+func (od *OneOfDescriptor) resolve(path []int32) {
+	od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+	return od.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes the fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+	return od.fqn
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+	return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+	return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+	return od.file
+}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+	return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+	return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+	return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+	return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+	return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+	return od.choices
+}
+
+func (od *OneOfDescriptor) IsSynthetic() bool {
+	return len(od.choices) == 1 && od.choices[0].IsProto3Optional()
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared.
+type scope func(string) Descriptor
+
+func fileScope(fd *FileDescriptor) scope {
+	// we search symbols in this file, but also symbols in other files that have
+	// the same package as this file or a "parent" package (in protobuf,
+	// packages are a hierarchy like C++ namespaces)
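+	// For a file in package "foo.bar", an unqualified name N is therefore tried
+	// as "foo.bar.N", then "foo.N", and finally just "N".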
+	prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
+	return func(name string) Descriptor {
+		for _, prefix := range prefixes {
+			n := merge(prefix, name)
+			d := findSymbol(fd, n, false)
+			if d != nil {
+				return d
+			}
+		}
+		return nil
+	}
+}
+
+func messageScope(md *MessageDescriptor) scope {
+	return func(name string) Descriptor {
+		n := merge(md.fqn, name)
+		if d, ok := md.file.symbols[n]; ok {
+			return d
+		}
+		return nil
+	}
+}
+
+func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
+	if strings.HasPrefix(name, ".") {
+		// already fully-qualified
+		d := findSymbol(fd, name[1:], false)
+		if d != nil {
+			return d, nil
+		}
+	} else {
+		// unqualified, so we look in the enclosing (last) scope first and move
+		// towards outermost (first) scope, trying to resolve the symbol
+		for i := len(scopes) - 1; i >= 0; i-- {
+			d := scopes[i](name)
+			if d != nil {
+				return d, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
+}
+
+func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
+	d := fd.symbols[name]
+	if d != nil {
+		return d
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
+	var deps []*FileDescriptor
+	if public {
+		deps = fd.publicDeps
+	} else {
+		deps = fd.deps
+	}
+	for _, dep := range deps {
+		d = findSymbol(dep, name, true)
+		if d != nil {
+			return d
+		}
+	}
+
+	return nil
+}
+
+func merge(a, b string) string {
+	if a == "" {
+		return b
+	} else {
+		return a + "." + b
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..25d619a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,30 @@
+//go:build appengine || gopherjs || purego
+// +build appengine gopherjs purego
+
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: When use of unsafe is allowed, we use it to atomically maintain an index
+	// via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index
+	// and must do a linear scan of the fields each time.
+	for _, f := range md.fields {
+		jn := f.GetJSONName()
+		if jn == jsonName {
+			return f
+		}
+	}
+	return nil
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..691f0d8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,59 @@
+//go:build !appengine && !gopherjs && !purego
+// +build !appengine,!gopherjs,!purego
+
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{}            // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: We don't want to eagerly index JSON names because many programs won't use it.
+	// So we want to do it lazily, but also make sure the result is thread-safe. So we
+	// atomically load/store the map as if it were a normal pointer. We don't use other
+	// mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+	// do this lazily because those types cannot be copied, and we'd rather not induce
+	// 'go vet' errors in programs that use descriptors and try to copy them.
+	// If multiple goroutines try to access the index at the same time, before it is
+	// built, they will all end up computing the index redundantly. Future reads of
+	// the index will use whatever was the "last one stored" by those racing goroutines.
+	// Since building the index is deterministic, this is fine: all indices computed
+	// will be the same.
+	addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+	jsonNames := atomic.LoadPointer(addrOfJsonNames)
+	var index map[string]*FieldDescriptor
+	if jsonNames == nil {
+		// slow path: compute the index
+		index = map[string]*FieldDescriptor{}
+		for _, f := range md.fields {
+			jn := f.GetJSONName()
+			index[jn] = f
+		}
+		atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+	} else {
+		*(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+	}
+	return index[jsonName]
+}
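Usage is identical for both build-tag variants of FindFieldByJSONName; for example, assuming descriptor.proto generated code is linked in as in the earlier sketches:

    md, err := desc.LoadMessageDescriptor("google.protobuf.FieldDescriptorProto")
    if err != nil {
    	panic(err)
    }
    fmt.Println(md.FindFieldByJSONName("jsonName").GetName()) // json_name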
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+	def := atomic.LoadPointer(addrOfDef)
+	if def != nil {
+		return *(*interface{})(def)
+	}
+	// slow path: compute the default, potentially involves decoding value
+	d := fd.determineDefault()
+	atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+	return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..1740dce
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,41 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through the rich descriptor's
+// methods.
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+package desc
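To make the FileDescriptorSet route mentioned above concrete, an illustrative sketch follows; "set.pb" and "my.proto" are placeholder names, and the set is assumed to have been produced with protoc's --include_imports and --descriptor_set_out flags:

    package main

    import (
    	"fmt"
    	"io/ioutil"

    	"github.com/golang/protobuf/proto"
    	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
    	"github.com/jhump/protoreflect/desc"
    )

    func main() {
    	// e.g. protoc --include_imports --descriptor_set_out=set.pb my.proto
    	raw, err := ioutil.ReadFile("set.pb")
    	if err != nil {
    		panic(err)
    	}
    	var fds dpb.FileDescriptorSet
    	if err := proto.Unmarshal(raw, &fds); err != nil {
    		panic(err)
    	}
    	fd, err := desc.CreateFileDescriptorFromSet(&fds)
    	if err != nil {
    		panic(err)
    	}
    	for _, md := range fd.GetMessageTypes() {
    		fmt.Println(md.GetFullyQualifiedName())
    	}
    }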
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..ab93032
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,313 @@
+package desc
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+var (
+	globalImportPathConf map[string]string
+	globalImportPathMu   sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+func RegisterImportPath(registerPath, importPath string) {
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	desc := proto.FileDescriptor(registerPath)
+	if len(desc) == 0 {
+		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+	}
+	globalImportPathMu.Lock()
+	defer globalImportPathMu.Unlock()
+	if reg := globalImportPathConf[importPath]; reg != "" {
+		panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+	}
+	if globalImportPathConf == nil {
+		globalImportPathConf = map[string]string{}
+	}
+	globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
+func ResolveImport(importPath string) string {
+	importPath = clean(importPath)
+	globalImportPathMu.RLock()
+	defer globalImportPathMu.RUnlock()
+	reg := globalImportPathConf[importPath]
+	if reg == "" {
+		return importPath
+	}
+	return reg
+}
+
+// ImportResolver lets you work around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+//    import "foo/bar.proto";
+// However, when protoc is invoked, the command-line args look like so:
+//    protoc -Ifoo/ --go_out=foo/ bar.proto
+//    protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+// Because the path given to protoc is just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+//    fd, err := desc.LoadFileDescriptor("baz.proto")
+//    // err will be non-nil, complaining that there is no such file
+//    // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+//    var r desc.ImportResolver
+//    r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//    fd, err := r.LoadFileDescriptor("baz.proto")
+//    // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path than how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+type ImportResolver struct {
+	children    map[string]*ImportResolver
+	importPaths map[string]string
+
+	// By default, an ImportResolver will fallback to consulting any paths
+	// registered via the top-level RegisterImportPath function. Setting this
+	// field to true will cause the ImportResolver to skip that fallback and
+	// only examine its own locally registered paths.
+	SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+	if r != nil {
+		res := r.resolveImport(clean(source), clean(importPath))
+		if res != "" {
+			return res
+		}
+		if r.SkipFallbackRules {
+			return importPath
+		}
+	}
+	return ResolveImport(importPath)
+}
+
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+	if source == "" {
+		return r.importPaths[importPath]
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch != nil {
+		if reg := ch.resolveImport(cdr, importPath); reg != "" {
+			return reg
+		}
+	}
+	return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fallback to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+	r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must have the same
+// relative path as the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fallback to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+	importPath = clean(importPath)
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	registerPath = clean(registerPath)
+	if len(registerPath) == 0 {
+		panic("registered path cannot be empty")
+	}
+	r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+	if source == "" {
+		if r.importPaths == nil {
+			r.importPaths = map[string]string{}
+		} else if reg := r.importPaths[importPath]; reg != "" {
+			panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+		}
+		r.importPaths[importPath] = registerPath
+		return
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch == nil {
+		if r.children == nil {
+			r.children = map[string]*ImportResolver{}
+		}
+		ch = &ImportResolver{}
+		r.children[car] = ch
+	}
+	ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+	return loadFileDescriptor(filePath, r)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(msgName, r)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(msg, r)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(msgType, r)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, r)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, r)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, r)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, r)
+}
+
+// CreateFileDescriptorsFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+	return createFileDescriptorsFromSet(fds, r)
+}
+
+const dotPrefix = "." + string(filepath.Separator)
+
+func clean(path string) string {
+	if path == "" {
+		return ""
+	}
+	path = filepath.Clean(path)
+	if path == "." {
+		return ""
+	}
+	return strings.TrimPrefix(path, dotPrefix)
+}
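
For orientation, a minimal sketch of how the resolver above might be used by application code. The .proto paths are hypothetical and must already be registered with the proto runtime (for example by importing their generated packages); the sketch only exercises the Register* and LoadFileDescriptor methods defined in this file.

package main

import (
	"fmt"
	"log"

	"github.com/jhump/protoreflect/desc"
)

func main() {
	var r desc.ImportResolver

	// For any file under company/app, an `import "foo/bar.proto"` statement
	// will try to link the file registered as "company/deps/foo/bar.proto".
	r.RegisterImportPathFrom("company/deps/foo/bar.proto", "foo/bar.proto", "company/app")

	// A blank source context applies the rewrite to every file being linked.
	r.RegisterImportPath("vendor/google/api/http.proto", "google/api/http.proto")

	// The Load* and Create* methods on the resolver honor the alternates above.
	fd, err := r.LoadFileDescriptor("company/app/service.proto")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.GetName())
}
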
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
new file mode 100644
index 0000000..9aa4a3e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
@@ -0,0 +1,120 @@
+package internal
+
+import (
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/jhump/protoreflect/internal/codec"
+	"reflect"
+	"strings"
+
+	"github.com/jhump/protoreflect/internal"
+)
+
+// NB: We use reflection or unknown fields in case we are linked against an older
+// version of the proto runtime which does not know about the proto3_optional field.
+// We don't require linking with newer version (which would greatly simplify this)
+// because that means pulling in v1.4+ of the protobuf runtime, which has some
+// compatibility issues. (We'll be nice to users and not require that they upgrade
+// to the latest runtime just to upgrade to a newer protoreflect.)
+
+func GetProto3Optional(fd *dpb.FieldDescriptorProto) bool {
+	type newerFieldDesc interface {
+		GetProto3Optional() bool
+	}
+	var pm proto.Message = fd
+	if fd, ok := pm.(newerFieldDesc); ok {
+		return fd.GetProto3Optional()
+	}
+
+	// Field does not exist, so we have to examine unknown fields
+	// (we just silently bail if we have problems parsing them)
+	unk := internal.GetUnrecognized(pm)
+	buf := codec.NewBuffer(unk)
+	for {
+		tag, wt, err := buf.DecodeTagAndWireType()
+		if err != nil {
+			return false
+		}
+		if tag == Field_proto3OptionalTag && wt == proto.WireVarint {
+			v, _ := buf.DecodeVarint()
+			return v != 0
+		}
+		if err := buf.SkipField(wt); err != nil {
+			return false
+		}
+	}
+}
+
+func SetProto3Optional(fd *dpb.FieldDescriptorProto) {
+	rv := reflect.ValueOf(fd).Elem()
+	fld := rv.FieldByName("Proto3Optional")
+	if fld.IsValid() {
+		fld.Set(reflect.ValueOf(proto.Bool(true)))
+		return
+	}
+
+	// Field does not exist, so we have to store as unknown field.
+	var buf codec.Buffer
+	if err := buf.EncodeTagAndWireType(Field_proto3OptionalTag, proto.WireVarint); err != nil {
+		// TODO: panic? log?
+		return
+	}
+	if err := buf.EncodeVarint(1); err != nil {
+		// TODO: panic? log?
+		return
+	}
+	internal.SetUnrecognized(fd, buf.Bytes())
+}
+
+// ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor
+// for each proto3 optional field. It also updates the fields to have the correct
+// oneof index reference.
+func ProcessProto3OptionalFields(msgd *dpb.DescriptorProto) {
+	var allNames map[string]struct{}
+	for _, fd := range msgd.Field {
+		if GetProto3Optional(fd) {
+			// lazy init the set of all names
+			if allNames == nil {
+				allNames = map[string]struct{}{}
+				for _, fd := range msgd.Field {
+					allNames[fd.GetName()] = struct{}{}
+				}
+				for _, fd := range msgd.Extension {
+					allNames[fd.GetName()] = struct{}{}
+				}
+				for _, ed := range msgd.EnumType {
+					allNames[ed.GetName()] = struct{}{}
+					for _, evd := range ed.Value {
+						allNames[evd.GetName()] = struct{}{}
+					}
+				}
+				for _, fd := range msgd.NestedType {
+					allNames[fd.GetName()] = struct{}{}
+				}
+				for _, n := range msgd.ReservedName {
+					allNames[n] = struct{}{}
+				}
+			}
+
+			// Compute a name for the synthetic oneof. This uses the same
+			// algorithm as used in protoc:
+			//  https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803
+			ooName := fd.GetName()
+			if !strings.HasPrefix(ooName, "_") {
+				ooName = "_" + ooName
+			}
+			for {
+				_, ok := allNames[ooName]
+				if !ok {
+					// found a unique name
+					allNames[ooName] = struct{}{}
+					break
+				}
+				ooName = "X" + ooName
+			}
+
+			fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
+			msgd.OneofDecl = append(msgd.OneofDecl, &dpb.OneofDescriptorProto{Name: proto.String(ooName)})
+		}
+	}
+}
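
The loop above mirrors protoc's synthetic-oneof naming: prefix the field name with an underscore, then prepend "X" until the candidate collides with nothing already declared in the message. A standalone sketch of just that naming step (the set of taken names is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// syntheticOneofName applies the same rule as the loop above: ensure a leading
// underscore, then prepend "X" until the name is unique among the taken names.
func syntheticOneofName(fieldName string, taken map[string]struct{}) string {
	name := fieldName
	if !strings.HasPrefix(name, "_") {
		name = "_" + name
	}
	for {
		if _, ok := taken[name]; !ok {
			taken[name] = struct{}{}
			return name
		}
		name = "X" + name
	}
}

func main() {
	taken := map[string]struct{}{"foo": {}, "_foo": {}}
	fmt.Println(syntheticOneofName("foo", taken)) // prints X_foo
}
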
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..b4150b8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,107 @@
+package internal
+
+import (
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// SourceInfoMap is a map of paths in a descriptor to the corresponding source
+// code info.
+type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path. If there are
+// multiple locations for the same path, the first one is returned.
+func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+	v := m[asMapKey(path)]
+	if len(v) > 0 {
+		return v[0]
+	}
+	return nil
+}
+
+// GetAll returns all source code info for the given path.
+func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
+	return m[asMapKey(path)]
+}
+
+// Add stores the given source code info for the given path.
+func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+	m[asMapKey(path)] = append(m[asMapKey(path)], loc)
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+	k := asMapKey(path)
+	if _, ok := m[k]; ok {
+		return false
+	}
+	m[k] = []*dpb.SourceCodeInfo_Location{loc}
+	return true
+}
+
+func asMapKey(slice []int32) string {
+	// NB: arrays should be usable as map keys, but this does not
+	// work due to a bug: https://github.com/golang/go/issues/22605
+	//rv := reflect.ValueOf(slice)
+	//arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem())
+	//array := reflect.New(arrayType).Elem()
+	//reflect.Copy(array, rv)
+	//return array.Interface()
+
+	b := make([]byte, len(slice)*4)
+	j := 0
+	for _, s := range slice {
+		b[j] = byte(s)
+		b[j+1] = byte(s >> 8)
+		b[j+2] = byte(s >> 16)
+		b[j+3] = byte(s >> 24)
+		j += 4
+	}
+	return string(b)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+	res := SourceInfoMap{}
+	PopulateSourceInfoMap(fd, res)
+	return res
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor.
+func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
+		m.Add(l.Path, l)
+	}
+}
+
+// NB: This wonkiness allows desc.Descriptor impl to implement an interface that
+// is only usable from this package, by embedding a SourceInfoComputeFunc that
+// implements the actual logic (which must live in desc package to avoid a
+// dependency cycle).
+
+// SourceInfoComputer is a single method which will be invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+	recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+	f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This is
+// used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) {
+	c.recomputeSourceInfo()
+}
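
The SourceInfoComputeFunc indirection above is a general Go technique: an exported func type provides the unexported interface method, so a type in another package can satisfy the interface merely by embedding the func type. A minimal single-package sketch of the mechanism, with hypothetical names:

package main

import "fmt"

// recomputer plays the role of SourceInfoComputer: its unexported method
// cannot be declared directly by types in other packages.
type recomputer interface {
	recompute()
}

// RecomputeFunc plays the role of SourceInfoComputeFunc: embedding it supplies
// the unexported method on the embedding type's behalf.
type RecomputeFunc func()

func (f RecomputeFunc) recompute() { f() }

// Trigger plays the role of RecomputeSourceInfo.
func Trigger(r recomputer) { r.recompute() }

// descriptorLike stands in for desc.Descriptor: it satisfies recomputer only
// because it embeds RecomputeFunc.
type descriptorLike struct {
	RecomputeFunc
}

func main() {
	d := descriptorLike{RecomputeFunc: func() { fmt.Println("recomputing source info") }}
	Trigger(d)
}
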
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..71855bf
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,291 @@
+package internal
+
+import (
+	"math"
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	// MaxNormalTag is the maximum allowed tag number for a field in a normal message.
+	MaxNormalTag = 536870911 // 2^29 - 1
+
+	// MaxMessageSetTag is the maximum allowed tag number of a field in a message that
+	// uses the message set wire format.
+	MaxMessageSetTag = math.MaxInt32 - 1
+
+	// MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag
+	// since that is the absolute highest allowed.)
+	MaxTag = MaxMessageSetTag
+
+	// SpecialReservedStart is the first tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedStart = 19000
+	// SpecialReservedEnd is the last tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedEnd = 19999
+
+	// NB: It would be nice to use constants from generated code instead of
+	// hard-coding these here. But code-gen does not emit these as constants
+	// anywhere. The only places they appear in generated code are struct tags
+	// on fields of the generated descriptor protos.
+
+	// File_packageTag is the tag number of the package element in a file
+	// descriptor proto.
+	File_packageTag = 2
+	// File_dependencyTag is the tag number of the dependencies element in a
+	// file descriptor proto.
+	File_dependencyTag = 3
+	// File_messagesTag is the tag number of the messages element in a file
+	// descriptor proto.
+	File_messagesTag = 4
+	// File_enumsTag is the tag number of the enums element in a file descriptor
+	// proto.
+	File_enumsTag = 5
+	// File_servicesTag is the tag number of the services element in a file
+	// descriptor proto.
+	File_servicesTag = 6
+	// File_extensionsTag is the tag number of the extensions element in a file
+	// descriptor proto.
+	File_extensionsTag = 7
+	// File_optionsTag is the tag number of the options element in a file
+	// descriptor proto.
+	File_optionsTag = 8
+	// File_syntaxTag is the tag number of the syntax element in a file
+	// descriptor proto.
+	File_syntaxTag = 12
+	// Message_nameTag is the tag number of the name element in a message
+	// descriptor proto.
+	Message_nameTag = 1
+	// Message_fieldsTag is the tag number of the fields element in a message
+	// descriptor proto.
+	Message_fieldsTag = 2
+	// Message_nestedMessagesTag is the tag number of the nested messages
+	// element in a message descriptor proto.
+	Message_nestedMessagesTag = 3
+	// Message_enumsTag is the tag number of the enums element in a message
+	// descriptor proto.
+	Message_enumsTag = 4
+	// Message_extensionRangeTag is the tag number of the extension ranges
+	// element in a message descriptor proto.
+	Message_extensionRangeTag = 5
+	// Message_extensionsTag is the tag number of the extensions element in a
+	// message descriptor proto.
+	Message_extensionsTag = 6
+	// Message_optionsTag is the tag number of the options element in a message
+	// descriptor proto.
+	Message_optionsTag = 7
+	// Message_oneOfsTag is the tag number of the one-ofs element in a message
+	// descriptor proto.
+	Message_oneOfsTag = 8
+	// Message_reservedRangeTag is the tag number of the reserved ranges element
+	// in a message descriptor proto.
+	Message_reservedRangeTag = 9
+	// Message_reservedNameTag is the tag number of the reserved names element
+	// in a message descriptor proto.
+	Message_reservedNameTag = 10
+	// ExtensionRange_startTag is the tag number of the start index in an
+	// extension range proto.
+	ExtensionRange_startTag = 1
+	// ExtensionRange_endTag is the tag number of the end index in an
+	// extension range proto.
+	ExtensionRange_endTag = 2
+	// ExtensionRange_optionsTag is the tag number of the options element in an
+	// extension range proto.
+	ExtensionRange_optionsTag = 3
+	// ReservedRange_startTag is the tag number of the start index in a reserved
+	// range proto.
+	ReservedRange_startTag = 1
+	// ReservedRange_endTag is the tag number of the end index in a reserved
+	// range proto.
+	ReservedRange_endTag = 2
+	// Field_nameTag is the tag number of the name element in a field descriptor
+	// proto.
+	Field_nameTag = 1
+	// Field_extendeeTag is the tag number of the extendee element in a field
+	// descriptor proto.
+	Field_extendeeTag = 2
+	// Field_numberTag is the tag number of the number element in a field
+	// descriptor proto.
+	Field_numberTag = 3
+	// Field_labelTag is the tag number of the label element in a field
+	// descriptor proto.
+	Field_labelTag = 4
+	// Field_typeTag is the tag number of the type element in a field descriptor
+	// proto.
+	Field_typeTag = 5
+	// Field_typeNameTag is the tag number of the type name element in a field
+	// descriptor proto.
+	Field_typeNameTag = 6
+	// Field_defaultTag is the tag number of the default value element in a
+	// field descriptor proto.
+	Field_defaultTag = 7
+	// Field_optionsTag is the tag number of the options element in a field
+	// descriptor proto.
+	Field_optionsTag = 8
+	// Field_jsonNameTag is the tag number of the JSON name element in a field
+	// descriptor proto.
+	Field_jsonNameTag = 10
+	// Field_proto3OptionalTag is the tag number of the proto3_optional element
+	// in a descriptor proto.
+	Field_proto3OptionalTag = 17
+	// OneOf_nameTag is the tag number of the name element in a one-of
+	// descriptor proto.
+	OneOf_nameTag = 1
+	// OneOf_optionsTag is the tag number of the options element in a one-of
+	// descriptor proto.
+	OneOf_optionsTag = 2
+	// Enum_nameTag is the tag number of the name element in an enum descriptor
+	// proto.
+	Enum_nameTag = 1
+	// Enum_valuesTag is the tag number of the values element in an enum
+	// descriptor proto.
+	Enum_valuesTag = 2
+	// Enum_optionsTag is the tag number of the options element in an enum
+	// descriptor proto.
+	Enum_optionsTag = 3
+	// Enum_reservedRangeTag is the tag number of the reserved ranges element in
+	// an enum descriptor proto.
+	Enum_reservedRangeTag = 4
+	// Enum_reservedNameTag is the tag number of the reserved names element in
+	// an enum descriptor proto.
+	Enum_reservedNameTag = 5
+	// EnumVal_nameTag is the tag number of the name element in an enum value
+	// descriptor proto.
+	EnumVal_nameTag = 1
+	// EnumVal_numberTag is the tag number of the number element in an enum
+	// value descriptor proto.
+	EnumVal_numberTag = 2
+	// EnumVal_optionsTag is the tag number of the options element in an enum
+	// value descriptor proto.
+	EnumVal_optionsTag = 3
+	// Service_nameTag is the tag number of the name element in a service
+	// descriptor proto.
+	Service_nameTag = 1
+	// Service_methodsTag is the tag number of the methods element in a service
+	// descriptor proto.
+	Service_methodsTag = 2
+	// Service_optionsTag is the tag number of the options element in a service
+	// descriptor proto.
+	Service_optionsTag = 3
+	// Method_nameTag is the tag number of the name element in a method
+	// descriptor proto.
+	Method_nameTag = 1
+	// Method_inputTag is the tag number of the input type element in a method
+	// descriptor proto.
+	Method_inputTag = 2
+	// Method_outputTag is the tag number of the output type element in a method
+	// descriptor proto.
+	Method_outputTag = 3
+	// Method_optionsTag is the tag number of the options element in a method
+	// descriptor proto.
+	Method_optionsTag = 4
+	// Method_inputStreamTag is the tag number of the input stream flag in a
+	// method descriptor proto.
+	Method_inputStreamTag = 5
+	// Method_outputStreamTag is the tag number of the output stream flag in a
+	// method descriptor proto.
+	Method_outputStreamTag = 6
+
+	// UninterpretedOptionsTag is the tag number of the uninterpreted options
+	// element. All *Options messages use the same tag for the field that stores
+	// uninterpreted options.
+	UninterpretedOptionsTag = 999
+
+	// Uninterpreted_nameTag is the tag number of the name element in an
+	// uninterpreted options proto.
+	Uninterpreted_nameTag = 2
+	// Uninterpreted_identTag is the tag number of the identifier value in an
+	// uninterpreted options proto.
+	Uninterpreted_identTag = 3
+	// Uninterpreted_posIntTag is the tag number of the positive int value in an
+	// uninterpreted options proto.
+	Uninterpreted_posIntTag = 4
+	// Uninterpreted_negIntTag is the tag number of the negative int value in an
+	// uninterpreted options proto.
+	Uninterpreted_negIntTag = 5
+	// Uninterpreted_doubleTag is the tag number of the double value in an
+	// uninterpreted options proto.
+	Uninterpreted_doubleTag = 6
+	// Uninterpreted_stringTag is the tag number of the string value in an
+	// uninterpreted options proto.
+	Uninterpreted_stringTag = 7
+	// Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+	// uninterpreted options proto.
+	Uninterpreted_aggregateTag = 8
+	// UninterpretedName_nameTag is the tag number of the name element in an
+	// uninterpreted option name proto.
+	UninterpretedName_nameTag = 1
+)
+
+// JsonName returns the default JSON name for a field with the given name.
+func JsonName(name string) string {
+	var js []rune
+	nextUpper := false
+	for i, r := range name {
+		if r == '_' {
+			nextUpper = true
+			continue
+		}
+		if i == 0 {
+			js = append(js, r)
+		} else if nextUpper {
+			nextUpper = false
+			js = append(js, unicode.ToUpper(r))
+		} else {
+			js = append(js, r)
+		}
+	}
+	return string(js)
+}
+
+// InitCap returns the given field name, but with the first letter capitalized.
+func InitCap(name string) string {
+	r, sz := utf8.DecodeRuneInString(name)
+	return string(unicode.ToUpper(r)) + name[sz:]
+}
+
+// CreatePrefixList returns a list of package prefixes to search when resolving
+// a symbol name. If the given package is blank, it returns only the empty
+// string. If the given package contains only one token, e.g. "foo", it returns
+// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
+// successively shorter prefixes of the package and then the empty string. For
+// example, for a package named "foo.bar.baz" it will return the following list:
+//   ["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+	if pkg == "" {
+		return []string{""}
+	}
+
+	numDots := 0
+	// one pass to pre-allocate the returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			numDots++
+		}
+	}
+	if numDots == 0 {
+		return []string{pkg, ""}
+	}
+
+	prefixes := make([]string, numDots+2)
+	// second pass to fill in returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			prefixes[numDots] = pkg[:i]
+			numDots--
+		}
+	}
+	prefixes[0] = pkg
+
+	return prefixes
+}
+
+// GetMaxTag returns the max tag number allowed, based on whether a message uses
+// message set wire format or not.
+func GetMaxTag(isMessageSet bool) int32 {
+	if isMessageSet {
+		return MaxMessageSetTag
+	}
+	return MaxNormalTag
+}
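
As a worked example for JsonName above (field names are hypothetical): "foo_bar" maps to "fooBar", "foo_bar_2" maps to "fooBar2", and a leading underscore is dropped while upper-casing the following rune, so "_foo" maps to "Foo".
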
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..4a05830
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,341 @@
+package desc
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/internal"
+)
+
+var (
+	cacheMu       sync.RWMutex
+	filesCache    = map[string]*FileDescriptor{}
+	messagesCache = map[string]*MessageDescriptor{}
+	enumCache     = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later.
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+	return loadFileDescriptor(file, nil)
+}
+
+func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := getFileFromCache(file)
+	if f != nil {
+		return f, nil
+	}
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadFileDescriptorLocked(file, r)
+}
+
+func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := filesCache[file]
+	if f != nil {
+		return f, nil
+	}
+	fd, err := internal.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err = toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(file, f)
+	return f, nil
+}
+
+func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
+	deps := make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, dep := range fd.GetDependency() {
+		resolvedDep := r.ResolveImport(fd.GetName(), dep)
+		var err error
+		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
+		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
+			// try original path
+			deps[i], err = loadFileDescriptorLocked(dep, r)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return CreateFileDescriptor(fd, deps...)
+}
+
+func getFileFromCache(file string) *FileDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return filesCache[file]
+}
+
+func putCacheLocked(filename string, fd *FileDescriptor) {
+	filesCache[filename] = fd
+	putMessageCacheLocked(fd.messages)
+}
+
+func putMessageCacheLocked(mds []*MessageDescriptor) {
+	for _, md := range mds {
+		messagesCache[md.fqn] = md
+		putMessageCacheLocked(md.nested)
+	}
+}
+
+// interface implemented by generated messages, which all have a Descriptor() method in
+// addition to the methods of proto.Message
+type protoMessage interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// LoadMessageDescriptor loads a descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(message, nil)
+}
+
+func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
+	m := getMessageFromCache(message)
+	if m != nil {
+		return m, nil
+	}
+
+	pt := proto.MessageType(message)
+	if pt == nil {
+		return nil, nil
+	}
+	msg, err := messageFromType(pt)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(message, msg, r)
+}
+
+// LoadMessageDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(messageType, nil)
+}
+
+func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
+	m, err := messageFromType(messageType)
+	if err != nil {
+		return nil, err
+	}
+	return loadMessageDescriptorForMessage(m, r)
+}
+
+// LoadMessageDescriptorForMessage loads a descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(message, nil)
+}
+
+func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
+	// efficiently handle dynamic messages
+	type descriptorable interface {
+		GetMessageDescriptor() *MessageDescriptor
+	}
+	if d, ok := message.(descriptorable); ok {
+		return d.GetMessageDescriptor(), nil
+	}
+
+	name := proto.MessageName(message)
+	if name == "" {
+		return nil, nil
+	}
+	m := getMessageFromCache(name)
+	if m != nil {
+		return m, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+}
+
+func messageFromType(mt reflect.Type) (protoMessage, error) {
+	if mt.Kind() != reflect.Ptr {
+		mt = reflect.PtrTo(mt)
+	}
+	m, ok := reflect.Zero(mt).Interface().(protoMessage)
+	if !ok {
+		return nil, fmt.Errorf("failed to create message from type: %v", mt)
+	}
+	return m, nil
+}
+
+func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
+	m := messagesCache[name]
+	if m != nil {
+		return m, nil
+	}
+
+	fdb, _ := message.Descriptor()
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(fd.GetName(), f)
+	return f.FindSymbol(name).(*MessageDescriptor), nil
+}
+
+func getMessageFromCache(message string) *MessageDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return messagesCache[message]
+}
+
+// interface implemented by all generated enums
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that it is accessible to reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
+
+// LoadEnumDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, nil)
+}
+
+func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
+	// we cache descriptors using non-pointer type
+	if enumType.Kind() == reflect.Ptr {
+		enumType = enumType.Elem()
+	}
+	e := getEnumFromCache(enumType)
+	if e != nil {
+		return e, nil
+	}
+	enum, err := enumFromType(enumType)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+}
+
+// LoadEnumDescriptorForEnum loads a descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, nil)
+}
+
+func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	et := reflect.TypeOf(enum)
+	// we cache descriptors using non-pointer type
+	if et.Kind() == reflect.Ptr {
+		et = et.Elem()
+		enum = reflect.Zero(et).Interface().(protoEnum)
+	}
+	e := getEnumFromCache(et)
+	if e != nil {
+		return e, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(et, enum, r)
+}
+
+func enumFromType(et reflect.Type) (protoEnum, error) {
+	if et.Kind() != reflect.Int32 {
+		et = reflect.PtrTo(et)
+	}
+	e, ok := reflect.Zero(et).Interface().(protoEnum)
+	if !ok {
+		return nil, fmt.Errorf("failed to create enum from type: %v", et)
+	}
+	return e, nil
+}
+
+func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	e := enumCache[et]
+	if e != nil {
+		return e, nil
+	}
+
+	fdb, path := enum.EnumDescriptor()
+	name := fmt.Sprintf("%v", et)
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+	// see if we already have cached "rich" descriptor
+	f, ok := filesCache[fd.GetName()]
+	if !ok {
+		f, err = toFileDescriptorLocked(fd, r)
+		if err != nil {
+			return nil, err
+		}
+		putCacheLocked(fd.GetName(), f)
+	}
+
+	ed := findEnum(f, path)
+	enumCache[et] = ed
+	return ed, nil
+}
+
+func getEnumFromCache(et reflect.Type) *EnumDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return enumCache[et]
+}
+
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+	if len(path) == 1 {
+		return fd.GetEnumTypes()[path[0]]
+	}
+	md := fd.GetMessageTypes()[path[0]]
+	for _, i := range path[1 : len(path)-1] {
+		md = md.GetNestedMessageTypes()[i]
+	}
+	return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, nil)
+}
+
+func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
+	file, err := loadFileDescriptor(ext.Filename, r)
+	if err != nil {
+		return nil, err
+	}
+	field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
+	// make sure descriptor agrees with attributes of the ExtensionDesc
+	if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
+		field.GetNumber() != ext.Field {
+		return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
+	}
+	return field, nil
+}
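
A short sketch of the public loaders defined above, assuming the well-known Duration type is linked into the binary via its generated package (which registers both the file and the message name with the proto runtime):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes/duration"
	"github.com/jhump/protoreflect/desc"
)

func main() {
	// Load by fully-qualified name; this returns nil, nil if the message type
	// is not registered with the runtime.
	md, err := desc.LoadMessageDescriptor("google.protobuf.Duration")
	if err != nil {
		log.Fatal(err)
	}
	if md == nil {
		log.Fatal("google.protobuf.Duration is not registered")
	}
	fmt.Println(md.GetFile().GetName())

	// Load from a generated message value; only the type is inspected, so a
	// typed nil pointer is sufficient.
	md2, err := desc.LoadMessageDescriptorForMessage((*duration.Duration)(nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md2.GetFullyQualifiedName())
}
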
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..91fd672
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,185 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/jhump/protoreflect/codec"
+	"io"
+)
+
+// defaultDeterminism, if true, will mean that calls to Marshal will produce
+// deterministic output. This is used to make the output of proto.Marshal(...)
+// deterministic (since there is no way to have that convey determinism intent).
+// **This is only used from tests.**
+var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+	var b codec.Buffer
+	b.SetDeterministic(defaultDeterminism)
+	if err := m.marshal(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+	codedBuf := codec.NewBuffer(b)
+	codedBuf.SetDeterministic(defaultDeterminism)
+	if err := m.marshal(codedBuf); err != nil {
+		return nil, err
+	}
+	return codedBuf.Bytes(), nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+	var b codec.Buffer
+	b.SetDeterministic(true)
+	if err := m.marshal(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic,
+// except instead of allocating a new byte slice to marshal into, it uses the
+// provided byte slice. The backing array for the returned byte slice *may* be
+// the same as the one that was passed in, but it's not guaranteed as a new
+// backing array will automatically be allocated if more bytes need to be written
+// than the provided buffer has capacity for.
+func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) {
+	codedBuf := codec.NewBuffer(b)
+	codedBuf.SetDeterministic(true)
+	if err := m.marshal(codedBuf); err != nil {
+		return nil, err
+	}
+	return codedBuf.Bytes(), nil
+}
+
+func (m *Message) marshal(b *codec.Buffer) error {
+	if err := m.marshalKnownFields(b); err != nil {
+		return err
+	}
+	return m.marshalUnknownFields(b)
+}
+
+func (m *Message) marshalKnownFields(b *codec.Buffer) error {
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		val := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd == nil {
+			panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
+		}
+		if err := b.EncodeFieldValue(fd, val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *Message) marshalUnknownFields(b *codec.Buffer) error {
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		sl := m.unknownFields[itag]
+		for _, u := range sl {
+			if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil {
+				return err
+			}
+			switch u.Encoding {
+			case proto.WireBytes:
+				if err := b.EncodeRawBytes(u.Contents); err != nil {
+					return err
+				}
+			case proto.WireStartGroup:
+				_, _ = b.Write(u.Contents)
+				if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
+					return err
+				}
+			case proto.WireFixed32:
+				if err := b.EncodeFixed32(u.Value); err != nil {
+					return err
+				}
+			case proto.WireFixed64:
+				if err := b.EncodeFixed64(u.Value); err != nil {
+					return err
+				}
+			case proto.WireVarint:
+				if err := b.EncodeVarint(u.Value); err != nil {
+					return err
+				}
+			default:
+				return codec.ErrBadWireType
+			}
+		}
+	}
+	return nil
+}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMerge(b); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+	return m.unmarshal(codec.NewBuffer(b), false)
+}
+
+func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+	for !buf.EOF() {
+		fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
+		if err != nil {
+			if err == codec.ErrWireTypeEndGroup {
+				if isGroup {
+					// finished parsing group
+					return nil
+				}
+				return codec.ErrBadWireType
+			}
+			return err
+		}
+
+		if fd == nil {
+			if m.unknownFields == nil {
+				m.unknownFields = map[int32][]UnknownField{}
+			}
+			uv := val.(codec.UnknownField)
+			u := UnknownField{
+				Encoding: uv.Encoding,
+				Value:    uv.Value,
+				Contents: uv.Contents,
+			}
+			m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u)
+		} else if err := mergeField(m, fd, val); err != nil {
+			return err
+		}
+	}
+	if isGroup {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
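
A binary round-trip sketch for the methods above. It assumes the dynamic message's by-number accessors (SetFieldByNumber/GetFieldByNumber, which live elsewhere in this package) and uses google.protobuf.Duration, whose fields are seconds (tag 1, int64) and nanos (tag 2, int32):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes/duration"
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func main() {
	md, err := desc.LoadMessageDescriptorForMessage((*duration.Duration)(nil))
	if err != nil {
		log.Fatal(err)
	}

	msg := dynamic.NewMessage(md)
	msg.SetFieldByNumber(1, int64(90))         // seconds
	msg.SetFieldByNumber(2, int32(500000000))  // nanos

	b, err := msg.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	round := dynamic.NewMessage(md)
	if err := round.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(round.GetFieldByNumber(1)) // 90
}
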
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..c329fcd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,163 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
+//
+//
+// Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. The same goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enum fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+//
+// Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not set up with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+//   1. A dynamic message is created with a descriptor, A, and then
+//      de-serialized from a stream of bytes. The stream includes an
+//      unrecognized tag T. The message will include tag T in its unrecognized
+//      field set.
+//   2. Another call site retrieves a newer descriptor, A', which includes a
+//      newly added field with tag T.
+//   3. That other call site then uses a FieldDescriptor to access the value of
+//      the new field. This will cause the dynamic message to parse the bytes
+//      for the unknown tag T and store them as a known field.
+//   4. Subsequent operations for tag T, including setting the field using only
+//      tag number or de-serializing a stream that includes tag T, will operate
+//      as if that tag were part of the original descriptor, A.
+//
+//
+// Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+//
+// Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extensions fields
+// (for proto2 messages).
+package dynamic
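
To ground the field-access semantics described above (regular vs. "Try" accessors, typed slices for repeated fields), a small sketch using the generated FileDescriptorProto type, whose name field is a string and whose dependency field is a repeated string:

package main

import (
	"fmt"
	"log"

	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
)

func main() {
	md, err := desc.LoadMessageDescriptorForMessage((*dpb.FileDescriptorProto)(nil))
	if err != nil {
		log.Fatal(err)
	}
	msg := dynamic.NewMessage(md)

	// The "Try" form returns an error; the regular form panics on bad input.
	if err := msg.TrySetFieldByName("name", "example.proto"); err != nil {
		log.Fatal(err)
	}

	// Repeated fields accept a typed slice, not just []interface{}.
	msg.SetFieldByName("dependency", []string{"google/protobuf/any.proto"})

	fmt.Println(msg.GetFieldByName("name"))       // example.proto
	fmt.Println(msg.GetFieldByName("dependency")) // copy of all elements
}
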
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..de13b92
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2734 @@
+package dynamic
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/internal"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+	md            *desc.MessageDescriptor
+	er            *ExtensionRegistry
+	mf            *MessageFactory
+	extraFields   map[int32]*desc.FieldDescriptor
+	values        map[int32]interface{}
+	unknownFields map[int32][]UnknownField
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+	var er *ExtensionRegistry
+	if mf != nil {
+		er = mf.er
+	}
+	return &Message{
+		md: md,
+		mf: mf,
+		er: er,
+	}
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+	return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return AsDynamicMessageWithMessageFactory(msg, mf)
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+	if dm, ok := msg.(*Message); ok {
+		return dm, nil
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	err = dm.mergeFrom(msg)
+	if err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
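
A short sketch of the conversion entry point above: wrapping an existing generated message (here google.protobuf.Duration, chosen purely for illustration) in a dynamic message and reading a field back by tag number:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes/duration"
	"github.com/jhump/protoreflect/dynamic"
)

func main() {
	dm, err := dynamic.AsDynamicMessage(&duration.Duration{Seconds: 90})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dm.GetFieldByNumber(1)) // 90 (the seconds field)
}
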
+
+// GetMessageDescriptor returns a descriptor for this message's type.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+	return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+	if len(m.extraFields) == 0 {
+		return m.md.GetFields()
+	}
+	flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields))
+	copy(flds, m.md.GetFields())
+	for _, fld := range m.extraFields {
+		if !fld.IsExtension() {
+			flds = append(flds, fld)
+		}
+	}
+	return flds
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+	if !m.md.IsExtendable() {
+		return nil
+	}
+	exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+	for _, fld := range m.extraFields {
+		if fld.IsExtension() {
+			exts = append(exts, fld)
+		}
+	}
+	return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+	flds := make([]int32, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		flds = append(flds, tag)
+	}
+	return flds
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc.
+func (m *Message) Descriptor() ([]byte, []int) {
+	// get encoded file descriptor
+	b, err := proto.Marshal(m.md.GetFile().AsProto())
+	if err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	var zippedBytes bytes.Buffer
+	w := gzip.NewWriter(&zippedBytes)
+	if _, err := w.Write(b); err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	if err := w.Close(); err != nil {
+		panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+
+	// and path to message
+	path := []int{}
+	var d desc.Descriptor
+	name := m.md.GetFullyQualifiedName()
+	for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+		found := false
+		switch d := d.(type) {
+		case (*desc.FileDescriptor):
+			for i, md := range d.GetMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		case (*desc.MessageDescriptor):
+			for i, md := range d.GetNestedMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		}
+		if !found {
+			panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+		}
+	}
+	// reverse the path
+	i := 0
+	j := len(path) - 1
+	for i < j {
+		path[i], path[j] = path[j], path[i]
+		i++
+		j--
+	}
+
+	return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+	return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+	fd := m.md.FindFieldByNumber(tagNumber)
+	if fd != nil {
+		return fd
+	}
+	fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber)
+	if fd != nil {
+		return fd
+	}
+	return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+			return fd
+		}
+	}
+
+	return nil
+}
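+
+// Illustrative name lookups on a dynamic message dm; the field and extension
+// names are placeholders:
+//
+//    fd := dm.FindFieldDescriptorByName("name")             // normal field
+//    ed := dm.FindFieldDescriptorByName("(my.pkg.my_ext)")  // extension, parenthesized
+//    bd := dm.FindFieldDescriptorByName("[my.pkg.my_ext]")  // extension, bracketed
+//    // each call returns nil if the name is unknown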
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByJSONName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+			return fd
+		}
+	}
+
+	// try non-JSON names
+	return m.FindFieldDescriptorByName(name)
+}
+
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+	return checkField(fd, m.md)
+}
+
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+	if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+		return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+	}
+	if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+		return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+	}
+	return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+	if v, err := m.TryGetField(fd); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to lookup value names with those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values are
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For fields whose type
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on if *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getField(fd)
+}
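+
+// A sketch of how returned values map to the Go types in the table above, for a
+// dynamic message dm with assumed fields "count" (int32) and "labels" (repeated string):
+//
+//    v, err := dm.TryGetFieldByName("count")
+//    if err == nil {
+//        n := v.(int32) // scalar fields come back as the table's Go type
+//        _ = n
+//    }
+//    l, _ := dm.TryGetFieldByName("labels")
+//    if sl, ok := l.([]interface{}); ok {
+//        _ = sl // repeated fields come back as []interface{}
+//    }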
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+	if v, err := m.TryGetFieldByName(name); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+	if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getField(fd)
+}
+
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+	return m.doGetField(fd, false)
+}
+
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+	res := m.values[fd.GetNumber()]
+	if res == nil {
+		var err error
+		if res, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		}
+		if res == nil {
+			if nilIfAbsent {
+				return nil, nil
+			} else {
+				def := fd.GetDefaultValue()
+				if def != nil {
+					return def, nil
+				}
+				// GetDefaultValue only returns nil for message types
+				md := fd.GetMessageType()
+				if m.md.IsProto3() {
+					return nilMessage(md), nil
+				} else {
+					// for proto2, return default instance of message
+					return m.mf.NewMessage(md), nil
+				}
+			}
+		}
+	}
+	rt := reflect.TypeOf(res)
+	if rt.Kind() == reflect.Map {
+		// make defensive copies to prevent caller from storing illegal keys and values
+		m := res.(map[interface{}]interface{})
+		res := map[interface{}]interface{}{}
+		for k, v := range m {
+			res[k] = v
+		}
+		return res, nil
+	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+		// make defensive copies to prevent caller from storing illegal elements
+		sl := res.([]interface{})
+		res := make([]interface{}, len(sl))
+		copy(res, sl)
+		return res, nil
+	}
+	return res, nil
+}
+
+func nilMessage(md *desc.MessageDescriptor) interface{} {
+	// try to return a proper nil pointer
+	msgType := proto.MessageType(md.GetFullyQualifiedName())
+	if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+		return reflect.Zero(msgType).Interface().(proto.Message)
+	}
+	// fallback to nil dynamic message pointer
+	return (*Message)(nil)
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+	if err := m.checkField(fd); err != nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+	if _, ok := m.values[int32(tagNumber)]; ok {
+		return true
+	}
+	_, ok := m.unknownFields[int32(tagNumber)]
+	return ok
+}
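+
+// Sketch of the proto3 presence semantics described above, assuming dm wraps a
+// proto3 message with an int32 field named "count":
+//
+//    dm.SetFieldByName("count", int32(0))
+//    _ = dm.HasFieldName("count") // false: the zero value is indistinguishable from unset
+//    dm.SetFieldByName("count", int32(7))
+//    _ = dm.HasFieldName("count") // true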
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TrySetField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+//  1. If a numeric type is provided that can be converted *without loss or
+//     overflow*, it is accepted. This allows for setting int64 fields using int
+//     or int32 values. Similarly for uint64 with uint and uint32 values and for
+//     float64 fields with float32 values.
+//  2. The value can be a named type, as long as its underlying type is correct.
+//  3. Map and repeated fields can be set using any kind of concrete map or
+//     slice type, as long as the values within are all of the correct type. So
+//     a field defined as a 'map<string, int32>` can be set using a
+//     map[string]int32, a map[string]interface{}, or even a
+//     map[interface{}]interface{}.
+//  4. Finally, dynamic code that chooses to not treat maps as a special-case
+//     will find that it can set map fields using a slice where each element is a
+//     message that matches the implicit map-entry field message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is cleared and replaced by the given known
+// value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setField(fd, val)
+}
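+
+// Sketch of the type concessions listed above; the field names and types are
+// assumed for illustration:
+//
+//    _ = dm.TrySetFieldByName("id64", 42)                        // int accepted for an int64 field
+//    _ = dm.TrySetFieldByName("labels", []string{"a", "b"})      // concrete slice for a repeated string field
+//    _ = dm.TrySetFieldByName("attrs", map[string]int32{"x": 1}) // concrete map for a map<string, int32> field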
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+	if err := m.TrySetFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parentheses or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+	m.internalSetField(fd, val)
+	return nil
+}
+
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+	if fd.IsRepeated() {
+		// Unset fields and zero-length fields are indistinguishable, in both
+		// proto2 and proto3 syntax
+		if reflect.ValueOf(val).Len() == 0 {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
+		// proto3 considers fields that are set to their zero value as unset
+		// (we already handled repeated fields above)
+		var equal bool
+		if b, ok := val.([]byte); ok {
+			// can't compare slices, so we have to special-case []byte values
+			equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
+		} else {
+			defVal := fd.GetDefaultValue()
+			equal = defVal == val
+			if !equal && defVal == nil {
+				// above just checks if value is the nil interface,
+				// but we should also test if the given value is a
+				// nil pointer
+				rv := reflect.ValueOf(val)
+				if rv.Kind() == reflect.Ptr && rv.IsNil() {
+					equal = true
+				}
+			}
+		}
+		if equal {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	}
+	if m.values == nil {
+		m.values = map[int32]interface{}{}
+	}
+	m.values[fd.GetNumber()] = val
+	// if this field is part of a one-of, make sure all other one-of choices are cleared
+	od := fd.GetOneOf()
+	if od != nil {
+		for _, other := range od.GetChoices() {
+			if other.GetNumber() != fd.GetNumber() {
+				delete(m.values, other.GetNumber())
+			}
+		}
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+	if m.extraFields == nil {
+		m.extraFields = map[int32]*desc.FieldDescriptor{}
+	}
+	m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+	if err := m.TryClearField(fd); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+	if err := m.TryClearFieldByName(name); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+	if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+	// clear value
+	if m.values != nil {
+		delete(m.values, fd.GetNumber())
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+	if fd, val, err := m.TryGetOneOfField(od); err != nil {
+		panic(err.Error())
+	} else {
+		return fd, val
+	}
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		val, err := m.doGetField(fd, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		if val != nil {
+			return fd, val, nil
+		}
+	}
+	return nil, nil, nil
+}
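+
+// Sketch of inspecting a one-of on a dynamic message dm; picking the first
+// declared one-of is just for illustration:
+//
+//    od := dm.GetMessageDescriptor().GetOneOfs()[0]
+//    fd, val, err := dm.TryGetOneOfField(od)
+//    if err == nil && fd != nil {
+//        // fd identifies which choice is set; val holds its value
+//        _ = val
+//    }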
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+	if err := m.TryClearOneOfField(od); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		m.clearField(fd)
+	}
+	return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+	if v, err := m.TryGetMapField(fd, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getMapField(fd, key)
+}
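+
+// Sketch of reading one entry of a map field, assuming dm has a
+// map<string, int32> field named "attrs":
+//
+//    v, err := dm.TryGetMapFieldByName("attrs", "x")
+//    if err == nil && v != nil {
+//        _ = v.(int32) // value has the Go type of the map's value type
+//    }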
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if !fd.IsMap() {
+		return nil, FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return nil, err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if mp == nil {
+			return nil, nil
+		}
+	}
+	return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
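+
+// Sketch of iterating all entries of a map field; the field name is a placeholder:
+//
+//    err := dm.TryForEachMapFieldEntryByName("attrs", func(k, v interface{}) bool {
+//        // return false to stop early
+//        return true
+//    })
+//    _ = err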
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		// assign to the outer mp (not a shadowed local) so the parsed value is used below
+		var err error
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	for k, v := range mp.(map[interface{}]interface{}) {
+		if !fn(k, v) {
+			break
+		}
+	}
+	return nil
+}
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+	if err := m.TryPutMapField(fd, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is cleared and replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.putMapField(fd, key, val)
+}
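+
+// Sketch of mutating a map field, again assuming a map<string, int32> field
+// named "attrs":
+//
+//    _ = dm.TryPutMapFieldByName("attrs", "x", int32(1))
+//    _ = dm.TryRemoveMapFieldByName("attrs", "x")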
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return err
+	}
+	vfd := fd.GetMessageType().GetFields()[1]
+	vi, err := validElementFieldValue(vfd, val, true)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+			return nil
+		}
+	}
+	mp.(map[interface{}]interface{})[ki] = vi
+	return nil
+}
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+	if err := m.TryRemoveMapField(fd, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+	if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+	if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.removeMapField(fd, key)
+}
+
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key, false)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	res := mp.(map[interface{}]interface{})
+	delete(res, ki)
+	if len(res) == 0 {
+		delete(m.values, fd.GetNumber())
+	}
+	return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+	l, err := m.TryFieldLength(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if err := m.checkField(fd); err != nil {
+		return 0, err
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+	l, err := m.TryFieldLengthByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return 0, UnknownFieldNameError
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+	l, err := m.TryFieldLengthByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return 0, UnknownTagNumberError
+	}
+	return m.fieldLength(fd)
+}
+
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if !fd.IsRepeated() {
+		return 0, FieldIsNotRepeatedError
+	}
+	val := m.values[fd.GetNumber()]
+	if val == nil {
+		var err error
+		if val, err = m.parseUnknownField(fd); err != nil {
+			return 0, err
+		} else if val == nil {
+			return 0, nil
+		}
+	}
+	if sl, ok := val.([]interface{}); ok {
+		return len(sl), nil
+	} else if mp, ok := val.(map[interface{}]interface{}); ok {
+		return len(mp), nil
+	}
+	return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+	if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getRepeatedField(fd, index)
+}
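+
+// Sketch of reading a repeated field, assuming dm has a repeated string field
+// named "labels":
+//
+//    n, err := dm.TryFieldLengthByName("labels")
+//    if err == nil && n > 0 {
+//        first, _ := dm.TryGetRepeatedFieldByName("labels", 0)
+//        _ = first.(string)
+//    }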
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return nil, FieldIsNotRepeatedError
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		var err error
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if sl == nil {
+			return nil, IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return nil, IndexOutOfRangeError
+	}
+	return res[index], nil
+}
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TryAddRepeatedField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.addRepeatedField(fd, val)
+}
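+
+// Sketch of appending to a repeated field; the field name is a placeholder:
+//
+//    _ = dm.TryAddRepeatedFieldByName("labels", "a")
+//    _ = dm.TryAddRepeatedFieldByName("labels", "b")
+//    // existing elements can be overwritten in place with TrySetRepeatedFieldByName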
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+	if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val, false)
+	if err != nil {
+		return err
+	}
+
+	if fd.IsMap() {
+		// We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
+		// adding entries one at a time (as if the field were a normal repeated field).
+		msg := val.(proto.Message)
+		dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+		if err != nil {
+			return err
+		}
+		k, err := dm.TryGetFieldByNumber(1)
+		if err != nil {
+			return err
+		}
+		v, err := dm.TryGetFieldByNumber(2)
+		if err != nil {
+			return err
+		}
+		return m.putMapField(fd, k, v)
+	}
+
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			sl = []interface{}{}
+		}
+	}
+	res := sl.([]interface{})
+	res = append(res, val)
+	m.internalSetField(fd, res)
+	return nil
+}
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// TrySetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+	if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val, false)
+	if err != nil {
+		return err
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			return IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return IndexOutOfRangeError
+	}
+	res[index] = val
+	return nil
+}
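+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): the index-based setters only overwrite existing elements, so the
+// slice must already be populated. Assuming a hypothetical repeated string
+// field named "tags", an element could be replaced like this:
+//
+//	if err := m.TrySetFieldByName("tags", []interface{}{"a", "b", "c"}); err != nil {
+//		return err
+//	}
+//	// replace the second element; index 3 or higher would return IndexOutOfRangeError
+//	if err := m.TrySetRepeatedFieldByName("tags", 1, "B"); err != nil {
+//		return err
+//	}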
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+	if u, ok := m.unknownFields[tagNumber]; ok {
+		return u
+	} else {
+		return nil
+	}
+}
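+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): unknown fields typically appear after unmarshalling bytes written
+// with a newer schema. The tag number 999 below is hypothetical.
+//
+//	for _, uf := range m.GetUnknownField(999) {
+//		if uf.Encoding == proto.WireBytes || uf.Encoding == proto.WireStartGroup {
+//			fmt.Printf("raw contents: %x\n", uf.Contents)
+//		} else {
+//			fmt.Printf("numeric value: %d\n", uf.Value)
+//		}
+//	}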
+
+func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
+	unks, ok := m.unknownFields[fd.GetNumber()]
+	if !ok {
+		return nil, nil
+	}
+	var v interface{}
+	var sl []interface{}
+	var mp map[interface{}]interface{}
+	if fd.IsMap() {
+		mp = map[interface{}]interface{}{}
+	}
+	var err error
+	for _, unk := range unks {
+		var val interface{}
+		if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
+			val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf)
+		} else {
+			val, err = codec.DecodeScalarField(fd, unk.Value)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			newEntry := val.(*Message)
+			kk, err := newEntry.TryGetFieldByNumber(1)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := newEntry.TryGetFieldByNumber(2)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+			v = mp
+		} else if fd.IsRepeated() {
+			t := reflect.TypeOf(val)
+			if t.Kind() == reflect.Slice && t != typeOfBytes {
+				// append slices if we unmarshalled a packed repeated field
+				newVals := val.([]interface{})
+				sl = append(sl, newVals...)
+			} else {
+				sl = append(sl, val)
+			}
+			v = sl
+		} else {
+			v = val
+		}
+	}
+	m.internalSetField(fd, v)
+	return v, nil
+}
+
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	return validFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	if fd.IsMap() && val.Kind() == reflect.Map {
+		return validFieldValueForMapField(fd, val)
+	}
+
+	if fd.IsRepeated() { // this will also catch map fields where given value was not a map
+		if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
+			if fd.IsMap() {
+				return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
+			} else {
+				return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
+			}
+		}
+
+		if fd.IsMap() {
+			// value should be a slice of entry messages that we need to convert into a map[interface{}]interface{}
+			m := map[interface{}]interface{}{}
+			for i := 0; i < val.Len(); i++ {
+				e, err := validElementFieldValue(fd, val.Index(i).Interface(), false)
+				if err != nil {
+					return nil, err
+				}
+				msg := e.(proto.Message)
+				dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
+				if err != nil {
+					return nil, err
+				}
+				k, err := dm.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				v, err := dm.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = v
+			}
+			return m, nil
+		}
+
+		// make a defensive copy while checking contents (also converts to []interface{})
+		s := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			ev := val.Index(i)
+			if ev.Kind() == reflect.Interface {
+				// unwrap it
+				ev = reflect.ValueOf(ev.Interface())
+			}
+			e, err := validElementFieldValueForRv(fd, ev, false)
+			if err != nil {
+				return nil, err
+			}
+			s[i] = e
+		}
+
+		return s, nil
+	}
+
+	return validElementFieldValueForRv(fd, val, false)
+}
+
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+	if dm, ok := m.(*Message); ok {
+		return dm, nil
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	if err := dm.mergeFrom(m); err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}, allowNilMessage bool) (interface{}, error) {
+	return validElementFieldValueForRv(fd, reflect.ValueOf(val), allowNilMessage)
+}
+
+func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value, allowNilMessage bool) (interface{}, error) {
+	t := fd.GetType()
+	if !val.IsValid() {
+		return nil, typeError(fd, nil)
+	}
+
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return toInt32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return toInt64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32:
+		return toUint32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return toUint64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return toFloat32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return toFloat64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return toBool(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return toBytes(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return toString(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+		m, err := asMessage(val, fd.GetFullyQualifiedName())
+		// check that message is correct type
+		if err != nil {
+			return nil, err
+		}
+		var msgType string
+		if dm, ok := m.(*Message); ok {
+			if allowNilMessage && dm == nil {
+				// if dm == nil, we'll panic below, so early out if that is allowed
+				// (only allowed for map values, to indicate an entry w/ no value)
+				return m, nil
+			}
+			msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
+		} else {
+			msgType = proto.MessageName(m)
+		}
+		if msgType != fd.GetMessageType().GetFullyQualifiedName() {
+			return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
+		}
+		return m, nil
+
+	default:
+		return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+	if v.Kind() == reflect.Int32 {
+		return int32(v.Int()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+	if v.Kind() == reflect.Uint32 {
+		return uint32(v.Uint()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+	if v.Kind() == reflect.Float32 {
+		return float32(v.Float()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+	if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 {
+		return v.Int(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+	if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 {
+		return v.Uint(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+	if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 {
+		return v.Float(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+	if v.Kind() == reflect.Bool {
+		return v.Bool(), nil
+	}
+	return false, typeError(fd, v.Type())
+}
+
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+		return v.Bytes(), nil
+	}
+	return nil, typeError(fd, v.Type())
+}
+
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+	if v.Kind() == reflect.String {
+		return v.String(), nil
+	}
+	return "", typeError(fd, v.Type())
+}
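+
+// Editor's note (illustrative, not part of the upstream protoreflect source):
+// the coercion helpers above define which Go kinds each field type accepts.
+// 32-bit and float32 fields require exact-width kinds, while 64-bit fields
+// also accept narrower kinds. With hypothetical field names:
+//
+//	var n int = 42
+//	err1 := m.TrySetFieldByName("an_int64_field", n) // ok: toInt64 accepts int, int32 and int64
+//	err2 := m.TrySetFieldByName("an_int32_field", n) // type error: toInt32 requires int32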
+
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+	return fmt.Errorf(
+		"%s field %s is not compatible with value of type %v",
+		getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+func getTypeString(fd *desc.FieldDescriptor) string {
+	return strings.ToLower(fd.GetType().String())
+}
+
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+	t := v.Type()
+	// we need a pointer to a struct that implements proto.Message
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+		return nil, fmt.Errorf("message field %s is not compatible with value of type %v", fieldName, v.Type())
+	}
+	return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+	for k := range m.values {
+		delete(m.values, k)
+	}
+	for k := range m.unknownFields {
+		delete(m.unknownFields, k)
+	}
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+	b, err := m.MarshalText()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+	}
+	return string(b)
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+//   target.Reset()
+//   m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// ConvertToDeterministic converts this dynamic message into the given message.
+// It is just like ConvertTo, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
+func (m *Message) ConvertToDeterministic(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target, true)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+//   m.Reset()
+//   m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	m.Reset()
+	return m.mergeFrom(target)
+}
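+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): ConvertFrom and ConvertTo move data between a dynamic message and a
+// generated message of the same type. pb.Device and dev (an existing
+// *pb.Device) are hypothetical.
+//
+//	md, err := desc.LoadMessageDescriptorForMessage(&pb.Device{})
+//	if err != nil {
+//		return err
+//	}
+//	dm := NewMessage(md)
+//	if err := dm.ConvertFrom(dev); err != nil { // generated -> dynamic
+//		return err
+//	}
+//	var out pb.Device
+//	if err := dm.ConvertTo(&out); err != nil { // dynamic -> generated
+//		return err
+//	}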
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// MergeIntoDeterministic merges this dynamic message into the given message.
+// It is just like MergeInto, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
+func (m *Message) MergeIntoDeterministic(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target, true)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+	if err := m.checkType(source); err != nil {
+		return err
+	}
+	return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic messages are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and then calls Merge.
+func (m *Message) Merge(source proto.Message) {
+	if m.md == nil {
+		// To support proto.Clone, initialize the descriptor from the source.
+		if dm, ok := source.(*Message); ok {
+			m.md = dm.md
+			// also make sure the clone uses the same message factory and
+			// extensions and also knows about the same extra fields (if any)
+			m.mf = dm.mf
+			m.er = dm.er
+			m.extraFields = dm.extraFields
+		} else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+			panic(err.Error())
+		} else {
+			m.md = md
+		}
+	}
+
+	if err := m.MergeFrom(source); err != nil {
+		panic(err.Error())
+	}
+}
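+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): because Merge tolerates an uninitialized receiver, proto.Clone also
+// works on dynamic messages even though it builds the copy via reflection:
+//
+//	clone := proto.Clone(dm).(*Message)
+//	// clone shares dm's descriptor, message factory and extension registry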
+
+func (m *Message) checkType(target proto.Message) error {
+	if dm, ok := target.(*Message); ok {
+		if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+			return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+		}
+		return nil
+	}
+
+	msgName := proto.MessageName(target)
+	if msgName != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+	}
+	return nil
+}
+
+func (m *Message) mergeInto(pm proto.Message, deterministic bool) error {
+	if dm, ok := pm.(*Message); ok {
+		return dm.mergeFrom(m)
+	}
+
+	target := reflect.ValueOf(pm)
+	if target.Kind() == reflect.Ptr {
+		target = target.Elem()
+	}
+
+	// track tags for which the dynamic message has data but the given
+	// message doesn't know about it
+	unknownTags := map[int32]struct{}{}
+	for tag := range m.values {
+		unknownTags[tag] = struct{}{}
+	}
+
+	// check that we can successfully do the merge
+	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	for _, prop := range structProps.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		f := target.FieldByName(prop.Name)
+		ft := f.Type()
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// check one-of fields
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		stf, ok := oop.Type.Elem().FieldByName(prop.Name)
+		if !ok {
+			return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
+		}
+		ft := stf.Type
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// and check extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		ft := reflect.TypeOf(ext.ExtensionType)
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+
+	// now actually perform the merge
+	for _, prop := range structProps.Prop {
+		v, ok := m.values[int32(prop.Tag)]
+		if !ok {
+			continue
+		}
+		f := target.FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil {
+			return err
+		}
+	}
+	// merge one-ofs
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		oov := reflect.New(oop.Type.Elem())
+		f := oov.Elem().FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil {
+			return err
+		}
+		target.Field(oop.Field).Set(oov)
+	}
+	// merge extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
+		if err := mergeVal(reflect.ValueOf(v), e, deterministic); err != nil {
+			return err
+		}
+		if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
+			// shouldn't happen since we already checked that the extension type was compatible above
+			return err
+		}
+	}
+
+	// if we have fields that the given message doesn't know about, add to its unknown fields
+	if len(unknownTags) > 0 {
+		var b codec.Buffer
+		b.SetDeterministic(deterministic)
+		if deterministic {
+			// if we need to emit things deterministically, sort the
+			// unknown tags before encoding
+			sortedUnknownTags := make([]int32, 0, len(unknownTags))
+			for tag := range unknownTags {
+				sortedUnknownTags = append(sortedUnknownTags, tag)
+			}
+			sort.Slice(sortedUnknownTags, func(i, j int) bool {
+				return sortedUnknownTags[i] < sortedUnknownTags[j]
+			})
+			for _, tag := range sortedUnknownTags {
+				fd := m.FindFieldDescriptor(tag)
+				if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+					return err
+				}
+			}
+		} else {
+			for tag := range unknownTags {
+				fd := m.FindFieldDescriptor(tag)
+				if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+					return err
+				}
+			}
+		}
+
+		internal.SetUnrecognized(pm, b.Bytes())
+	}
+
+	// finally, convey unknown fields into the given message by letting it unmarshal them
+	// (this will append to its unknown fields if not known; if somehow the given message recognizes
+	// a field even though the dynamic message did not, it will get correctly unmarshalled)
+	if unknownTags != nil && len(m.unknownFields) > 0 {
+		var b codec.Buffer
+		_ = m.marshalUnknownFields(&b)
+		_ = proto.UnmarshalMerge(b.Bytes(), pm)
+	}
+
+	return nil
+}
+
+func canConvert(src reflect.Value, target reflect.Type) bool {
+	if src.Kind() == reflect.Interface {
+		src = reflect.ValueOf(src.Interface())
+	}
+	srcType := src.Type()
+	// we allow convertible types instead of requiring exact types so that calling
+	// code can, for example, assign an enum constant to an enum field. In that case,
+	// one type is the enum type (a sub-type of int32) and the other may be the int32
+	// type. So we automatically do the conversion in that case.
+	if srcType.ConvertibleTo(target) {
+		return true
+	} else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
+		return true
+	} else if target.Kind() == reflect.Slice {
+		if srcType.Kind() != reflect.Slice {
+			return false
+		}
+		et := target.Elem()
+		for i := 0; i < src.Len(); i++ {
+			if !canConvert(src.Index(i), et) {
+				return false
+			}
+		}
+		return true
+	} else if target.Kind() == reflect.Map {
+		if srcType.Kind() != reflect.Map {
+			return false
+		}
+		return canConvertMap(src, target)
+	} else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
+		z := reflect.Zero(target).Interface()
+		msgType := proto.MessageName(z.(proto.Message))
+		return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		return false
+	}
+}
+
+func mergeVal(src, target reflect.Value, deterministic bool) error {
+	if src.Kind() == reflect.Interface && !src.IsNil() {
+		src = src.Elem()
+	}
+	srcType := src.Type()
+	targetType := target.Type()
+	if srcType.ConvertibleTo(targetType) {
+		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
+			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
+		} else {
+			target.Set(src.Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
+		if !src.CanAddr() {
+			target.Set(reflect.New(targetType.Elem()))
+			target.Elem().Set(src.Convert(targetType.Elem()))
+		} else {
+			target.Set(src.Addr().Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Slice {
+		l := target.Len()
+		newL := l + src.Len()
+		if target.Cap() < newL {
+			// expand capacity of the slice and copy
+			newSl := reflect.MakeSlice(targetType, newL, newL)
+			for i := 0; i < target.Len(); i++ {
+				newSl.Index(i).Set(target.Index(i))
+			}
+			target.Set(newSl)
+		} else {
+			target.SetLen(newL)
+		}
+		for i := 0; i < src.Len(); i++ {
+			dest := target.Index(l + i)
+			if dest.Kind() == reflect.Ptr {
+				dest.Set(reflect.New(dest.Type().Elem()))
+			}
+			if err := mergeVal(src.Index(i), dest, deterministic); err != nil {
+				return err
+			}
+		}
+	} else if targetType.Kind() == reflect.Map {
+		return mergeMapVal(src, target, targetType, deterministic)
+	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
+		dm := src.Interface().(*Message)
+		if target.IsNil() {
+			target.Set(reflect.New(targetType.Elem()))
+		}
+		m := target.Interface().(proto.Message)
+		if err := dm.mergeInto(m, deterministic); err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
+	}
+	return nil
+}
+
+func (m *Message) mergeFrom(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		// if given message is also a dynamic message, we merge differently
+		for tag, v := range dm.values {
+			fd := m.FindFieldDescriptor(tag)
+			if fd == nil {
+				fd = dm.FindFieldDescriptor(tag)
+			}
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	pmrv := reflect.ValueOf(pm)
+	if pmrv.IsNil() {
+		// nil is an empty message, so nothing to do
+		return nil
+	}
+
+	// check that we can successfully do the merge
+	src := pmrv.Elem()
+	values := map[*desc.FieldDescriptor]interface{}{}
+	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	if props == nil {
+		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
+	}
+
+	// regular fields
+	for _, prop := range props.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
+			}
+		}
+		rv := src.FieldByName(prop.Name)
+		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
+			continue
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// one-of fields
+	for _, oop := range props.OneofTypes {
+		oov := src.Field(oop.Field).Elem()
+		if !oov.IsValid() || oov.Type() != oop.Type {
+			// this field is unset (in other words, one-of message field is not currently set to this option)
+			continue
+		}
+		prop := oop.Prop
+		rv := oov.Elem().FieldByName(prop.Name)
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
+			}
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// extension fields
+	rexts, _ := proto.ExtensionDescs(pm)
+	for _, ed := range rexts {
+		v, _ := proto.GetExtension(pm, ed)
+		if v == nil {
+			continue
+		}
+		if ed.ExtensionType == nil {
+			// unrecognized extension: we'll handle that below when we
+			// handle other unrecognized fields
+			continue
+		}
+		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
+		if fd == nil {
+			var err error
+			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
+				return err
+			}
+		}
+		if v, err := validFieldValue(fd, v); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		if err := mergeField(m, fd, v); err != nil {
+			return err
+		}
+	}
+
+	data := internal.GetUnrecognized(pm)
+	if len(data) > 0 {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(data)
+	}
+
+	return nil
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missingFields := m.findMissingFields()
+	if len(missingFields) == 0 {
+		return nil
+	}
+	return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+}
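+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source), for a proto2 message type that declares required fields:
+//
+//	if err := dm.Validate(); err != nil {
+//		return fmt.Errorf("message is not ready to be sent: %v", err)
+//	}
+//
+// ValidateRecursive (below) additionally descends into message-typed field values.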
+
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missingFields []string
+	for _, fd := range m.md.GetFields() {
+		if fd.IsRequired() {
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				missingFields = append(missingFields, fd.GetName())
+			}
+		}
+	}
+	return missingFields
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields who are also messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	return m.validateRecursive("")
+}
+
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else if pm != nil {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					return nil
+				}
+			}
+			if dm == nil {
+				return nil
+			}
+			if err := dm.validateRecursive(chprefix); err != nil {
+				return err
+			}
+			return nil
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func getName(fd *desc.FieldDescriptor) string {
+	if fd.IsExtension() {
+		return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+	} else {
+		return fd.GetName()
+	}
+}
+
+// knownFieldTags returns tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+
+	keys := make([]int, len(m.values))
+	i := 0
+	for k := range m.values {
+		keys[i] = int(k)
+		i++
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// allKnownFieldTags returns tags of all recognized fields, including known
+// fields that are unset, in sorted order. Extensions are only included if they
+// are present; known but unset extensions are omitted from the returned set of
+// tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+	for k := range m.values {
+		keys = append(keys, int(k))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, ok := m.values[fd.GetNumber()]; !ok {
+			keys = append(keys, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if !fd.IsExtension() { // skip extensions that are not present
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				keys = append(keys, int(fd.GetNumber()))
+			}
+		}
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// unknownFieldTags returns tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+	if len(m.unknownFields) == 0 {
+		return []int(nil)
+	}
+	keys := make([]int, len(m.unknownFields))
+	i := 0
+	for k := range m.unknownFields {
+		keys[i] = int(k)
+		i++
+	}
+	sort.Ints(keys)
+	return keys
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..e44c6c5
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,157 @@
+package dynamic
+
+import (
+	"bytes"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
+// have the same message type and same fields set to equal values. For proto3 messages, fields set
+// to their zero value are considered unset.
+func Equal(a, b *Message) bool {
+	if a == b {
+		return true
+	}
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
+		return false
+	}
+	if len(a.values) != len(b.values) {
+		return false
+	}
+	if len(a.unknownFields) != len(b.unknownFields) {
+		return false
+	}
+	for tag, aval := range a.values {
+		bval, ok := b.values[tag]
+		if !ok {
+			return false
+		}
+		if !fieldsEqual(aval, bval) {
+			return false
+		}
+	}
+	for tag, au := range a.unknownFields {
+		bu, ok := b.unknownFields[tag]
+		if !ok {
+			return false
+		}
+		if len(au) != len(bu) {
+			return false
+		}
+		for i, aval := range au {
+			bval := bu[i]
+			if aval.Encoding != bval.Encoding {
+				return false
+			}
+			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
+				if !bytes.Equal(aval.Contents, bval.Contents) {
+					return false
+				}
+			} else if aval.Value != bval.Value {
+				return false
+			}
+		}
+	}
+	// all checks pass!
+	return true
+}
+
+func fieldsEqual(aval, bval interface{}) bool {
+	arv := reflect.ValueOf(aval)
+	brv := reflect.ValueOf(bval)
+	if arv.Type() != brv.Type() {
+		// it is possible that one is a dynamic message and one is not
+		apm, ok := aval.(proto.Message)
+		if !ok {
+			return false
+		}
+		bpm, ok := bval.(proto.Message)
+		if !ok {
+			return false
+		}
+		return MessagesEqual(apm, bpm)
+
+	} else {
+		switch arv.Kind() {
+		case reflect.Ptr:
+			apm, ok := aval.(proto.Message)
+			if !ok {
+				// Don't know how to compare pointer values that aren't messages!
+				// Maybe this should panic?
+				return false
+			}
+			bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
+			return MessagesEqual(apm, bpm)
+
+		case reflect.Map:
+			return mapsEqual(arv, brv)
+
+		case reflect.Slice:
+			if arv.Type() == typeOfBytes {
+				return bytes.Equal(aval.([]byte), bval.([]byte))
+			} else {
+				return slicesEqual(arv, brv)
+			}
+
+		default:
+			return aval == bval
+		}
+	}
+}
+
+func slicesEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	for i := 0; i < a.Len(); i++ {
+		ai := a.Index(i)
+		bi := b.Index(i)
+		if !fieldsEqual(ai.Interface(), bi.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
+// when one or both of the messages might be a dynamic message.
+func MessagesEqual(a, b proto.Message) bool {
+	da, aok := a.(*Message)
+	db, bok := b.(*Message)
+	// Both dynamic messages
+	if aok && bok {
+		return Equal(da, db)
+	}
+	// Neither dynamic messages
+	if !aok && !bok {
+		return proto.Equal(a, b)
+	}
+	// Mixed
+	if bok {
+		// we want a to be the dynamic one
+		b, da = a, db
+	}
+
+	// Instead of panic'ing below if we have a nil dynamic message, check
+	// now and return false if the input message is not also nil.
+	if da == nil {
+		return isNil(b)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(b)
+	if err != nil {
+		return false
+	}
+	db = NewMessageWithMessageFactory(md, da.mf)
+	if db.ConvertFrom(b) != nil {
+		return false
+	}
+	return Equal(da, db)
+}
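+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): MessagesEqual is the safe comparison when either operand might be
+// dynamic. pb.Device is a hypothetical generated type.
+//
+//	var a proto.Message = dm          // a *Message built from pb.Device's descriptor
+//	var b proto.Message = &pb.Device{}
+//	same := MessagesEqual(a, b)
+//
+// Calling proto.Equal directly is not reliable for such a mixed pair (see
+// MessagesEqual's doc comment above), which is why this helper exists.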
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..1d38161
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,46 @@
+package dynamic
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked-in version of the extension). In that case,
+// this function serializes the given value to bytes and then uses
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+	if !extd.IsExtension() {
+		return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+	}
+
+	if dm, ok := msg.(*Message); ok {
+		return dm.TrySetField(extd, val)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return err
+	}
+	if err := checkField(extd, md); err != nil {
+		return err
+	}
+
+	val, err = validFieldValue(extd, val)
+	if err != nil {
+		return err
+	}
+
+	var b codec.Buffer
+	b.SetDeterministic(defaultDeterminism)
+	if err := b.EncodeFieldValue(extd, val); err != nil {
+		return err
+	}
+	proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes())
+	return nil
+}
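+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source), assuming extd describes an int32 extension of msg's type (for
+// example obtained from an ExtensionRegistry):
+//
+//	if err := SetExtension(msg, extd, int32(42)); err != nil {
+//		return err
+//	}
+//	// a dynamic message gets the field set directly; a generated message gets
+//	// the encoded value attached via proto.SetRawExtension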
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ExtensionRegistry is a registry of known extension fields. This is used to parse
+// extension fields encountered when de-serializing a dynamic message.
+type ExtensionRegistry struct {
+	includeDefault bool
+	mu             sync.RWMutex
+	exts           map[string]map[int32]*desc.FieldDescriptor
+}
+
+// NewExtensionRegistryWithDefaults returns a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+	return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+	flds := make([]*desc.FieldDescriptor, len(exts))
+	for i, ext := range exts {
+		fd, err := desc.LoadFieldDescriptorForExtension(ext)
+		if err != nil {
+			return err
+		}
+		flds[i] = fd
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, fd := range flds {
+		r.putExtensionLocked(fd)
+	}
+	return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+	for _, ext := range exts {
+		if !ext.IsExtension() {
+			return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+		}
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range exts {
+		r.putExtensionLocked(ext)
+	}
+	return nil
+}
+
+// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
+func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.addExtensionsFromFileLocked(fd, false, nil)
+}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the given file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	already := map[*desc.FileDescriptor]struct{}{}
+	r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+	if _, ok := alreadySeen[fd]; ok {
+		return
+	}
+
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range fd.GetExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range fd.GetMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+
+	if recursive {
+		alreadySeen[fd] = struct{}{}
+		for _, dep := range fd.GetDependencies() {
+			r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+		}
+	}
+}
+
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+	for _, ext := range md.GetNestedExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range md.GetNestedMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+	msgName := fd.GetOwner().GetFullyQualifiedName()
+	m := r.exts[msgName]
+	if m == nil {
+		m = map[int32]*desc.FieldDescriptor{}
+		r.exts[msgName] = m
+	}
+	m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	fd := r.exts[messageName][tagNumber]
+	if fd == nil && r.includeDefault {
+		ext := getDefaultExtensions(messageName)[tagNumber]
+		if ext != nil {
+			fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+		}
+	}
+	return fd
+}
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedJSONName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedJSONName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+	t := proto.MessageType(messageName)
+	if t != nil {
+		msg := reflect.Zero(t).Interface().(proto.Message)
+		return proto.RegisteredExtensions(msg)
+	}
+	return nil
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+	if r == nil {
+		return []*desc.FieldDescriptor(nil)
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	flds := r.exts[messageName]
+	var ret []*desc.FieldDescriptor
+	if r.includeDefault {
+		exts := getDefaultExtensions(messageName)
+		if len(exts) > 0 || len(flds) > 0 {
+			ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+		}
+		for tag, ext := range exts {
+			if _, ok := flds[tag]; ok {
+				// skip default extension and use the one explicitly registered instead
+				continue
+			}
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd != nil {
+				ret = append(ret, fd)
+			}
+		}
+	} else if len(flds) > 0 {
+		ret = make([]*desc.FieldDescriptor, 0, len(flds))
+	}
+
+	for _, ext := range flds {
+		ret = append(ret, ext)
+	}
+	return ret
+}
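+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): building a registry that knows both the extensions linked into the
+// binary and those defined in a dynamically loaded file descriptor fd. The
+// message and extension names are hypothetical.
+//
+//	er := NewExtensionRegistryWithDefaults()
+//	er.AddExtensionsFromFileRecursively(fd)
+//	extd := er.FindExtensionByName("pkg.BaseMessage", "pkg.my_extension")
+//	if extd == nil {
+//		return fmt.Errorf("extension not registered")
+//	}
+//
+// Such a registry is consulted when de-serializing dynamic messages so that
+// extension fields can be recognized rather than kept as unknown fields.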
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 0000000..1eaedfa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,303 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// methods where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/dynamic"
+)
+
+// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
+type Stub struct {
+	channel Channel
+	mf      *dynamic.MessageFactory
+}
+
+// Channel represents the operations necessary to issue RPCs via gRPC. The
+// *grpc.ClientConn type provides this interface and will typically be the concrete
+// type used to construct Stubs. But the use of this interface allows
+// construction of stubs that use alternate concrete types as the transport for
+// RPC operations.
+type Channel interface {
+	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
+	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
+}
+
+var _ Channel = (*grpc.ClientConn)(nil)
+
+// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
+func NewStub(channel Channel) Stub {
+	return NewStubWithMessageFactory(channel, nil)
+}
+
+// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
+// dispatching RPCs and the given MessageFactory for creating response messages.
+func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
+	return Stub{channel: channel, mf: mf}
+}
+
+func requestMethod(md *desc.MethodDescriptor) string {
+	return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName())
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+	if method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(method.GetOutputType())
+	if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
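+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source): given a *grpc.ClientConn conn, a context ctx and a unary method
+// descriptor mtd obtained elsewhere, a dynamic unary call might look like:
+//
+//	stub := NewStub(conn)
+//	req := dynamic.NewMessage(mtd.GetInputType())
+//	req.SetFieldByName("id", "device-1") // hypothetical string field
+//	resp, err := stub.InvokeRpc(ctx, mtd, req)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(resp)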
+
+// InvokeRpcServerStream sends a single request message and returns the response stream. Use this for server-streaming methods.
+func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) {
+	if method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		err = cs.SendMsg(request)
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+		err = cs.CloseSend()
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+		return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
+	}
+}
+
+// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end,
+// receive the response message. Use this for client-streaming methods.
+func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) {
+	if !method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		return &ClientStream{cs, method, s.mf, cancel}, nil
+	}
+}
+
+// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response
+// messages. Use this for bidi-streaming methods.
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+	if !method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+	}
+}
+
+func methodType(md *desc.MethodDescriptor) string {
+	if md.IsClientStreaming() && md.IsServerStreaming() {
+		return "bidi-streaming"
+	} else if md.IsClientStreaming() {
+		return "client-streaming"
+	} else if md.IsServerStreaming() {
+		return "server-streaming"
+	} else {
+		return "unary"
+	}
+}
+
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+	var typeName string
+	if dm, ok := msg.(*dynamic.Message); ok {
+		typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		typeName = proto.MessageName(msg)
+	}
+	if typeName != md.GetFullyQualifiedName() {
+		return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+	}
+	return nil
+}
+
+// ServerStream represents a response stream from a server. Messages in the stream can be queried
+// as can header and trailer metadata sent by the server.
+type ServerStream struct {
+	stream   grpc.ClientStream
+	respType *desc.MessageDescriptor
+	mf       *dynamic.MessageFactory
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ServerStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be io.EOF for normal completion of the stream).
+func (s *ServerStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ServerStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
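+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source), with stub, ctx, mtd and req as in the unary sketch above:
+//
+//	str, err := stub.InvokeRpcServerStream(ctx, mtd, req)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		msg, err := str.RecvMsg()
+//		if err == io.EOF {
+//			break // server finished the stream normally
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(msg)
+//	}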
+
+// ClientStream represents the request stream of a client-streaming RPC. Messages can be sent on the
+// stream and, when done, the server's unary response and header and trailer metadata can be queried.
+type ClientStream struct {
+	stream grpc.ClientStream
+	method *desc.MethodDescriptor
+	mf     *dynamic.MessageFactory
+	cancel context.CancelFunc
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ClientStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// CloseAndReceive returns.
+func (s *ClientStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ClientStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server.
+func (s *ClientStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.method.GetInputType(), m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseAndReceive closes the outgoing request stream and then blocks for the server's response.
+func (s *ClientStream) CloseAndReceive() (proto.Message, error) {
+	if err := s.stream.CloseSend(); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(s.method.GetOutputType())
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	}
+	// make sure we get EOF for a second message
+	if err := s.stream.RecvMsg(resp); err != io.EOF {
+		if err == nil {
+			s.cancel()
+			return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName())
+		} else {
+			return nil, err
+		}
+	}
+	return resp, nil
+}
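+
+// Illustrative usage (editor's note, not part of the upstream protoreflect
+// source), with stub, ctx and a client-streaming method descriptor mtd; reqs
+// is a hypothetical slice of request messages:
+//
+//	cs, err := stub.InvokeRpcClientStream(ctx, mtd)
+//	if err != nil {
+//		return err
+//	}
+//	for _, req := range reqs {
+//		if err := cs.SendMsg(req); err != nil {
+//			return err
+//		}
+//	}
+//	resp, err := cs.CloseAndReceive()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(resp)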
+
+// BidiStream represents a bi-directional stream for sending messages to and receiving
+// messages from a server. The header and trailer metadata sent by the server can also be
+// queried.
+type BidiStream struct {
+	stream   grpc.ClientStream
+	reqType  *desc.MessageDescriptor
+	respType *desc.MessageDescriptor
+	mf       *dynamic.MessageFactory
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *BidiStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be io.EOF for normal completion of the stream).
+func (s *BidiStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *BidiStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server.
+func (s *BidiStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.reqType, m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseSend indicates the request stream has ended. Invoke this after all request messages
+// are sent (even if there are zero such messages).
+func (s *BidiStream) CloseSend() error {
+	return s.stream.CloseSend()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *BidiStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
+type indentBuffer struct {
+	bytes.Buffer
+	indent      string
+	indentCount int
+	comma       bool
+}
+
+func (b *indentBuffer) start() error {
+	if b.indentCount >= 0 {
+		b.indentCount++
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) sep() error {
+	if b.indentCount >= 0 {
+		_, err := b.WriteString(": ")
+		return err
+	} else {
+		return b.WriteByte(':')
+	}
+}
+
+func (b *indentBuffer) end() error {
+	if b.indentCount >= 0 {
+		b.indentCount--
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) maybeNext(first *bool) error {
+	if *first {
+		*first = false
+		return nil
+	} else {
+		return b.next()
+	}
+}
+
+func (b *indentBuffer) next() error {
+	if b.indentCount >= 0 {
+		return b.newLine(b.comma)
+	} else if b.comma {
+		return b.WriteByte(',')
+	} else {
+		return b.WriteByte(' ')
+	}
+}
+
+func (b *indentBuffer) newLine(comma bool) error {
+	if comma {
+		err := b.WriteByte(',')
+		if err != nil {
+			return err
+		}
+	}
+
+	err := b.WriteByte('\n')
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < b.indentCount; i++ {
+		_, err := b.WriteString(b.indent)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..02c8298
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1256 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	// link in the well-known-types that have a special JSON format
+	_ "github.com/golang/protobuf/ptypes/any"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "github.com/golang/protobuf/ptypes/empty"
+	_ "github.com/golang/protobuf/ptypes/struct"
+	_ "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/golang/protobuf/ptypes/wrappers"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+var wellKnownTypeNames = map[string]struct{}{
+	"google.protobuf.Any":       {},
+	"google.protobuf.Empty":     {},
+	"google.protobuf.Duration":  {},
+	"google.protobuf.Timestamp": {},
+	// struct.proto
+	"google.protobuf.Struct":    {},
+	"google.protobuf.Value":     {},
+	"google.protobuf.ListValue": {},
+	// wrappers.proto
+	"google.protobuf.DoubleValue": {},
+	"google.protobuf.FloatValue":  {},
+	"google.protobuf.Int64Value":  {},
+	"google.protobuf.UInt64Value": {},
+	"google.protobuf.Int32Value":  {},
+	"google.protobuf.UInt32Value": {},
+	"google.protobuf.BoolValue":   {},
+	"google.protobuf.StringValue": {},
+	"google.protobuf.BytesValue":  {},
+}
+
+// MarshalJSON serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a compact form: no newlines, and spaces between fields and
+// between field identifiers and values are elided.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSON() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{})
+}
+
+// MarshalJSONIndent serializes this message to bytes in JSON format, returning
+// an error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values. Indentation of two spaces is
+// used.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSONIndent() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal it.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+	var b indentBuffer
+	b.indent = opts.Indent
+	if len(opts.Indent) == 0 {
+		b.indentCount = -1
+	}
+	b.comma = true
+	if err := m.marshalJSON(&b, opts); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
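+
+// An illustrative sketch (not part of the upstream API): building and
+// marshaling a dynamic message, assuming md is a *desc.MessageDescriptor
+// loaded elsewhere and "id" is a hypothetical int32 field.
+//
+//    msg := NewMessage(md)
+//    if err := msg.TrySetFieldByName("id", int32(42)); err != nil {
+//        return err
+//    }
+//    js, err := msg.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  ", EmitDefaults: true})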
+
+func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
+	if m == nil {
+		_, err := b.WriteString("null")
+		return err
+	}
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := marshalWellKnownType(m, b, opts); ok {
+		return err
+	}
+
+	err := b.WriteByte('{')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	var tags []int
+	if opts.EmitDefaults {
+		tags = m.allKnownFieldTags()
+	} else {
+		tags = m.knownFieldTags()
+	}
+
+	first := true
+
+	for _, tag := range tags {
+		itag := int32(tag)
+		fd := m.FindFieldDescriptor(itag)
+
+		v, ok := m.values[itag]
+		if !ok {
+			if fd.GetOneOf() != nil {
+				// don't print defaults for fields in a oneof
+				continue
+			}
+			v = fd.GetDefaultValue()
+		}
+
+		err := b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		err = marshalKnownFieldJSON(b, fd, v, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	err = b.WriteByte('}')
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// should not happen: the name is in wellKnownTypeNames, so a generated type must be registered
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// convert dynamic message to well-known type and let jsonpb marshal it
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := m.MergeInto(msg); err != nil {
+		return true, err
+	}
+	return true, opts.Marshal(b, msg)
+}
+
+func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	var jsonName string
+	if opts.OrigName {
+		jsonName = fd.GetName()
+	} else {
+		jsonName = fd.AsFieldDescriptorProto().GetJsonName()
+		if jsonName == "" {
+			jsonName = fd.GetName()
+		}
+	}
+	if fd.IsExtension() {
+		var scope string
+		switch parent := fd.GetParent().(type) {
+		case *desc.FileDescriptor:
+			scope = parent.GetPackage()
+		default:
+			scope = parent.GetFullyQualifiedName()
+		}
+		if scope == "" {
+			jsonName = fmt.Sprintf("[%s]", jsonName)
+		} else {
+			jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
+		}
+	}
+	err := writeJsonString(b, jsonName)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	if isNil(v) {
+		_, err := b.WriteString("null")
+		return err
+	}
+
+	if fd.IsMap() {
+		err = b.WriteByte('{')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		md := fd.GetMessageType()
+		vfd := md.FindFieldByNumber(2)
+
+		mp := v.(map[interface{}]interface{})
+		keys := make([]interface{}, 0, len(mp))
+		for k := range mp {
+			keys = append(keys, k)
+		}
+		sort.Sort(sortable(keys))
+		first := true
+		for _, mk := range keys {
+			mv := mp[mk]
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+
+			err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte('}')
+
+	} else if fd.IsRepeated() {
+		err = b.WriteByte('[')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		sl := v.([]interface{})
+		first := true
+		for _, slv := range sl {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldValueJSON(b, fd, slv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte(']')
+
+	} else {
+		return marshalKnownFieldValueJSON(b, fd, v, opts)
+	}
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+	return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+	vi := s[i]
+	vj := s[j]
+	switch reflect.TypeOf(vi).Kind() {
+	case reflect.Int32:
+		return vi.(int32) < vj.(int32)
+	case reflect.Int64:
+		return vi.(int64) < vj.(int64)
+	case reflect.Uint32:
+		return vi.(uint32) < vj.(uint32)
+	case reflect.Uint64:
+		return vi.(uint64) < vj.(uint64)
+	case reflect.String:
+		return vi.(string) < vj.(string)
+	case reflect.Bool:
+		return !vi.(bool) && vj.(bool)
+	default:
+		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+	}
+}
+
+func (s sortable) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func isNil(v interface{}) bool {
+	if v == nil {
+		return true
+	}
+	rv := reflect.ValueOf(v)
+	return rv.Kind() == reflect.Ptr && rv.IsNil()
+}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+	rk := reflect.ValueOf(mk)
+	var strkey string
+	switch rk.Kind() {
+	case reflect.Bool:
+		strkey = strconv.FormatBool(rk.Bool())
+	case reflect.Int32, reflect.Int64:
+		strkey = strconv.FormatInt(rk.Int(), 10)
+	case reflect.Uint32, reflect.Uint64:
+		strkey = strconv.FormatUint(rk.Uint(), 10)
+	case reflect.String:
+		strkey = rk.String()
+	default:
+		return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+	}
+	err := writeJsonString(b, strkey)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+	return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
+func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int64:
+		return writeJsonString(b, strconv.FormatInt(rv.Int(), 10))
+	case reflect.Int32:
+		ed := fd.GetEnumType()
+		if !opts.EnumsAsInts && ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				return writeJsonString(b, vd.GetName())
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint64:
+		return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Uint32:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = `"NaN"`
+		} else if math.IsInf(f, 1) {
+			str = `"Infinity"`
+		} else if math.IsInf(f, -1) {
+			str = `"-Infinity"`
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
+		return writeJsonString(b, bstr)
+	case reflect.String:
+		return writeJsonString(b, rv.String())
+	default:
+		// must be a message
+		if isNil(v) {
+			_, err := b.WriteString("null")
+			return err
+		}
+
+		if dm, ok := v.(*Message); ok {
+			return dm.marshalJSON(b, opts)
+		}
+
+		var err error
+		if b.indentCount <= 0 || len(b.indent) == 0 {
+			err = opts.Marshal(b, v.(proto.Message))
+		} else {
+			str, err := opts.MarshalToString(v.(proto.Message))
+			if err != nil {
+				return err
+			}
+			indent := strings.Repeat(b.indent, b.indentCount)
+			pos := 0
+			// add indention prefix to each line
+			for pos < len(str) {
+				start := pos
+				nextPos := strings.Index(str[pos:], "\n")
+				if nextPos == -1 {
+					nextPos = len(str)
+				} else {
+					nextPos = pos + nextPos + 1 // include newline
+				}
+				line := str[start:nextPos]
+				if pos > 0 {
+					_, err = b.WriteString(indent)
+					if err != nil {
+						return err
+					}
+				}
+				_, err = b.WriteString(line)
+				if err != nil {
+					return err
+				}
+				pos = nextPos
+			}
+		}
+		return err
+	}
+}
+
+func writeJsonString(b *indentBuffer, s string) error {
+	if sbytes, err := json.Marshal(s); err != nil {
+		return err
+	} else {
+		_, err := b.Write(sbytes)
+		return err
+	}
+}
+
+// UnmarshalJSON de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
+// value) unmarshaler:
+//
+//    m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+//
+// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
+// will be used when parsing google.protobuf.Any messages.
+func (m *Message) UnmarshalJSON(js []byte) error {
+	return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
+// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSON(js []byte) error {
+	return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. The given unmarshaler conveys options used
+// when parsing the JSON. This function first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// The decoding is lenient:
+//  1. The JSON can refer to fields either by their JSON name or by their
+//     declared name.
+//  2. The JSON can use either numeric values or string names for enum values.
+//
+// When instantiating nested messages, if this message's associated factory
+// returns a generated message type (as opposed to a dynamic message), the given
+// unmarshaler is used to unmarshal it.
+//
+// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
+// the given unmarshaler is augmented with knowledge of message types known to
+// this message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
+		return err
+	}
+	return m.Validate()
+}
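+
+// An illustrative sketch (not part of the upstream API): parsing JSON into a
+// dynamic message while tolerating unknown fields, assuming md is a
+// *desc.MessageDescriptor and "id" is a hypothetical field.
+//
+//    msg := NewMessage(md)
+//    u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+//    if err := msg.UnmarshalJSONPB(u, []byte(`{"id": 42}`)); err != nil {
+//        return err
+//    }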
+
+// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
+// format, in the given bytes into this message. The given unmarshaler conveys
+// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	r := newJsReader(js)
+	err := m.unmarshalJson(r, opts)
+	if err != nil {
+		return err
+	}
+	if t, err := r.poll(); err != io.EOF {
+		b, _ := ioutil.ReadAll(r.unread())
+		s := fmt.Sprintf("%v%s", t, string(b))
+		return fmt.Errorf("superfluous data found after JSON object: %q", s)
+	}
+	return nil
+}
+
+func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// should not happen: the name is in wellKnownTypeNames, so a generated type must be registered
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// extract json value from r
+	var js json.RawMessage
+	if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
+		return true, err
+	}
+	if err := r.skip(); err != nil {
+		return true, err
+	}
+
+	// unmarshal into well-known type and then convert to dynamic message
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
+		return true, err
+	}
+	return true, m.MergeFrom(msg)
+}
+
+func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := unmarshalWellKnownType(m, r, opts); ok {
+		return err
+	}
+
+	t, err := r.peek()
+	if err != nil {
+		return err
+	}
+	if t == nil {
+		// if json is simply "null" we do nothing
+		r.poll()
+		return nil
+	}
+
+	if err := r.beginObject(); err != nil {
+		return err
+	}
+
+	for r.hasNext() {
+		f, err := r.nextObjectKey()
+		if err != nil {
+			return err
+		}
+		fd := m.FindFieldDescriptorByJSONName(f)
+		if fd == nil {
+			if opts.AllowUnknownFields {
+				r.skip()
+				continue
+			}
+			return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
+		}
+		v, err := unmarshalJsField(fd, r, m.mf, opts)
+		if err != nil {
+			return err
+		}
+		if v != nil {
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		} else if fd.GetOneOf() != nil {
+			// preserve explicit null for oneof fields (this is a little odd but
+			// mimics the behavior of jsonpb with oneofs in generated message types)
+			if fd.GetMessageType() != nil {
+				typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
+				if typ != nil {
+					// typed nil
+					if typ.Kind() != reflect.Ptr {
+						typ = reflect.PtrTo(typ)
+					}
+					v = reflect.Zero(typ).Interface()
+				} else {
+					// can't use nil dynamic message, so we just use empty one instead
+					v = m.mf.NewDynamicMessage(fd.GetMessageType())
+				}
+				if err := m.setField(fd, v); err != nil {
+					return err
+				}
+			} else {
+				// not a message... explicit null makes no sense
+				return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
+			}
+		} else {
+			m.clearField(fd)
+		}
+	}
+
+	if err := r.endObject(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+	// we look for ListValue; but we also look for Value, which can be assigned a ListValue
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		(fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" ||
+			fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value")
+}
+
+func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && !isWellKnownValue(fd) {
+		// if value is null, just return nil
+		// (unless field is google.protobuf.Value, in which case
+		// we fall through to parse it as an instance where its
+		// underlying value is set to a NullValue)
+		r.poll()
+		return nil, nil
+	}
+
+	if t == json.Delim('{') && fd.IsMap() {
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valueType := entryType.FindFieldByNumber(2)
+		mp := map[interface{}]interface{}{}
+
+		// TODO: if there are just two map keys "key" and "value" and they have the right type of values,
+		// treat this JSON object as a single map entry message. (In keeping with support of map fields as
+		// if they were normal repeated field of entry messages as well as supporting a transition from
+		// optional to repeated...)
+
+		if err := r.beginObject(); err != nil {
+			return nil, err
+		}
+		for r.hasNext() {
+			kk, err := unmarshalJsFieldElement(keyType, r, mf, opts, false)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := unmarshalJsFieldElement(valueType, r, mf, opts, true)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+		}
+		if err := r.endObject(); err != nil {
+			return nil, err
+		}
+
+		return mp, nil
+	} else if t == json.Delim('[') && !isWellKnownListValue(fd) {
+		// We support parsing an array, even if the field is not repeated, to mimic the proto
+		// binary wire format, which tolerates changing an optional field to repeated and vice versa.
+		// If the field is not repeated, we only keep the last value in the array.
+
+		if err := r.beginArray(); err != nil {
+			return nil, err
+		}
+		var sl []interface{}
+		var v interface{}
+		for r.hasNext() {
+			var err error
+			v, err = unmarshalJsFieldElement(fd, r, mf, opts, false)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() && v != nil {
+				sl = append(sl, v)
+			}
+		}
+		if err := r.endArray(); err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			mp := map[interface{}]interface{}{}
+			for _, m := range sl {
+				msg := m.(*Message)
+				kk, err := msg.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				vv, err := msg.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				mp[kk] = vv
+			}
+			return mp, nil
+		} else if fd.IsRepeated() {
+			return sl, nil
+		} else {
+			return v, nil
+		}
+	} else {
+		// We support parsing a singular value, even if the field is repeated, to mimic the proto
+		// binary wire format, which tolerates changing an optional field to repeated and vice versa.
+		// If the field is repeated, we store the value as a singleton slice of that one value.
+
+		v, err := unmarshalJsFieldElement(fd, r, mf, opts, false)
+		if err != nil {
+			return nil, err
+		}
+		if v == nil {
+			return nil, nil
+		}
+		if fd.IsRepeated() {
+			return []interface{}{v}, nil
+		} else {
+			return v, nil
+		}
+	}
+}
+
+func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler, allowNilMessage bool) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+		if t == nil && allowNilMessage {
+			// if json is simply "null" return a nil pointer
+			r.poll()
+			return nilMessage(fd.GetMessageType()), nil
+		}
+
+		m := mf.NewMessage(fd.GetMessageType())
+		if dm, ok := m.(*Message); ok {
+			if err := dm.unmarshalJson(r, opts); err != nil {
+				return nil, err
+			}
+		} else {
+			var msg json.RawMessage
+			if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
+				return nil, err
+			}
+			if err := r.skip(); err != nil {
+				return nil, err
+			}
+			if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
+				return nil, err
+			}
+		}
+		return m, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if e, err := r.nextNumber(); err != nil {
+			return nil, err
+		} else {
+			// value could be string or number
+			if i, err := e.Int64(); err != nil {
+				// number cannot be parsed, so see if it's an enum value name
+				vd := fd.GetEnumType().FindValueByName(string(e))
+				if vd != nil {
+					return vd.GetNumber(), nil
+				} else {
+					return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
+				}
+			} else if i > math.MaxInt32 || i < math.MinInt32 {
+				return nil, NumericOverflowError
+			} else {
+				return int32(i), err
+			}
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := r.nextInt(); err != nil {
+			return nil, err
+		} else if i > math.MaxInt32 || i < math.MinInt32 {
+			return nil, NumericOverflowError
+		} else {
+			return int32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return r.nextInt()
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := r.nextUint(); err != nil {
+			return nil, err
+		} else if i > math.MaxUint32 {
+			return nil, NumericOverflowError
+		} else {
+			return uint32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return r.nextUint()
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if str, ok := t.(string); ok {
+			if str == "true" {
+				r.poll() // consume token
+				return true, err
+			} else if str == "false" {
+				r.poll() // consume token
+				return false, err
+			}
+		}
+		return r.nextBool()
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := r.nextFloat(); err != nil {
+			return nil, err
+		} else {
+			return float32(f), nil
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return r.nextFloat()
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return r.nextBytes()
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return r.nextString()
+
+	default:
+		return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
+	}
+}
+
+type jsReader struct {
+	reader  *bytes.Reader
+	dec     *json.Decoder
+	current json.Token
+	peeked  bool
+}
+
+func newJsReader(b []byte) *jsReader {
+	reader := bytes.NewReader(b)
+	dec := json.NewDecoder(reader)
+	dec.UseNumber()
+	return &jsReader{reader: reader, dec: dec}
+}
+
+func (r *jsReader) unread() io.Reader {
+	bufs := make([]io.Reader, 3)
+	var peeked []byte
+	if r.peeked {
+		if _, ok := r.current.(json.Delim); ok {
+			peeked = []byte(fmt.Sprintf("%v", r.current))
+		} else {
+			peeked, _ = json.Marshal(r.current)
+		}
+	}
+	readerCopy := *r.reader
+	decCopy := *r.dec
+
+	bufs[0] = bytes.NewReader(peeked)
+	bufs[1] = decCopy.Buffered()
+	bufs[2] = &readerCopy
+	return &concatReader{bufs: bufs}
+}
+
+func (r *jsReader) hasNext() bool {
+	return r.dec.More()
+}
+
+func (r *jsReader) peek() (json.Token, error) {
+	if r.peeked {
+		return r.current, nil
+	}
+	t, err := r.dec.Token()
+	if err != nil {
+		return nil, err
+	}
+	r.peeked = true
+	r.current = t
+	return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+	if r.peeked {
+		ret := r.current
+		r.current = nil
+		r.peeked = false
+		return ret, nil
+	}
+	return r.dec.Token()
+}
+
+func (r *jsReader) beginObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
+	return err
+}
+
+func (r *jsReader) endObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
+	return err
+}
+
+func (r *jsReader) beginArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
+	return err
+}
+
+func (r *jsReader) endArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
+	return err
+}
+
+func (r *jsReader) nextObjectKey() (string, error) {
+	return r.nextString()
+}
+
+func (r *jsReader) nextString() (string, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+	if err != nil {
+		return "", err
+	}
+	return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+	str, err := r.nextString()
+	if err != nil {
+		return nil, err
+	}
+	return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+	if err != nil {
+		return false, err
+	}
+	return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Float64()
+}
+
+func (r *jsReader) nextNumber() (json.Number, error) {
+	t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
+	if err != nil {
+		return "", err
+	}
+	switch t := t.(type) {
+	case json.Number:
+		return t, nil
+	case string:
+		return json.Number(t), nil
+	}
+	return "", fmt.Errorf("expecting a number but got %v", t)
+}
+
+func (r *jsReader) skip() error {
+	t, err := r.poll()
+	if err != nil {
+		return err
+	}
+	if t == json.Delim('[') {
+		if err := r.skipArray(); err != nil {
+			return err
+		}
+	} else if t == json.Delim('{') {
+		if err := r.skipObject(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *jsReader) skipArray() error {
+	for r.hasNext() {
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) skipObject() error {
+	for r.hasNext() {
+		// skip object key
+		if err := r.skip(); err != nil {
+			return err
+		}
+		// and value
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endObject(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+	t, err := r.poll()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && ifNil != nil {
+		return ifNil, nil
+	}
+	if !predicate(t) {
+		return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+	}
+	return t, nil
+}
+
+type concatReader struct {
+	bufs []io.Reader
+	curr int
+}
+
+func (r *concatReader) Read(p []byte) (n int, err error) {
+	for {
+		if r.curr >= len(r.bufs) {
+			err = io.EOF
+			return
+		}
+		var c int
+		c, err = r.bufs[r.curr].Read(p)
+		n += c
+		if err != io.EOF {
+			return
+		}
+		r.curr++
+		p = p[c:]
+	}
+}
+
+// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
+// to resolve message names. It uses the given factory, which may be nil, to
+// instantiate messages. The messages that it returns when resolving a type name
+// may often be dynamic messages.
+func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
+	return &anyResolver{mf: mf, files: files}
+}
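+
+// An illustrative sketch (not part of the upstream API): plugging this
+// resolver into jsonpb so google.protobuf.Any fields can be expanded using
+// descriptor knowledge only. fd is an assumed *desc.FileDescriptor and msg
+// stands in for any proto.Message being marshaled.
+//
+//    m := &jsonpb.Marshaler{AnyResolver: AnyResolver(nil, fd)}
+//    js, err := m.MarshalToString(msg)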
+
+type anyResolver struct {
+	mf      *MessageFactory
+	files   []*desc.FileDescriptor
+	ignored map[*desc.FileDescriptor]struct{}
+	other   jsonpb.AnyResolver
+}
+
+func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
+	if r, ok := r.(*anyResolver); ok {
+		if _, ok := r.ignored[f]; ok {
+			// if the current resolver is ignoring this file, it's because another
+			// (upstream) resolver is already handling it, so nothing to do
+			return r, false
+		}
+		for _, file := range r.files {
+			if file == f {
+				// no need to wrap!
+				return r, false
+			}
+		}
+		// ignore files that will be checked by the resolver we're wrapping
+		// (we'll just delegate and let it search those files)
+		ignored := map[*desc.FileDescriptor]struct{}{}
+		for i := range r.ignored {
+			ignored[i] = struct{}{}
+		}
+		ignore(r.files, ignored)
+		return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
+	}
+	return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
+}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+	for _, f := range files {
+		if _, ok := ignored[f]; ok {
+			continue
+		}
+		ignored[f] = struct{}{}
+		ignore(f.GetDependencies(), ignored)
+	}
+}
+
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+
+	// see if the user-specified resolver is able to do the job
+	if r.other != nil {
+		msg, err := r.other.Resolve(typeUrl)
+		if err == nil {
+			return msg, nil
+		}
+	}
+
+	// try to find the message in our known set of files
+	checked := map[*desc.FileDescriptor]struct{}{}
+	for _, f := range r.files {
+		md := r.findMessage(f, mname, checked)
+		if md != nil {
+			return r.mf.NewMessage(md), nil
+		}
+	}
+	// failing that, see if the message factory knows about this type
+	var ktr *KnownTypeRegistry
+	if r.mf != nil {
+		ktr = r.mf.ktr
+	} else {
+		ktr = (*KnownTypeRegistry)(nil)
+	}
+	m := ktr.CreateIfKnown(mname)
+	if m != nil {
+		return m, nil
+	}
+
+	// no other resolver to fall back to, so mimic the default behavior
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	// if this is an ignored descriptor, skip
+	if _, ok := r.ignored[fd]; ok {
+		return nil
+	}
+
+	// bail if we've already checked this file
+	if _, ok := checked[fd]; ok {
+		return nil
+	}
+	checked[fd] = struct{}{}
+
+	// see if this file has the message
+	md := fd.FindMessage(msgName)
+	if md != nil {
+		return md
+	}
+
+	// if not, recursively search the file's imports
+	for _, dep := range fd.GetDependencies() {
+		md = r.findMessage(dep, msgName, checked)
+		if md != nil {
+			return md
+		}
+	}
+	return nil
+}
+
+var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..03162a4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,130 @@
+//go:build !go1.12
+// +build !go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty because MapKeys()
+		// function allocates heavily.
+		return true
+	}
+
+	for _, k := range a.MapKeys() {
+		av := a.MapIndex(k)
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	for _, k := range val.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validElementFieldValueForRv(keyField, k, false)
+		if err != nil {
+			return nil, err
+		}
+		v := val.MapIndex(k)
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validElementFieldValueForRv(valField, v, true)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	for _, k := range src.MapKeys() {
+		if !canConvert(k, kt) {
+			return false
+		}
+		if !canConvert(src.MapIndex(k), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	for _, k := range src.MapKeys() {
+		v := src.MapIndex(k)
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk, deterministic); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv, deterministic); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	for _, k := range rv.MapKeys() {
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		v := rv.MapIndex(k)
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..ef1b370
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,138 @@
+//go:build go1.12
+// +build go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty
+		return true
+	}
+
+	iter := a.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		av := iter.Value()
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	iter := val.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validElementFieldValueForRv(keyField, k, false)
+		if err != nil {
+			return nil, err
+		}
+		v := iter.Value()
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validElementFieldValueForRv(valField, v, true)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		if !canConvert(iter.Key(), kt) {
+			return false
+		}
+		if !canConvert(iter.Value(), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk, deterministic); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv, deterministic); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	iter := rv.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message. Use
+// this instead of proto.Merge when one or both of the messages might be a
+// dynamic message. If there is a problem merging the messages, such as the
+// two messages having different types, then this method will panic (just as
+// proto.Merge does).
+func Merge(dst, src proto.Message) {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			panic(err.Error())
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			panic(err.Error())
+		}
+	} else {
+		proto.Merge(dst, src)
+	}
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panicking.
+func TryMerge(dst, src proto.Message) error {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			return err
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			return err
+		}
+	} else {
+		// proto.Merge panics on bad input, so we first verify
+		// inputs and return error instead of panic
+		out := reflect.ValueOf(dst)
+		if out.IsNil() {
+			return errors.New("proto: nil destination")
+		}
+		in := reflect.ValueOf(src)
+		if in.Type() != out.Type() {
+			return errors.New("proto: type mismatch")
+		}
+		proto.Merge(dst, src)
+	}
+	return nil
+}
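+
+// An illustrative sketch (not part of the upstream API): merging a generated
+// message src into a dynamic message without risking a panic, assuming md
+// describes the same message type as src.
+//
+//    dst := NewMessage(md)
+//    if err := TryMerge(dst, src); err != nil {
+//        return err
+//    }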
+
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+	rv := reflect.ValueOf(val)
+
+	if fd.IsMap() && rv.Kind() == reflect.Map {
+		return mergeMapField(m, fd, rv)
+	}
+
+	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+		for i := 0; i < rv.Len(); i++ {
+			e := rv.Index(i)
+			if e.Kind() == reflect.Interface && !e.IsNil() {
+				e = e.Elem()
+			}
+			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if fd.IsRepeated() {
+		return m.addRepeatedField(fd, val)
+	} else if fd.GetMessageType() == nil {
+		return m.setField(fd, val)
+	}
+
+	// it's a message type, so we want to merge contents
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+
+	existing, _ := m.doGetField(fd, true)
+	if existing != nil && !reflect.ValueOf(existing).IsNil() {
+		return TryMerge(existing.(proto.Message), val.(proto.Message))
+	}
+
+	// no existing message, so just set field
+	m.internalSetField(fd, val)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..9ab8e61
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,201 @@
+package dynamic
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+	er  *ExtensionRegistry
+	ktr *KnownTypeRegistry
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(er, nil)
+}
+
+// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
+// known types, per the given registry, will be returned as normal protobuf messages
+// (e.g. generated structs, instead of dynamic messages).
+func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(nil, ktr)
+}
+
+// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
+// (those for which protoc-generated code is statically linked into the Go program) are
+// known types. If any dynamic messages are produced, they will recognize and parse all
+// "default" extension fields. This is the equivalent of:
+//   NewMessageFactoryWithRegistries(
+//       NewExtensionRegistryWithDefaults(),
+//       NewKnownTypeRegistryWithDefaults())
+func NewMessageFactoryWithDefaults() *MessageFactory {
+	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
+}
+
+// NewMessageFactoryWithRegistries creates a new message factory with the given extension
+// and known type registries.
+func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
+	return &MessageFactory{
+		er:  er,
+		ktr: ktr,
+	}
+}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+	var ktr *KnownTypeRegistry
+	if f != nil {
+		ktr = f.ktr
+	}
+	if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+		return m
+	}
+	return NewMessageWithMessageFactory(md, f)
+}
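+
+// An illustrative sketch (not part of the upstream API): with the defaults
+// factory, descriptors for statically linked generated types yield generated
+// structs, while anything else yields a dynamic message. md is an assumed
+// *desc.MessageDescriptor.
+//
+//    mf := NewMessageFactoryWithDefaults()
+//    msg := mf.NewMessage(md)
+//    if dm, ok := msg.(*Message); ok {
+//        // no generated type was linked in; work with the dynamic message
+//        _ = dm
+//    }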
+
+// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
+// descriptor. This is like f.NewMessage(md) except the known type registry is not
+// consulted so the return value is always a dynamic message.
+//
+// This is also like dynamic.NewMessage(md) except that the returned message will use
+// this factory when creating other messages, like during de-serialization of fields
+// that are themselves message types.
+func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, f)
+}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.er
+}
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
+// KnownTypeRegistry is a registry of known message types, as identified by their
+// fully-qualified name. A known message type is one for which a protoc-generated
+// struct exists, so a dynamic message is not necessary to represent it. A
+// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
+// struct or a dynamic message. The zero-value registry (including the behavior of
+// a nil pointer) only knows about the "well-known types" in protobuf. These
+// include only the wrapper types and a handful of other special types like Any,
+// Duration, and Timestamp.
+type KnownTypeRegistry struct {
+	excludeWkt     bool
+	includeDefault bool
+	mu             sync.RWMutex
+	types          map[string]reflect.Type
+}
+
+// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
+// "default" types (those for which protoc-generated code is statically linked
+// into the Go program).
+func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
+	return &KnownTypeRegistry{includeDefault: true}
+}
+
+// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
+// include the "well-known types" in protobuf. So even well-known types would be
+// represented by a dynamic message.
+func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
+	return &KnownTypeRegistry{excludeWkt: true}
+}
+
+// AddKnownType adds the types of the given messages as known types.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.types == nil {
+		r.types = map[string]reflect.Type{}
+	}
+	for _, kt := range kts {
+		r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+	}
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+	msgType := r.GetKnownType(messageName)
+	if msgType == nil {
+		return nil
+	}
+
+	if msgType.Kind() == reflect.Ptr {
+		return reflect.New(msgType.Elem()).Interface().(proto.Message)
+	} else {
+		return reflect.New(msgType).Elem().Interface().(proto.Message)
+	}
+}
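+
+// An illustrative sketch (not part of the upstream API): registering a
+// generated type so a factory backed by this registry returns it instead of a
+// dynamic message. It assumes the ptypes/timestamp package is imported as
+// timestamp.
+//
+//    ktr := NewKnownTypeRegistryWithoutWellKnownTypes()
+//    ktr.AddKnownType(&timestamp.Timestamp{})
+//    msg := ktr.CreateIfKnown("google.protobuf.Timestamp") // *timestamp.Timestamp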
+
+func isWellKnownType(t reflect.Type) bool {
+	if t.Implements(typeOfWkt) {
+		return true
+	}
+	if msg, ok := reflect.Zero(t).Interface().(proto.Message); ok {
+		name := proto.MessageName(msg)
+		_, ok := wellKnownTypeNames[name]
+		return ok
+	}
+	return false
+}
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+	var msgType reflect.Type
+	if r == nil {
+		// a nil registry behaves the same as zero value instance: only know of well-known types
+		t := proto.MessageType(messageName)
+		if t != nil && isWellKnownType(t) {
+			msgType = t
+		}
+	} else {
+		if r.includeDefault {
+			msgType = proto.MessageType(messageName)
+		} else if !r.excludeWkt {
+			t := proto.MessageType(messageName)
+			if t != nil && isWellKnownType(t) {
+				msgType = t
+			}
+		}
+		if msgType == nil {
+			r.mu.RLock()
+			msgType = r.types[messageName]
+			r.mu.RUnlock()
+		}
+	}
+
+	return msgType
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..5784d3e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1177 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+	var b indentBuffer
+	b.indentCount = -1 // no indentation
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+	var b indentBuffer
+	b.indent = "  " // TODO: option for indent?
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
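+
+// An illustrative sketch (not part of the upstream API): rendering a dynamic
+// message in the pretty-printed text form, assuming md is a
+// *desc.MessageDescriptor available to the caller.
+//
+//    msg := NewMessage(md)
+//    txt, err := msg.MarshalTextIndent()
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Printf("%s\n", txt)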
+
+func (m *Message) marshalText(b *indentBuffer) error {
+	// TODO: option for emitting extended Any format?
+	first := true
+	// first the known fields
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		v := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd.IsMap() {
+			md := fd.GetMessageType()
+			kfd := md.FindFieldByNumber(1)
+			vfd := md.FindFieldByNumber(2)
+			mp := v.(map[interface{}]interface{})
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, mk := range keys {
+				mv := mp[mk]
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
+				if err != nil {
+					return err
+				}
+			}
+		} else if fd.IsRepeated() {
+			sl := v.([]interface{})
+			for _, slv := range sl {
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldText(b, fd, slv)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldText(b, fd, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	// then the unknown fields
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		ufs := m.unknownFields[itag]
+		for _, uf := range ufs {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			_, err = fmt.Fprintf(b, "%d", tag)
+			if err != nil {
+				return err
+			}
+			if uf.Encoding == proto.WireStartGroup {
+				err = b.WriteByte('{')
+				if err != nil {
+					return err
+				}
+				err = b.start()
+				if err != nil {
+					return err
+				}
+				in := codec.NewBuffer(uf.Contents)
+				err = marshalUnknownGroupText(b, in, true)
+				if err != nil {
+					return err
+				}
+				err = b.end()
+				if err != nil {
+					return err
+				}
+				err = b.WriteByte('}')
+				if err != nil {
+					return err
+				}
+			} else {
+				err = b.sep()
+				if err != nil {
+					return err
+				}
+				if uf.Encoding == proto.WireBytes {
+					err = writeString(b, string(uf.Contents))
+					if err != nil {
+						return err
+					}
+				} else {
+					_, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
+	var name string
+	if fd.IsExtension() {
+		name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+	} else {
+		name = fd.GetName()
+	}
+	_, err := b.WriteString(name)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	err = b.WriteByte('<')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	err = marshalKnownFieldText(b, kfd, mk)
+	if err != nil {
+		return err
+	}
+	err = b.next()
+	if err != nil {
+		return err
+	}
+	if !isNil(mv) {
+		err = marshalKnownFieldText(b, vfd, mv)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	return b.WriteByte('>')
+}
+
+func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
+	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	if group {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
+		} else {
+			name = fd.GetMessageType().GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+	} else {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+		} else {
+			name = fd.GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+		err = b.sep()
+		if err != nil {
+			return err
+		}
+	}
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int32, reflect.Int64:
+		ed := fd.GetEnumType()
+		if ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				_, err := b.WriteString(vd.GetName())
+				return err
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint32, reflect.Uint64:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = "nan"
+		} else if math.IsInf(f, 1) {
+			str = "inf"
+		} else if math.IsInf(f, -1) {
+			str = "-inf"
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		return writeString(b, string(rv.Bytes()))
+	case reflect.String:
+		return writeString(b, rv.String())
+	default:
+		var err error
+		if group {
+			err = b.WriteByte('{')
+		} else {
+			err = b.WriteByte('<')
+		}
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			err = dm.marshalText(b)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = proto.CompactText(b, v.(proto.Message))
+			if err != nil {
+				return err
+			}
+		}
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		if group {
+			return b.WriteByte('}')
+		} else {
+			return b.WriteByte('>')
+		}
+	}
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(b *indentBuffer, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := b.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = b.WriteString("\\n")
+		case '\r':
+			_, err = b.WriteString("\\r")
+		case '\t':
+			_, err = b.WriteString("\\t")
+		case '"':
+			_, err = b.WriteString("\\\"")
+		case '\\':
+			_, err = b.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c < 0x7f {
+				err = b.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(b, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return b.WriteByte('"')
+}
+
+func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error {
+	first := true
+	for {
+		if in.EOF() {
+			if topLevel {
+				return nil
+			}
+			// this is a nested message: we are expecting an end-group tag, not EOF!
+			return io.ErrUnexpectedEOF
+		}
+		tag, wireType, err := in.DecodeTagAndWireType()
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireEndGroup {
+			return nil
+		}
+		err = b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(b, "%d", tag)
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireStartGroup {
+			err = b.WriteByte('{')
+			if err != nil {
+				return err
+			}
+			err = b.start()
+			if err != nil {
+				return err
+			}
+			err = marshalUnknownGroupText(b, in, false)
+			if err != nil {
+				return err
+			}
+			err = b.end()
+			if err != nil {
+				return err
+			}
+			err = b.WriteByte('}')
+			if err != nil {
+				return err
+			}
+			continue
+		} else {
+			err = b.sep()
+			if err != nil {
+				return err
+			}
+			if wireType == proto.WireBytes {
+				contents, err := in.DecodeRawBytes(false)
+				if err != nil {
+					return err
+				}
+				err = writeString(b, string(contents))
+				if err != nil {
+					return err
+				}
+			} else {
+				var v uint64
+				switch wireType {
+				case proto.WireVarint:
+					v, err = in.DecodeVarint()
+				case proto.WireFixed32:
+					v, err = in.DecodeFixed32()
+				case proto.WireFixed64:
+					v, err = in.DecodeFixed64()
+				default:
+					return proto.ErrInternalBadWireType
+				}
+				if err != nil {
+					return err
+				}
+				_, err = b.WriteString(strconv.FormatUint(v, 10))
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format.
+func (m *Message) UnmarshalText(text []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeText(text); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+	return m.unmarshalText(newReader(text), tokenEOF)
+}
+
+func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
+	for {
+		tok := tr.next()
+		if tok.tokTyp == end {
+			return nil
+		}
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		var fd *desc.FieldDescriptor
+		var extendedAnyType *desc.MessageDescriptor
+		if tok.tokTyp == tokenInt {
+			// tag number (indicates unknown field)
+			tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
+			if err != nil {
+				return err
+			}
+			itag := int32(tag)
+			fd = m.FindFieldDescriptor(itag)
+			if fd == nil {
+				// can't parse the value w/out field descriptor, so skip it
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				} else if tok.tokTyp == tokenOpenBrace {
+					if err := skipMessageText(tr, true); err != nil {
+						return err
+					}
+				} else if tok.tokTyp == tokenColon {
+					if err := skipFieldValueText(tr); err != nil {
+						return err
+					}
+				} else {
+					return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+				}
+				tok = tr.peek()
+				if tok.tokTyp.IsSep() {
+					tr.next() // consume separator
+				}
+				continue
+			}
+		} else {
+			fieldName, err := unmarshalFieldNameText(tr, tok)
+			if err != nil {
+				return err
+			}
+			fd = m.FindFieldDescriptorByName(fieldName)
+			if fd == nil {
+				// See if it's a group name
+				for _, field := range m.md.GetFields() {
+					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+						fd = field
+						break
+					}
+				}
+				if fd == nil {
+					// maybe this is an extended Any
+					if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
+						// strip surrounding "[" and "]" and extract type name from URL
+						typeUrl := fieldName[1 : len(fieldName)-1]
+						mname := typeUrl
+						if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+							mname = mname[slash+1:]
+						}
+						// TODO: add a way to weave an AnyResolver to this point
+						extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
+						if extendedAnyType == nil {
+							return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
+						}
+						// field 1 is "type_url"
+						typeUrlField := m.md.FindFieldByNumber(1)
+						if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
+							return err
+						}
+					} else {
+						// TODO: add a flag to just ignore unrecognized field names
+						return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
+					}
+				}
+			}
+		}
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		if extendedAnyType != nil {
+			// consume optional colon; make sure this is a "start message" token
+			if tok.tokTyp == tokenColon {
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				}
+			}
+			if tok.tokTyp.EndToken() == tokenError {
+				return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
+			}
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(extendedAnyType)
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			// now we marshal the message to bytes and store in the Any
+			b, err := g.Marshal()
+			if err != nil {
+				return err
+			}
+			// field 2 is "value"
+			anyValueField := m.md.FindFieldByNumber(2)
+			if err := m.TrySetField(anyValueField, b); err != nil {
+				return err
+			}
+
+		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+			tok.tokTyp.EndToken() != tokenError {
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			if fd.IsRepeated() {
+				if err := m.TryAddRepeatedField(fd, g); err != nil {
+					return err
+				}
+			} else {
+				if err := m.TrySetField(fd, g); err != nil {
+					return err
+				}
+			}
+		} else {
+			if tok.tokTyp != tokenColon {
+				return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
+			}
+			if err := m.unmarshalFieldValueText(fd, tr); err != nil {
+				return err
+			}
+		}
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+	md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+	if md == nil {
+		// couldn't find it; see if we have this message linked in
+		md, _ = desc.LoadMessageDescriptor(name)
+	}
+	return md
+}
+
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	if _, ok := seen[fd]; ok {
+		// already checked this file
+		return nil
+	}
+	seen[fd] = struct{}{}
+	md := fd.FindMessage(name)
+	if md != nil {
+		return md
+	}
+	// not in this file so recursively search its deps
+	for _, dep := range fd.GetDependencies() {
+		md = findMessageInTransitiveDeps(name, dep, seen)
+		if md != nil {
+			return md
+		}
+	}
+	// couldn't find it
+	return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+	var msg string
+	if tok.tokTyp == tokenError {
+		msg = tok.val.(error).Error()
+	} else {
+		msg = fmt.Sprintf(format, args...)
+	}
+	return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
+type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
+func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
+	var set setFunction
+	if fd.IsRepeated() {
+		set = (*Message).addRepeatedField
+	} else {
+		set = mergeField
+	}
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+		}
+	}
+	return m.unmarshalFieldElementText(fd, tr, set)
+}
+
+func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	}
+
+	var expected string
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if tok.tokTyp == tokenIdent {
+			if tok.val.(string) == "true" {
+				return set(m, fd, true)
+			} else if tok.val.(string) == "false" {
+				return set(m, fd, false)
+			}
+		}
+		expected = "boolean value"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, []byte(tok.val.(string)))
+		}
+		expected = "bytes string value"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, tok.val)
+		}
+		expected = "string value"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, float32(tok.val.(float64)))
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, float32(f))
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, float32(math.Inf(1)))
+			} else if ident == "nan" {
+				return set(m, fd, float32(math.NaN()))
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, float32(math.Inf(-1)))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, tok.val)
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, f)
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, math.Inf(1))
+			} else if ident == "nan" {
+				return set(m, fd, math.NaN())
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, math.Inf(-1))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, uint32(i))
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if tok.tokTyp == tokenIdent {
+			// TODO: add a flag to just ignore unrecognized enum value names?
+			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
+			if vd != nil {
+				return set(m, fd, vd.GetNumber())
+			}
+		} else if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+		endTok := tok.tokTyp.EndToken()
+		if endTok != tokenError {
+			dm := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := dm.unmarshalText(tr, endTok); err != nil {
+				return err
+			}
+			// TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
+			// proto package to unmarshal it. But the text parser isn't particularly amenable
+			// to that, so we instead convert a dynamic message to a generated one if the
+			// known-type registry knows about the generated type...
+			var ktr *KnownTypeRegistry
+			if m.mf != nil {
+				ktr = m.mf.ktr
+			}
+			pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
+			if pm != nil {
+				if err := dm.ConvertTo(pm); err != nil {
+					return set(m, fd, pm)
+				}
+			}
+			return set(m, fd, dm)
+		}
+		expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
+	default:
+		return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
+	}
+
+	// if we get here, token was wrong type; create error message
+	var article string
+	if strings.Contains("aieou", expected[0:1]) {
+		article = "an"
+	} else {
+		article = "a"
+	}
+	return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
+}
+
+func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
+	if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
+		// extension name
+		var closeType tokenType
+		var closeChar string
+		if tok.tokTyp == tokenOpenBracket {
+			closeType = tokenCloseBracket
+			closeChar = "close bracket ']'"
+		} else {
+			closeType = tokenCloseParen
+			closeChar = "close paren ')'"
+		}
+		// must be followed by an identifier
+		idents := make([]string, 0, 1)
+		for {
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp != tokenIdent {
+				return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
+			}
+			idents = append(idents, tok.val.(string))
+			// and then close bracket/paren, or "/" to keep adding URL elements to name
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp == closeType {
+				break
+			} else if tok.tokTyp != tokenSlash {
+				return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
+			}
+		}
+		return "[" + strings.Join(idents, "/") + "]", nil
+	} else if tok.tokTyp == tokenIdent {
+		// normal field name
+		return tok.val.(string), nil
+	} else {
+		return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
+	}
+}
+
+func skipFieldNameText(tr *txtReader) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	} else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+		return nil
+	} else {
+		_, err := unmarshalFieldNameText(tr, tok)
+		return err
+	}
+}
+
+func skipFieldValueText(tr *txtReader) error {
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := skipFieldElementText(tr); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+
+		}
+	}
+	return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+	tok := tr.next()
+	switch tok.tokTyp {
+	case tokenEOF:
+		return io.ErrUnexpectedEOF
+	case tokenInt, tokenFloat, tokenString, tokenIdent:
+		return nil
+	case tokenOpenAngle:
+		return skipMessageText(tr, false)
+	default:
+		return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+	}
+}
+
+func skipMessageText(tr *txtReader, isGroup bool) error {
+	for {
+		tok := tr.peek()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if isGroup && tok.tokTyp == tokenCloseBrace {
+			return nil
+		} else if !isGroup && tok.tokTyp == tokenCloseAngle {
+			return nil
+		}
+
+		// field name or tag
+		if err := skipFieldNameText(tr); err != nil {
+			return err
+		}
+
+		// field value
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if tok.tokTyp == tokenOpenBrace {
+			if err := skipMessageText(tr, true); err != nil {
+				return err
+			}
+		} else if tok.tokTyp == tokenColon {
+			if err := skipFieldValueText(tr); err != nil {
+				return err
+			}
+		} else {
+			return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+		}
+
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+
+type tokenType int
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenIdent
+	tokenString
+	tokenInt
+	tokenFloat
+	tokenColon
+	tokenComma
+	tokenSemiColon
+	tokenOpenBrace
+	tokenCloseBrace
+	tokenOpenBracket
+	tokenCloseBracket
+	tokenOpenAngle
+	tokenCloseAngle
+	tokenOpenParen
+	tokenCloseParen
+	tokenSlash
+	tokenMinus
+)
+
+func (t tokenType) IsSep() bool {
+	return t == tokenComma || t == tokenSemiColon
+}
+
+func (t tokenType) EndToken() tokenType {
+	switch t {
+	case tokenOpenAngle:
+		return tokenCloseAngle
+	case tokenOpenBrace:
+		return tokenCloseBrace
+	default:
+		return tokenError
+	}
+}
+
+type token struct {
+	tokTyp tokenType
+	val    interface{}
+	txt    string
+	pos    scanner.Position
+}
+
+type txtReader struct {
+	scanner    scanner.Scanner
+	peeked     token
+	havePeeked bool
+}
+
+func newReader(text []byte) *txtReader {
+	sc := scanner.Scanner{}
+	sc.Init(bytes.NewReader(text))
+	sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
+		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+	// identifiers have the same restrictions as Go identifiers, except that we also
+	// allow dots since we accept fully-qualified names
+	sc.IsIdentRune = func(ch rune, i int) bool {
+		return ch == '_' || unicode.IsLetter(ch) ||
+			(i > 0 && unicode.IsDigit(ch)) ||
+			(i > 0 && ch == '.')
+	}
+	// ignore errors; we handle them if/when we see malformed tokens
+	sc.Error = func(s *scanner.Scanner, msg string) {}
+	return &txtReader{scanner: sc}
+}
+
+func (p *txtReader) peek() *token {
+	if p.havePeeked {
+		return &p.peeked
+	}
+	t := p.scanner.Scan()
+	if t == scanner.EOF {
+		p.peeked.tokTyp = tokenEOF
+		p.peeked.val = nil
+		p.peeked.txt = ""
+		p.peeked.pos = p.scanner.Position
+	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
+		p.peeked.tokTyp = tokenError
+		p.peeked.val = err
+	}
+	p.havePeeked = true
+	return &p.peeked
+}
+
+func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
+	p.peeked.pos = pos
+	p.peeked.txt = text
+	switch t {
+	case scanner.Ident:
+		p.peeked.tokTyp = tokenIdent
+		p.peeked.val = text
+	case scanner.Int:
+		p.peeked.tokTyp = tokenInt
+		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+	case scanner.Float:
+		p.peeked.tokTyp = tokenFloat
+		var err error
+		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+			return err
+		}
+	case scanner.Char, scanner.String:
+		p.peeked.tokTyp = tokenString
+		var err error
+		if p.peeked.val, err = strconv.Unquote(text); err != nil {
+			return err
+		}
+	case '-': // unary minus, for negative ints and floats
+		ch := p.scanner.Peek()
+		if ch < '0' || ch > '9' {
+			p.peeked.tokTyp = tokenMinus
+			p.peeked.val = '-'
+		} else {
+			t := p.scanner.Scan()
+			if t == scanner.EOF {
+				return io.ErrUnexpectedEOF
+			} else if t == scanner.Float {
+				p.peeked.tokTyp = tokenFloat
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				var err error
+				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+					p.peeked.pos = p.scanner.Position
+					return err
+				}
+			} else if t == scanner.Int {
+				p.peeked.tokTyp = tokenInt
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+			} else {
+				p.peeked.pos = p.scanner.Position
+				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
+			}
+		}
+	case ':':
+		p.peeked.tokTyp = tokenColon
+		p.peeked.val = ':'
+	case ',':
+		p.peeked.tokTyp = tokenComma
+		p.peeked.val = ','
+	case ';':
+		p.peeked.tokTyp = tokenSemiColon
+		p.peeked.val = ';'
+	case '{':
+		p.peeked.tokTyp = tokenOpenBrace
+		p.peeked.val = '{'
+	case '}':
+		p.peeked.tokTyp = tokenCloseBrace
+		p.peeked.val = '}'
+	case '<':
+		p.peeked.tokTyp = tokenOpenAngle
+		p.peeked.val = '<'
+	case '>':
+		p.peeked.tokTyp = tokenCloseAngle
+		p.peeked.val = '>'
+	case '[':
+		p.peeked.tokTyp = tokenOpenBracket
+		p.peeked.val = '['
+	case ']':
+		p.peeked.tokTyp = tokenCloseBracket
+		p.peeked.val = ']'
+	case '(':
+		p.peeked.tokTyp = tokenOpenParen
+		p.peeked.val = '('
+	case ')':
+		p.peeked.tokTyp = tokenCloseParen
+		p.peeked.val = ')'
+	case '/':
+		// only allowed to separate URL components in expanded Any format
+		p.peeked.tokTyp = tokenSlash
+		p.peeked.val = '/'
+	default:
+		return fmt.Errorf("invalid character: %c", t)
+	}
+	return nil
+}
+
+func (p *txtReader) next() *token {
+	t := p.peek()
+	if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+		p.havePeeked = false
+	}
+	return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
new file mode 100644
index 0000000..3fca3eb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -0,0 +1,666 @@
+package grpcreflect
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"runtime"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	"google.golang.org/grpc/status"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/internal"
+)
+
+// elementNotFoundError is the error returned by reflective operations where the
+// server does not recognize a given file name, symbol name, or extension.
+type elementNotFoundError struct {
+	name    string
+	kind    elementKind
+	symType symbolType // only used when kind == elementKindSymbol
+	tag     int32      // only used when kind == elementKindExtension
+
+	// only errors with a kind of elementKindFile will have a cause, which means
+	// the named file could not be resolved because of a dependency that could
+	// not be found; cause describes the missing dependency
+	cause *elementNotFoundError
+}
+
+type elementKind int
+
+const (
+	elementKindSymbol elementKind = iota
+	elementKindFile
+	elementKindExtension
+)
+
+type symbolType string
+
+const (
+	symbolTypeService = "Service"
+	symbolTypeMessage = "Message"
+	symbolTypeEnum    = "Enum"
+	symbolTypeUnknown = "Symbol"
+)
+
+func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+	first := true
+	var b bytes.Buffer
+	for ; e != nil; e = e.cause {
+		if first {
+			first = false
+		} else {
+			fmt.Fprint(&b, "\ncaused by: ")
+		}
+		switch e.kind {
+		case elementKindSymbol:
+			fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+		case elementKindExtension:
+			fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+		default:
+			fmt.Fprintf(&b, "File not found: %s", e.name)
+		}
+	}
+	return b.String()
+}
+
+// IsElementNotFoundError determines if the given error indicates that a file
+// name, symbol name, or extension field could not be found by the server.
+func IsElementNotFoundError(err error) bool {
+	_, ok := err.(*elementNotFoundError)
+	return ok
+}
+
+// ProtocolError is an error returned when the server sends a response of the
+// wrong type.
+type ProtocolError struct {
+	missingType reflect.Type
+}
+
+func (p ProtocolError) Error() string {
+	return fmt.Sprintf("Protocol error: response was missing %v", p.missingType)
+}
+
+type extDesc struct {
+	extendedMessageName string
+	extensionNumber     int32
+}
+
+// Client is a client connection to a server for performing reflection calls
+// and resolving remote symbols.
+type Client struct {
+	ctx  context.Context
+	stub rpb.ServerReflectionClient
+
+	connMu sync.Mutex
+	cancel context.CancelFunc
+	stream rpb.ServerReflection_ServerReflectionInfoClient
+
+	cacheMu          sync.RWMutex
+	protosByName     map[string]*dpb.FileDescriptorProto
+	filesByName      map[string]*desc.FileDescriptor
+	filesBySymbol    map[string]*desc.FileDescriptor
+	filesByExtension map[extDesc]*desc.FileDescriptor
+}
+
+// NewClient creates a new Client with the given root context and using the
+// given RPC stub for talking to the server.
+func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+	cr := &Client{
+		ctx:              ctx,
+		stub:             stub,
+		protosByName:     map[string]*dpb.FileDescriptorProto{},
+		filesByName:      map[string]*desc.FileDescriptor{},
+		filesBySymbol:    map[string]*desc.FileDescriptor{},
+		filesByExtension: map[extDesc]*desc.FileDescriptor{},
+	}
+	// don't leak a grpc stream
+	runtime.SetFinalizer(cr, (*Client).Reset)
+	return cr
+}
+
+// FileByFilename asks the server for a file descriptor for the proto file with
+// the given name.
+func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	if fd, ok := cr.filesByName[filename]; ok {
+		cr.cacheMu.RUnlock()
+		return fd, nil
+	}
+	fdp, ok := cr.protosByName[filename]
+	cr.cacheMu.RUnlock()
+	// not there? see if we've downloaded the proto
+	if ok {
+		return cr.descriptorFromProto(fdp)
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+			FileByFilename: filename,
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
+	if isNotFound(err) {
+		// file not found? see if we can look up via alternate name
+		if alternate, ok := internal.StdFileAliases[filename]; ok {
+			req := &rpb.ServerReflectionRequest{
+				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+					FileByFilename: alternate,
+				},
+			}
+			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
+			if isNotFound(err) {
+				err = fileNotFound(filename, nil)
+			}
+		} else {
+			err = fileNotFound(filename, nil)
+		}
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = fileNotFound(filename, e)
+	}
+	return fd, err
+}
+
+// FileContainingSymbol asks the server for a file descriptor for the proto file
+// that declares the given fully-qualified symbol.
+func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	fd, ok := cr.filesBySymbol[symbol]
+	cr.cacheMu.RUnlock()
+	if ok {
+		return fd, nil
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+			FileContainingSymbol: symbol,
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	if isNotFound(err) {
+		err = symbolNotFound(symbol, symbolTypeUnknown, nil)
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = symbolNotFound(symbol, symbolTypeUnknown, e)
+	}
+	return fd, err
+}
+
+// FileContainingExtension asks the server for a file descriptor for the proto
+// file that declares an extension with the given number for the given
+// fully-qualified message name.
+func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+	cr.cacheMu.RUnlock()
+	if ok {
+		return fd, nil
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
+			FileContainingExtension: &rpb.ExtensionRequest{
+				ContainingType:  extendedMessageName,
+				ExtensionNumber: extensionNumber,
+			},
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	if isNotFound(err) {
+		err = extensionNotFound(extendedMessageName, extensionNumber, nil)
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = extensionNotFound(extendedMessageName, extensionNumber, e)
+	}
+	return fd, err
+}
+
+func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
+	resp, err := cr.send(req)
+	if err != nil {
+		return nil, err
+	}
+
+	fdResp := resp.GetFileDescriptorResponse()
+	if fdResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()}
+	}
+
+	// Response can contain the result file descriptor, but also its transitive
+	// deps. Furthermore, the protocol states that the server need not re-send
+	// transitive deps that were already included in prior responses. So we
+	// need to cache all file descriptors and then return the first one (which
+	// should be the answer). If we're looking for a file by name, we can be
+	// smarter and make sure to grab one by name instead of just grabbing the
+	// first one.
+	var firstFd *dpb.FileDescriptorProto
+	for _, fdBytes := range fdResp.FileDescriptorProto {
+		fd := &dpb.FileDescriptorProto{}
+		if err = proto.Unmarshal(fdBytes, fd); err != nil {
+			return nil, err
+		}
+
+		if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName {
+			// the file was found under its alternate name, so update the proto to use the requested name
+			fd.Name = proto.String(alias)
+		}
+
+		cr.cacheMu.Lock()
+		// see if this file was created and cached concurrently
+		if firstFd == nil {
+			if d, ok := cr.filesByName[fd.GetName()]; ok {
+				cr.cacheMu.Unlock()
+				return d, nil
+			}
+		}
+		// store in cache of raw descriptor protos, but don't overwrite existing protos
+		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
+			fd = existingFd
+		} else {
+			cr.protosByName[fd.GetName()] = fd
+		}
+		cr.cacheMu.Unlock()
+		if firstFd == nil {
+			firstFd = fd
+		}
+	}
+	if firstFd == nil {
+		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
+	}
+
+	return cr.descriptorFromProto(firstFd)
+}
+
+func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+	deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+	for i, depName := range fd.GetDependency() {
+		if dep, err := cr.FileByFilename(depName); err != nil {
+			return nil, err
+		} else {
+			deps[i] = dep
+		}
+	}
+	d, err := desc.CreateFileDescriptor(fd, deps...)
+	if err != nil {
+		return nil, err
+	}
+	d = cr.cacheFile(d)
+	return d, nil
+}
+
+func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+	cr.cacheMu.Lock()
+	defer cr.cacheMu.Unlock()
+
+	// cache file descriptor by name, but don't overwrite existing entry
+	// (existing entry could come from concurrent caller)
+	if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
+		return existingFd
+	}
+	cr.filesByName[fd.GetName()] = fd
+
+	// also cache by symbols and extensions
+	for _, m := range fd.GetMessageTypes() {
+		cr.cacheMessageLocked(fd, m)
+	}
+	for _, e := range fd.GetEnumTypes() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		for _, v := range e.GetValues() {
+			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+		}
+	}
+	for _, e := range fd.GetExtensions() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+	}
+	for _, s := range fd.GetServices() {
+		cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+		for _, m := range s.GetMethods() {
+			cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+		}
+	}
+
+	return fd
+}
+
+func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
+	cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+	for _, f := range md.GetFields() {
+		cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+	}
+	for _, o := range md.GetOneOfs() {
+		cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+	}
+	for _, e := range md.GetNestedEnumTypes() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		for _, v := range e.GetValues() {
+			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+		}
+	}
+	for _, e := range md.GetNestedExtensions() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+	}
+	for _, m := range md.GetNestedMessageTypes() {
+		cr.cacheMessageLocked(fd, m) // recurse
+	}
+}
+
+// AllExtensionNumbersForType asks the server for all known extension numbers
+// for the given fully-qualified message name.
+func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+			AllExtensionNumbersOfType: extendedMessageName,
+		},
+	}
+	resp, err := cr.send(req)
+	if err != nil {
+		if isNotFound(err) {
+			return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
+		}
+		return nil, err
+	}
+
+	extResp := resp.GetAllExtensionNumbersResponse()
+	if extResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+	}
+	return extResp.ExtensionNumber, nil
+}
+
+// ListServices asks the server for the fully-qualified names of all exposed
+// services.
+func (cr *Client) ListServices() ([]string, error) {
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+			// proto doesn't indicate any purpose for this value and server impl
+			// doesn't actually use it...
+			ListServices: "*",
+		},
+	}
+	resp, err := cr.send(req)
+	if err != nil {
+		return nil, err
+	}
+
+	listResp := resp.GetListServicesResponse()
+	if listResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+	}
+	serviceNames := make([]string, len(listResp.Service))
+	for i, s := range listResp.Service {
+		serviceNames[i] = s.Name
+	}
+	return serviceNames, nil
+}
+
+func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	// we allow one immediate retry, in case we have a stale stream
+	// (e.g. closed by server)
+	resp, err := cr.doSend(true, req)
+	if err != nil {
+		return nil, err
+	}
+
+	// convert error response messages into errors
+	errResp := resp.GetErrorResponse()
+	if errResp != nil {
+		return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
+	}
+
+	return resp, nil
+}
+
+func isNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	s, ok := status.FromError(err)
+	return ok && s.Code() == codes.NotFound
+}
+
+func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
+	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
+	// delivered in the correct order.
+	cr.connMu.Lock()
+	defer cr.connMu.Unlock()
+	return cr.doSendLocked(retry, req)
+}
+
+func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	if err := cr.initStreamLocked(); err != nil {
+		return nil, err
+	}
+
+	if err := cr.stream.Send(req); err != nil {
+		if err == io.EOF {
+			// if send returns EOF, must call Recv to get real underlying error
+			_, err = cr.stream.Recv()
+		}
+		cr.resetLocked()
+		if retry {
+			return cr.doSendLocked(false, req)
+		}
+		return nil, err
+	}
+
+	if resp, err := cr.stream.Recv(); err != nil {
+		cr.resetLocked()
+		if retry {
+			return cr.doSendLocked(false, req)
+		}
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
+
+func (cr *Client) initStreamLocked() error {
+	if cr.stream != nil {
+		return nil
+	}
+	var newCtx context.Context
+	newCtx, cr.cancel = context.WithCancel(cr.ctx)
+	var err error
+	cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+	return err
+}
+
+// Reset ensures that any active stream with the server is closed, releasing any
+// resources.
+func (cr *Client) Reset() {
+	cr.connMu.Lock()
+	defer cr.connMu.Unlock()
+	cr.resetLocked()
+}
+
+func (cr *Client) resetLocked() {
+	if cr.stream != nil {
+		cr.stream.CloseSend()
+		for {
+			// drain the stream; this also covers io.EOF
+			if _, err := cr.stream.Recv(); err != nil {
+				break
+			}
+		}
+		cr.stream = nil
+	}
+	if cr.cancel != nil {
+		cr.cancel()
+		cr.cancel = nil
+	}
+}
+
+// ResolveService asks the server to resolve the given fully-qualified service
+// name into a service descriptor.
+func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) {
+	file, err := cr.FileContainingSymbol(serviceName)
+	if err != nil {
+		return nil, setSymbolType(err, serviceName, symbolTypeService)
+	}
+	d := file.FindSymbol(serviceName)
+	if d == nil {
+		return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+	}
+	if s, ok := d.(*desc.ServiceDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+	}
+}
+
+// ResolveMessage asks the server to resolve the given fully-qualified message
+// name into a message descriptor.
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) {
+	file, err := cr.FileContainingSymbol(messageName)
+	if err != nil {
+		return nil, setSymbolType(err, messageName, symbolTypeMessage)
+	}
+	d := file.FindSymbol(messageName)
+	if d == nil {
+		return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+	}
+	if s, ok := d.(*desc.MessageDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+	}
+}
+
+// ResolveEnum asks the server to resolve the given fully-qualified enum name
+// into an enum descriptor.
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) {
+	file, err := cr.FileContainingSymbol(enumName)
+	if err != nil {
+		return nil, setSymbolType(err, enumName, symbolTypeEnum)
+	}
+	d := file.FindSymbol(enumName)
+	if d == nil {
+		return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+	}
+	if s, ok := d.(*desc.EnumDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+	}
+}
+
+func setSymbolType(err error, name string, symType symbolType) error {
+	if e, ok := err.(*elementNotFoundError); ok {
+		if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown {
+			e.symType = symType
+		}
+	}
+	return err
+}
+
+// ResolveEnumValues asks the server to resolve the given fully-qualified enum
+// name into a map of names to numbers that represents the enum's values.
+func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) {
+	enumDesc, err := cr.ResolveEnum(enumName)
+	if err != nil {
+		return nil, err
+	}
+	vals := map[string]int32{}
+	for _, valDesc := range enumDesc.GetValues() {
+		vals[valDesc.GetName()] = valDesc.GetNumber()
+	}
+	return vals, nil
+}
+
+// ResolveExtension asks the server to resolve the given extension number and
+// fully-qualified message name into a field descriptor.
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) {
+	file, err := cr.FileContainingExtension(extendedType, extensionNumber)
+	if err != nil {
+		return nil, err
+	}
+	d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file})
+	if d == nil {
+		return nil, extensionNotFound(extendedType, extensionNumber, nil)
+	} else {
+		return d, nil
+	}
+}
+
+func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
+	// search extensions in this scope
+	for _, ext := range scope.extensions() {
+		if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType {
+			return ext
+		}
+	}
+
+	// if not found, search nested scopes
+	for _, nested := range scope.nestedScopes() {
+		ext := findExtension(extendedType, extensionNumber, nested)
+		if ext != nil {
+			return ext
+		}
+	}
+
+	return nil
+}
+
+type extensionScope interface {
+	extensions() []*desc.FieldDescriptor
+	nestedScopes() []extensionScope
+}
+
+// fileDescriptorExtensions implements the extensionScope interface on top of
+// a desc.FileDescriptor
+type fileDescriptorExtensions struct {
+	proto *desc.FileDescriptor
+}
+
+func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+	return fde.proto.GetExtensions()
+}
+
+func (fde fileDescriptorExtensions) nestedScopes() []extensionScope {
+	scopes := make([]extensionScope, len(fde.proto.GetMessageTypes()))
+	for i, m := range fde.proto.GetMessageTypes() {
+		scopes[i] = msgDescriptorExtensions{m}
+	}
+	return scopes
+}
+
+// msgDescriptorExtensions implements the extensionScope interface on top of
+// a desc.MessageDescriptor
+type msgDescriptorExtensions struct {
+	proto *desc.MessageDescriptor
+}
+
+func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+	return mde.proto.GetNestedExtensions()
+}
+
+func (mde msgDescriptorExtensions) nestedScopes() []extensionScope {
+	scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes()))
+	for i, m := range mde.proto.GetNestedMessageTypes() {
+		scopes[i] = msgDescriptorExtensions{m}
+	}
+	return scopes
+}
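
For context, a minimal usage sketch of the reflection client added above (not part of this change). The *grpc.ClientConn is assumed to already exist, and the log handling is illustrative only.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"

	"github.com/jhump/protoreflect/grpcreflect"
)

// listRemoteServices asks a reflection-enabled server for its services and
// prints how many methods each one exposes.
func listRemoteServices(conn *grpc.ClientConn) {
	// stub for the standard gRPC reflection service exposed by the server
	stub := rpb.NewServerReflectionClient(conn)
	client := grpcreflect.NewClient(context.Background(), stub)
	defer client.Reset() // close the underlying reflection stream

	svcs, err := client.ListServices()
	if err != nil {
		log.Fatalf("reflection failed: %v", err)
	}
	for _, s := range svcs {
		sd, err := client.ResolveService(s)
		if err != nil {
			log.Fatalf("resolve %q: %v", s, err)
		}
		fmt.Printf("%s has %d methods\n", s, len(sd.GetMethods()))
	}
}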
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
new file mode 100644
index 0000000..ec7bd02
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
@@ -0,0 +1,10 @@
+// Package grpcreflect provides GRPC-specific extensions to protobuf reflection.
+// This includes a way to access rich service descriptors for all services that
+// a GRPC server exports.
+//
+// Also included is an easy-to-use client for the GRPC reflection service
+// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that
+// supports the reflection service) for metadata on its exported services, which
+// could be used to construct a dynamic client. (See the grpcdynamic package in
+// this same repo for more on that.)
+package grpcreflect
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 0000000..c9ef619
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,61 @@
+package grpcreflect
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
+func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+	descs := map[string]*desc.ServiceDescriptor{}
+	for name, info := range s.GetServiceInfo() {
+		file, ok := info.Metadata.(string)
+		if !ok {
+			return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+		}
+		fd, err := desc.LoadFileDescriptor(file)
+		if err != nil {
+			return nil, err
+		}
+		d := fd.FindSymbol(name)
+		if d == nil {
+			return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+		}
+		sd, ok := d.(*desc.ServiceDescriptor)
+		if !ok {
+			return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d)
+		}
+		descs[name] = sd
+	}
+	return descs, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) {
+	file, ok := svc.Metadata.(string)
+	if !ok {
+		return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata)
+	}
+	fd, err := desc.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+	d := fd.FindSymbol(svc.ServiceName)
+	if d == nil {
+		return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName)
+	}
+	sd, ok := d.(*desc.ServiceDescriptor)
+	if !ok {
+		return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d)
+	}
+	return sd, nil
+}
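
A brief, hedged sketch of how LoadServiceDescriptors above might be used to enumerate a server's methods; the *grpc.Server and its registered services are assumed to exist already.

package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/jhump/protoreflect/grpcreflect"
)

// dumpServerServices prints the full method names of every service that has
// been registered on the given gRPC server.
func dumpServerServices(srv *grpc.Server) {
	descs, err := grpcreflect.LoadServiceDescriptors(srv)
	if err != nil {
		log.Fatalf("loading service descriptors: %v", err)
	}
	for name, sd := range descs {
		for _, m := range sd.GetMethods() {
			fmt.Printf("%s/%s\n", name, m.GetName())
		}
	}
}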
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go
new file mode 100644
index 0000000..09f8849
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go
@@ -0,0 +1,118 @@
+package codec
+
+import (
+	"fmt"
+	"io"
+)
+
+// Buffer is a reader and a writer that wraps a slice of bytes and also
+// provides API for decoding and encoding the protobuf binary format.
+//
+// Its operation is similar to that of a bytes.Buffer: writing pushes
+// data to the end of the buffer while reading pops data from the head
+// of the buffer. So the same buffer can be used to both read and write.
+type Buffer struct {
+	buf   []byte
+	index int
+
+	// tmp is used when another byte slice is needed, such as when
+	// serializing messages, since we need to know the length before
+	// we can write the length prefix; by caching this, including
+	// after it is grown by serialization operations, we reduce the
+	// number of allocations needed
+	tmp []byte
+
+	deterministic bool
+}
+
+// NewBuffer creates a new buffer with the given slice of bytes as the
+// buffer's initial contents.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{buf: buf}
+}
+
+// SetDeterministic sets this buffer to encode messages deterministically. This
+// is useful for tests, but the overhead is non-zero, so it should generally not
+// be used outside of tests. When true, map fields in a message must have their
+// keys sorted before serialization to ensure deterministic output. Otherwise,
+// values in a map field will be serialized in map iteration order.
+func (cb *Buffer) SetDeterministic(deterministic bool) {
+	cb.deterministic = deterministic
+}
+
+// IsDeterministic returns whether or not this buffer is configured to encode
+// messages deterministically.
+func (cb *Buffer) IsDeterministic() bool {
+	return cb.deterministic
+}
+
+// Reset resets this buffer back to empty. Any subsequent writes/encodes
+// to the buffer will allocate a new backing slice of bytes.
+func (cb *Buffer) Reset() {
+	cb.buf = []byte(nil)
+	cb.index = 0
+}
+
+// Bytes returns the slice of bytes remaining in the buffer. Note that
+// this does not perform a copy: if the contents of the returned slice
+// are modified, the modifications will be visible to subsequent reads
+// via the buffer.
+func (cb *Buffer) Bytes() []byte {
+	return cb.buf[cb.index:]
+}
+
+// String returns the remaining bytes in the buffer as a string.
+func (cb *Buffer) String() string {
+	return string(cb.Bytes())
+}
+
+// EOF returns true if there are no more bytes remaining to read.
+func (cb *Buffer) EOF() bool {
+	return cb.index >= len(cb.buf)
+}
+
+// Skip attempts to skip the given number of bytes in the input. If
+// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
+// is returned and the buffer is unchanged. Otherwise, the given number
+// of bytes are skipped and nil is returned.
+func (cb *Buffer) Skip(count int) error {
+	if count < 0 {
+		return fmt.Errorf("proto: bad byte length %d", count)
+	}
+	newIndex := cb.index + count
+	if newIndex < cb.index || newIndex > len(cb.buf) {
+		return io.ErrUnexpectedEOF
+	}
+	cb.index = newIndex
+	return nil
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+	return len(cb.buf) - cb.index
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads min(len(dest), cb.Len()) bytes from the input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+	if cb.index == len(cb.buf) {
+		return 0, io.EOF
+	}
+	copied := copy(dest, cb.buf[cb.index:])
+	cb.index += copied
+	return copied, nil
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+	cb.buf = append(cb.buf, data...)
+	return len(data), nil
+}
+
+var _ io.Writer = (*Buffer)(nil)
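
To make the Buffer API concrete, here is an illustrative helper (not part of the library) written as if it lived in the codec package itself, since the package is internal and cannot be imported from outside protoreflect. It leans on the decode helpers defined in decode.go below.

package codec

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// DumpFields walks the top-level fields of the wire-format bytes remaining in
// the Buffer, printing each tag, wire type, and raw value. Groups and nested
// messages are not recursed into.
func DumpFields(b *Buffer) error {
	for !b.EOF() {
		tag, wt, err := b.DecodeTagAndWireType()
		if err != nil {
			return err
		}
		switch wt {
		case proto.WireVarint:
			v, err := b.DecodeVarint()
			if err != nil {
				return err
			}
			fmt.Printf("field %d (varint): %d\n", tag, v)
		case proto.WireFixed32:
			v, err := b.DecodeFixed32()
			if err != nil {
				return err
			}
			fmt.Printf("field %d (fixed32): %d\n", tag, v)
		case proto.WireFixed64:
			v, err := b.DecodeFixed64()
			if err != nil {
				return err
			}
			fmt.Printf("field %d (fixed64): %d\n", tag, v)
		case proto.WireBytes:
			v, err := b.DecodeRawBytes(false)
			if err != nil {
				return err
			}
			fmt.Printf("field %d (bytes): %d bytes\n", tag, len(v))
		default:
			return fmt.Errorf("unsupported wire type %d", wt)
		}
	}
	return nil
}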
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/decode.go b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
new file mode 100644
index 0000000..a25f680
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
@@ -0,0 +1,346 @@
+package codec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = errors.New("proto: integer overflow")
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = errors.New("proto: bad wiretype")
+
+func (cb *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := cb.index
+	l := len(cb.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := cb.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			cb.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = ErrOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) DecodeVarint() (uint64, error) {
+	i := cb.index
+	buf := cb.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		cb.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return cb.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x := uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, ErrOverflow
+
+done:
+	cb.index = i
+	return x, nil
+}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+	var v uint64
+	v, err = cb.DecodeVarint()
+	if err != nil {
+		return
+	}
+	// low 3 bits is the wire type
+	wireType = int8(v & 7)
+	// rest is int32 tag number
+	v = v >> 3
+	if v > math.MaxInt32 {
+		err = fmt.Errorf("tag number out of range: %d", v)
+		return
+	}
+	tag = int32(v)
+	return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 8
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-8])
+	x |= uint64(cb.buf[i-7]) << 8
+	x |= uint64(cb.buf[i-6]) << 16
+	x |= uint64(cb.buf[i-5]) << 24
+	x |= uint64(cb.buf[i-4]) << 32
+	x |= uint64(cb.buf[i-3]) << 40
+	x |= uint64(cb.buf[i-2]) << 48
+	x |= uint64(cb.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 4
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-4])
+	x |= uint64(cb.buf[i-3]) << 8
+	x |= uint64(cb.buf[i-2]) << 16
+	x |= uint64(cb.buf[i-1]) << 24
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := cb.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := cb.index + nb
+	if end < cb.index || end > len(cb.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		buf = cb.buf[cb.index:end]
+		cb.index = end
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, cb.buf[cb.index:])
+	cb.index = end
+	return
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+	var groupEnd, dataEnd int
+	groupEnd, dataEnd, err := cb.findGroupEnd()
+	if err != nil {
+		return nil, err
+	}
+	var results []byte
+	if !alloc {
+		results = cb.buf[cb.index:dataEnd]
+	} else {
+		results = make([]byte, dataEnd-cb.index)
+		copy(results, cb.buf[cb.index:])
+	}
+	cb.index = groupEnd
+	return results, nil
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+	groupEnd, _, err := cb.findGroupEnd()
+	if err != nil {
+		return err
+	}
+	cb.index = groupEnd
+	return nil
+}
+
+// SkipField attempts to skip the value of a field with the given wire
+// type. When consuming a protobuf-encoded stream, it can be called immediately
+// after DecodeTagAndWireType to discard the subsequent data for the field.
+func (cb *Buffer) SkipField(wireType int8) error {
+	switch wireType {
+	case proto.WireFixed32:
+		if err := cb.Skip(4); err != nil {
+			return err
+		}
+	case proto.WireFixed64:
+		if err := cb.Skip(8); err != nil {
+			return err
+		}
+	case proto.WireVarint:
+		// skip varint by finding last byte (has high bit unset)
+		i := cb.index
+		limit := i + 10 // varint cannot be >10 bytes
+		for {
+			if i >= limit {
+				return ErrOverflow
+			}
+			if i >= len(cb.buf) {
+				return io.ErrUnexpectedEOF
+			}
+			if cb.buf[i]&0x80 == 0 {
+				break
+			}
+			i++
+		}
+		// TODO: This would only overflow if buffer length was MaxInt and we
+		// read the last byte. This is not a real/feasible concern on 64-bit
+		// systems. Something to worry about for 32-bit systems? Do we care?
+		cb.index = i + 1
+	case proto.WireBytes:
+		l, err := cb.DecodeVarint()
+		if err != nil {
+			return err
+		}
+		if err := cb.Skip(int(l)); err != nil {
+			return err
+		}
+	case proto.WireStartGroup:
+		if err := cb.SkipGroup(); err != nil {
+			return err
+		}
+	default:
+		return ErrBadWireType
+	}
+	return nil
+}
+
+func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) {
+	start := cb.index
+	defer func() {
+		cb.index = start
+	}()
+	for {
+		fieldStart := cb.index
+		// read a field tag
+		_, wireType, err := cb.DecodeTagAndWireType()
+		if err != nil {
+			return 0, 0, err
+		}
+		if wireType == proto.WireEndGroup {
+			return cb.index, fieldStart, nil
+		}
+		// skip past the field's data
+		if err := cb.SkipField(wireType); err != nil {
+			return 0, 0, err
+		}
+	}
+}
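
Illustrative sketch (not part of the change set): a hypothetical in-package test that decodes a hand-assembled two-field message with the helpers above; it builds the Buffer through its unexported field, which is only possible inside the codec package.

    package codec

    import (
        "bytes"
        "testing"
    )

    // Hypothetical test: read tag/wire-type pairs and their payloads directly.
    func TestDecodeSketch(t *testing.T) {
        cb := Buffer{buf: []byte{
            0x08, 0x96, 0x01, // field 1, wire type 0 (varint), value 150
            0x12, 0x03, 'a', 'b', 'c', // field 2, wire type 2 (length-delimited), "abc"
        }}

        tag, wt, err := cb.DecodeTagAndWireType()
        if err != nil || tag != 1 || wt != 0 {
            t.Fatalf("first tag: %d/%d, %v", tag, wt, err)
        }
        v, err := cb.DecodeVarint()
        if err != nil || v != 150 {
            t.Fatalf("varint: %d, %v", v, err)
        }

        tag, wt, err = cb.DecodeTagAndWireType()
        if err != nil || tag != 2 || wt != 2 {
            t.Fatalf("second tag: %d/%d, %v", tag, wt, err)
        }
        raw, err := cb.DecodeRawBytes(false)
        if err != nil || !bytes.Equal(raw, []byte("abc")) {
            t.Fatalf("raw bytes: %q, %v", raw, err)
        }
        if !cb.EOF() {
            t.Fatal("expected EOF after both fields")
        }
    }
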
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/encode.go b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go
new file mode 100644
index 0000000..524f1bc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go
@@ -0,0 +1,147 @@
+package codec
+
+import (
+	"github.com/golang/protobuf/proto"
+)
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	cb.buf = append(cb.buf, uint8(x))
+	return nil
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+	v := uint64((int64(tag) << 3) | int64(wireType))
+	return cb.EncodeVarint(v)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+	if err := cb.EncodeVarint(uint64(len(b))); err != nil {
+		return err
+	}
+	cb.buf = append(cb.buf, b...)
+	return nil
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+	bytes, err := marshalMessage(cb.buf, pm, cb.deterministic)
+	if err != nil {
+		return err
+	}
+	cb.buf = bytes
+	return nil
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+	bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic)
+	if err != nil {
+		return err
+	}
+	// save truncated buffer if it was grown (so we can re-use it and
+	// curtail future allocations)
+	if cap(bytes) > cap(cb.tmp) {
+		cb.tmp = bytes[:0]
+	}
+	return cb.EncodeRawBytes(bytes)
+}
+
+func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) {
+	// We try to use the most efficient way to marshal to existing slice.
+
+	if deterministic {
+		// see if the message has custom deterministic methods, preferring an
+		// "append" method over one that must always re-allocate
+		madm, ok := pm.(interface {
+			MarshalAppendDeterministic(b []byte) ([]byte, error)
+		})
+		if ok {
+			return madm.MarshalAppendDeterministic(b)
+		}
+
+		mdm, ok := pm.(interface {
+			MarshalDeterministic() ([]byte, error)
+		})
+		if ok {
+			bytes, err := mdm.MarshalDeterministic()
+			if err != nil {
+				return nil, err
+			}
+			if len(b) == 0 {
+				return bytes, nil
+			}
+			return append(b, bytes...), nil
+		}
+
+		var buf proto.Buffer
+		buf.SetDeterministic(true)
+		if err := buf.Marshal(pm); err != nil {
+			return nil, err
+		}
+		bytes := buf.Bytes()
+		if len(b) == 0 {
+			return bytes, nil
+		}
+		return append(b, bytes...), nil
+	}
+
+	mam, ok := pm.(interface {
+		// see if we can append the message, vs. having to re-allocate
+		MarshalAppend(b []byte) ([]byte, error)
+	})
+	if ok {
+		return mam.MarshalAppend(b)
+	}
+
+	// lowest common denominator
+	bytes, err := proto.Marshal(pm)
+	if err != nil {
+		return nil, err
+	}
+	if len(b) == 0 {
+		return bytes, nil
+	}
+	return append(b, bytes...), nil
+}
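
Illustrative sketch (not part of the change set): a hypothetical in-package test pairing the encode helpers above with the decode helpers from decode.go — fields are hand-encoded, then scanned with DecodeTagAndWireType and skipped with SkipField.

    package codec

    import (
        "testing"

        "github.com/golang/protobuf/proto"
    )

    // Hypothetical test: encode two fields, then walk them without decoding the values.
    func TestEncodeThenScanSketch(t *testing.T) {
        var cb Buffer
        _ = cb.EncodeTagAndWireType(1, proto.WireVarint)
        _ = cb.EncodeVarint(150)
        _ = cb.EncodeTagAndWireType(2, proto.WireBytes)
        _ = cb.EncodeRawBytes([]byte("abc"))

        var tags []int32
        for !cb.EOF() {
            tag, wt, err := cb.DecodeTagAndWireType()
            if err != nil {
                t.Fatal(err)
            }
            tags = append(tags, tag)
            if err := cb.SkipField(wt); err != nil {
                t.Fatal(err)
            }
        }
        if len(tags) != 2 || tags[0] != 1 || tags[1] != 2 {
            t.Fatalf("unexpected tags: %v", tags)
        }
    }
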
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..4a8b47a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases are the standard protos included with protoc, but older versions of
+// their respective packages registered them using incorrect paths.
+var StdFileAliases = map[string]string{
+	// Files for the github.com/golang/protobuf/ptypes package at one point were
+	// registered using the path where the proto files are mirrored in GOPATH,
+	// inside the golang/protobuf repo.
+	// (Fixed as of https://github.com/golang/protobuf/pull/412)
+	"google/protobuf/any.proto":       "github.com/golang/protobuf/ptypes/any/any.proto",
+	"google/protobuf/duration.proto":  "github.com/golang/protobuf/ptypes/duration/duration.proto",
+	"google/protobuf/empty.proto":     "github.com/golang/protobuf/ptypes/empty/empty.proto",
+	"google/protobuf/struct.proto":    "github.com/golang/protobuf/ptypes/struct/struct.proto",
+	"google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+	"google/protobuf/wrappers.proto":  "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+	// Files for the google.golang.org/genproto/protobuf package at one point
+	// were registered with an anomalous "src/" prefix.
+	// (Fixed as of https://github.com/google/go-genproto/pull/31)
+	"google/protobuf/api.proto":            "src/google/protobuf/api.proto",
+	"google/protobuf/field_mask.proto":     "src/google/protobuf/field_mask.proto",
+	"google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+	"google/protobuf/type.proto":           "src/google/protobuf/type.proto",
+
+	// Other standard files (descriptor.proto and compiler/plugin.proto) are
+	// registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+	// We provide aliasing in both directions, to support files with the
+	// proper import path linked against older versions of the generated
+	// files AND files that used the aliased import path but linked against
+	// newer versions of the generated files (which register with the
+	// correct path).
+
+	// Get all files defined above
+	keys := make([]string, 0, len(StdFileAliases))
+	for k := range StdFileAliases {
+		keys = append(keys, k)
+	}
+	// And add inverse mappings
+	for _, k := range keys {
+		alias := StdFileAliases[k]
+		StdFileAliases[alias] = k
+	}
+}
+
+type ErrNoSuchFile string
+
+func (e ErrNoSuchFile) Error() string {
+	return fmt.Sprintf("no such file: %q", string(e))
+}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+	fdb := proto.FileDescriptor(file)
+	aliased := false
+	if fdb == nil {
+		var ok bool
+		alias, ok := StdFileAliases[file]
+		if ok {
+			aliased = true
+			if fdb = proto.FileDescriptor(alias); fdb == nil {
+				return nil, ErrNoSuchFile(file)
+			}
+		} else {
+			return nil, ErrNoSuchFile(file)
+		}
+	}
+
+	fd, err := DecodeFileDescriptor(file, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	if aliased {
+		// the file descriptor will have the alias used to load it, but
+		// we need it to have the specified name in order to link it
+		fd.Name = proto.String(file)
+	}
+
+	return fd, nil
+}
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+	raw, err := decompress(fdb)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+	}
+	fd := dpb.FileDescriptorProto{}
+	if err := proto.Unmarshal(raw, &fd); err != nil {
+		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+	}
+	return &fd, nil
+}
+
+func decompress(b []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	out, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	return out, nil
+}
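
Illustrative sketch (not part of the change set): a hypothetical in-package test for LoadFileDescriptor; it assumes the any.proto descriptor is registered by blank-importing the ptypes/any package.

    package internal

    import (
        "testing"

        // Blank import registers google/protobuf/any.proto with the proto package.
        _ "github.com/golang/protobuf/ptypes/any"
    )

    // Hypothetical test: resolve a standard descriptor, via the alias table if needed.
    func TestLoadStandardDescriptorSketch(t *testing.T) {
        fd, err := LoadFileDescriptor("google/protobuf/any.proto")
        if err != nil {
            t.Fatal(err)
        }
        if fd.GetName() != "google/protobuf/any.proto" {
            t.Fatalf("unexpected descriptor name %q", fd.GetName())
        }
    }
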
diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
new file mode 100644
index 0000000..c903d4b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
@@ -0,0 +1,86 @@
+package internal
+
+import (
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+)
+
+var typeOfBytes = reflect.TypeOf([]byte(nil))
+
+// GetUnrecognized fetches the bytes of unrecognized fields for the given message.
+func GetUnrecognized(msg proto.Message) []byte {
+	val := reflect.Indirect(reflect.ValueOf(msg))
+	u := val.FieldByName("XXX_unrecognized")
+	if u.IsValid() && u.Type() == typeOfBytes {
+		return u.Interface().([]byte)
+	}
+
+	// Fallback to reflection for API v2 messages
+	get, _, _, ok := unrecognizedGetSetMethods(val)
+	if !ok {
+		return nil
+	}
+
+	return get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+}
+
+// SetUnrecognized adds the given bytes to the unrecognized fields for the given message.
+func SetUnrecognized(msg proto.Message, data []byte) {
+	val := reflect.Indirect(reflect.ValueOf(msg))
+	u := val.FieldByName("XXX_unrecognized")
+	if u.IsValid() && u.Type() == typeOfBytes {
+		// Just store the bytes in the unrecognized field
+		ub := u.Interface().([]byte)
+		ub = append(ub, data...)
+		u.Set(reflect.ValueOf(ub))
+		return
+	}
+
+	// Fallback to reflection for API v2 messages
+	get, set, argType, ok := unrecognizedGetSetMethods(val)
+	if !ok {
+		return
+	}
+
+	existing := get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+	if len(existing) > 0 {
+		data = append(existing, data...)
+	}
+	set.Call([]reflect.Value{reflect.ValueOf(data).Convert(argType)})
+}
+
+func unrecognizedGetSetMethods(val reflect.Value) (get reflect.Value, set reflect.Value, argType reflect.Type, ok bool) {
+	// val could be an APIv2 message. We use reflection to interact with
+	// this message so that we don't have a hard dependency on the new
+	// version of the protobuf package.
+	refMethod := val.MethodByName("ProtoReflect")
+	if !refMethod.IsValid() {
+		if val.CanAddr() {
+			refMethod = val.Addr().MethodByName("ProtoReflect")
+		}
+		if !refMethod.IsValid() {
+			return
+		}
+	}
+	refType := refMethod.Type()
+	if refType.NumIn() != 0 || refType.NumOut() != 1 {
+		return
+	}
+	ref := refMethod.Call([]reflect.Value(nil))
+	getMethod, setMethod := ref[0].MethodByName("GetUnknown"), ref[0].MethodByName("SetUnknown")
+	if !getMethod.IsValid() || !setMethod.IsValid() {
+		return
+	}
+	getType := getMethod.Type()
+	setType := setMethod.Type()
+	if getType.NumIn() != 0 || getType.NumOut() != 1 || setType.NumIn() != 1 || setType.NumOut() != 0 {
+		return
+	}
+	arg := setType.In(0)
+	if !arg.ConvertibleTo(typeOfBytes) || getType.Out(0) != arg {
+		return
+	}
+
+	return getMethod, setMethod, arg, true
+}
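
Illustrative sketch (not part of the change set): a hypothetical in-package test of the unknown-field helpers above; the payload is a hand-encoded field (tag 999, varint wire type, value 1) and empty.Empty is used only as a convenient registered message type.

    package internal

    import (
        "bytes"
        "testing"

        "github.com/golang/protobuf/ptypes/empty"
    )

    // Hypothetical test: attach unknown-field bytes to a message and read them back.
    func TestUnrecognizedRoundTripSketch(t *testing.T) {
        msg := &empty.Empty{}
        unknown := []byte{0xb8, 0x3e, 0x01} // key (999<<3)|0 as a varint, then value 1
        SetUnrecognized(msg, unknown)
        if got := GetUnrecognized(msg); !bytes.Equal(got, unknown) {
            t.Fatalf("unexpected unknown bytes: %x", got)
        }
    }
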
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
index 41b615a..1e50a63 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -1378,6 +1378,12 @@
 	return filteredDR
 }
 
+func (dr *DeviceRules) RemoveRule(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	delete(dr.Rules, deviceId)
+}
+
 func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
 	dr.rulesLock.Lock()
 	defer dr.rulesLock.Unlock()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
index 9b66d85..294983f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -25,31 +25,26 @@
 
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
+	"github.com/jhump/protoreflect/grpcreflect"
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
+	"github.com/opencord/voltha-protos/v5/go/adapter_service"
 	"github.com/opencord/voltha-protos/v5/go/common"
 	"github.com/opencord/voltha-protos/v5/go/core_service"
 	"github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service"
 	"github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/codes"
+	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	"google.golang.org/grpc/status"
 )
 
 type event byte
 type state byte
-type SetAndTestServiceHandler func(context.Context, *grpc.ClientConn, *common.Connection) interface{}
+type GetServiceClient func(context.Context, *grpc.ClientConn) interface{}
 type RestartedHandler func(ctx context.Context, endPoint string) error
 
-type contextKey string
-
-func (c contextKey) String() string {
-	return string(c)
-}
-
-var (
-	grpcMonitorContextKey = contextKey("grpc-monitor")
-)
-
 const (
 	grpcBackoffInitialInterval = "GRPC_BACKOFF_INITIAL_INTERVAL"
 	grpcBackoffMaxInterval     = "GRPC_BACKOFF_MAX_INTERVAL"
@@ -65,27 +60,24 @@
 )
 
 const (
-	connectionErrorSubString  = "SubConns are in TransientFailure"
-	connectionClosedSubstring = "client connection is closing"
-	connectionError           = "connection error"
-	connectionSystemNotReady  = "system is not ready"
-)
-
-const (
 	eventConnecting = event(iota)
+	eventValidatingConnection
 	eventConnected
 	eventDisconnected
 	eventStopped
 	eventError
 
 	stateConnected = state(iota)
+	stateValidatingConnection
 	stateConnecting
 	stateDisconnected
 )
 
 type Client struct {
 	clientEndpoint         string
+	clientContextData      string
 	serverEndPoint         string
+	remoteServiceName      string
 	connection             *grpc.ClientConn
 	connectionLock         sync.RWMutex
 	stateLock              sync.RWMutex
@@ -98,17 +90,26 @@
 	backoffMaxElapsedTime  time.Duration
 	monitorInterval        time.Duration
 	done                   bool
+	livenessLock           sync.RWMutex
 	livenessCallback       func(timestamp time.Time)
 }
 
 type ClientOption func(*Client)
 
-func NewClient(clientEndpoint, serverEndpoint string, onRestart RestartedHandler, opts ...ClientOption) (*Client, error) {
+func ClientContextData(data string) ClientOption {
+	return func(args *Client) {
+		args.clientContextData = data
+	}
+}
+
+func NewClient(clientEndpoint, serverEndpoint, remoteServiceName string, onRestart RestartedHandler,
+	opts ...ClientOption) (*Client, error) {
 	c := &Client{
 		clientEndpoint:         clientEndpoint,
 		serverEndPoint:         serverEndpoint,
+		remoteServiceName:      remoteServiceName,
 		onRestart:              onRestart,
-		events:                 make(chan event, 1),
+		events:                 make(chan event, 5),
 		state:                  stateDisconnected,
 		backoffInitialInterval: DefaultBackoffInitialInterval,
 		backoffMaxInterval:     DefaultBackoffMaxInterval,
@@ -143,6 +144,8 @@
 		return nil, fmt.Errorf("initial retry delay %v is greater than maximum retry delay %v", c.backoffInitialInterval, c.backoffMaxInterval)
 	}
 
+	grpc.EnableTracing = true
+
 	return c, nil
 }
 
@@ -200,8 +203,23 @@
 	return nil, fmt.Errorf("invalid-service-%s", reflect.TypeOf(c.service))
 }
 
+// GetAdapterServiceClient is a helper function that returns a concrete service instead of the GetClient() API
+// which returns an interface
+func (c *Client) GetAdapterServiceClient() (adapter_service.AdapterServiceClient, error) {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+	if c.service == nil {
+		return nil, fmt.Errorf("no adapter service connection to %s", c.serverEndPoint)
+	}
+	client, ok := c.service.(adapter_service.AdapterServiceClient)
+	if ok {
+		return client, nil
+	}
+	return nil, fmt.Errorf("invalid-service-%s", reflect.TypeOf(c.service))
+}
+
 func (c *Client) Reset(ctx context.Context) {
-	logger.Debugw(ctx, "resetting-client-connection", log.Fields{"endpoint": c.serverEndPoint})
+	logger.Debugw(ctx, "resetting-client-connection", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 	c.stateLock.Lock()
 	defer c.stateLock.Unlock()
 	if c.state == stateConnected {
@@ -210,128 +228,173 @@
 	}
 }
 
-func (c *Client) clientInterceptor(ctx context.Context, method string, req interface{}, reply interface{},
-	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-	// Nothing to do before intercepting the call
-	err := invoker(ctx, method, req, reply, cc, opts...)
-	// On connection failure, start the reconnect process depending on the error response
-	if err != nil {
-		logger.Errorw(ctx, "received-error", log.Fields{"error": err, "context": ctx, "endpoint": c.serverEndPoint})
-		if strings.Contains(err.Error(), connectionErrorSubString) ||
-			strings.Contains(err.Error(), connectionError) ||
-			strings.Contains(err.Error(), connectionSystemNotReady) ||
-			isGrpcMonitorKeyPresentInContext(ctx) {
-			c.stateLock.Lock()
-			if c.state == stateConnected {
-				c.state = stateDisconnected
-				logger.Warnw(context.Background(), "sending-disconnect-event", log.Fields{"endpoint": c.serverEndPoint, "error": err, "curr-state": stateConnected, "new-state": c.state})
-				c.events <- eventDisconnected
-			}
-			c.stateLock.Unlock()
-		} else if strings.Contains(err.Error(), connectionClosedSubstring) {
-			logger.Errorw(context.Background(), "invalid-client-connection-closed", log.Fields{"endpoint": c.serverEndPoint, "error": err})
+// executeWithTimeout runs a sending function (sf) along with a receiving one (rf) and returns an error, if any.
+// If the deadline d elapses first, it returns a grpc DeadlineExceeded error instead.
+func (c *Client) executeWithTimeout(sf func(*common.Connection) error, rf func() (interface{}, error), conn *common.Connection, d time.Duration) error {
+	errChan := make(chan error, 1)
+	go func() {
+		err := sf(conn)
+		logger.Debugw(context.Background(), "message-sent", log.Fields{"error": err, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		if err == nil {
+			var response interface{}
+			response, err = rf()
+			logger.Debugw(context.Background(), "message-received", log.Fields{"error": err, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "health": response})
+		}
+		errChan <- err
+		close(errChan)
+	}()
+	t := time.NewTimer(d)
+	select {
+	case <-t.C:
+		return status.Errorf(codes.DeadlineExceeded, "timeout-on-sending-message")
+	case err := <-errChan:
+		if !t.Stop() {
+			<-t.C
 		}
 		return err
 	}
-	// Update activity on success only
-	c.updateActivity(ctx)
-	return nil
 }
 
-// updateActivity updates the liveness channel
-func (c *Client) updateActivity(ctx context.Context) {
-	logger.Debugw(ctx, "update-activity", log.Fields{"api-endpoint": c.serverEndPoint})
+func (c *Client) monitorConnection(ctx context.Context) {
+	logger.Debugw(ctx, "monitor-connection-started", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 
-	// Update liveness only in connected state
-	if c.livenessCallback != nil {
-		c.stateLock.RLock()
-		if c.state == stateConnected {
-			c.livenessCallback(time.Now())
+	// If we exit, assume disconnected
+	defer func() {
+		c.stateLock.Lock()
+		if !c.done && (c.state == stateConnected || c.state == stateValidatingConnection) {
+			// Handle only the connected state here. We need the validating state to know whether we need to back off before a retry
+			if c.state == stateConnected {
+				c.state = stateDisconnected
+			}
+			logger.Warnw(ctx, "sending-disconnect-event", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "curr-state": stateConnected, "new-state": c.state})
+			c.events <- eventDisconnected
+		} else {
+			logger.Debugw(ctx, "no-state-change-needed", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "state": c.state, "client-done": c.done})
 		}
-		c.stateLock.RUnlock()
+		c.stateLock.Unlock()
+		logger.Debugw(ctx, "monitor-connection-ended", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+	}()
+
+	c.connectionLock.RLock()
+	conn := c.connection
+	c.connectionLock.RUnlock()
+	if conn == nil {
+		logger.Errorw(ctx, "connection-nil", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		return
 	}
-}
 
-func WithGrpcMonitorContext(ctx context.Context, name string) context.Context {
-	ctx = context.WithValue(ctx, grpcMonitorContextKey, name)
-	return ctx
-}
-
-func isGrpcMonitorKeyPresentInContext(ctx context.Context) bool {
-	if ctx != nil {
-		_, present := ctx.Value(grpcMonitorContextKey).(string)
-		return present
+	// Get a new client using reflection. The server can implement any grpc service, but it
+	// also needs to implement the streaming "GetHealthStatus" API used for keep-alives
+	grpcReflectClient := grpcreflect.NewClient(ctx, rpb.NewServerReflectionClient(conn))
+	if grpcReflectClient == nil {
+		logger.Errorw(ctx, "grpc-reflect-client-nil", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		return
 	}
-	return false
-}
 
-// monitorActivity monitors the activity on the gRPC connection.   If there are no activity after a specified
-// timeout, it will send a default API request on that connection.   If the connection is good then nothing
-// happens.  If it's bad this will trigger reconnection attempts.
-func (c *Client) monitorActivity(ctx context.Context, handler SetAndTestServiceHandler) {
-	logger.Infow(ctx, "start-activity-monitor", log.Fields{"endpoint": c.serverEndPoint})
+	// Get the list of services - there should be at least two: the server reflection service and the voltha service we are interested in
+	services, err := grpcReflectClient.ListServices()
+	if err != nil {
+		logger.Errorw(ctx, "list-services-error", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": err})
+		return
+	}
 
-	grpcMonitorCheckRunning := false
-	var grpcMonitorCheckRunningLock sync.RWMutex
+	// Filter out the service
+	logger.Debugw(ctx, "services", log.Fields{"services": services})
+	serviceOfInterest := ""
+	for _, service := range services {
+		if strings.EqualFold(service, c.remoteServiceName) {
+			serviceOfInterest = service
+			break
+		}
+	}
+	if serviceOfInterest == "" {
+		logger.Errorw(ctx, "no-service-found", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "services": services, "expected-remote-service": c.remoteServiceName})
+		return
+	}
 
-	// Interval to wait for no activity before probing the connection
-	timeout := c.monitorInterval
+	// Resolve the service
+	resolvedService, err := grpcReflectClient.ResolveService(serviceOfInterest)
+	if err != nil {
+		logger.Errorw(ctx, "service-error", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "service": resolvedService, "error": err})
+		return
+	}
+
+	// Find the method of interest
+	method := resolvedService.FindMethodByName("GetHealthStatus")
+	if method == nil {
+		logger.Errorw(ctx, "nil-method", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "service": resolvedService})
+		return
+	}
+	logger.Debugw(ctx, "resolved-to-method", log.Fields{"service": resolvedService.GetName(), "method": method.GetName()})
+
+	// Get a dynamic connection
+	dynamicConn := grpcdynamic.NewStub(conn)
+
+	// Get the stream and send this client information
+	streamCtx, streamDone := context.WithCancel(log.WithSpanFromContext(context.Background(), ctx))
+	defer streamDone()
+	stream, err := dynamicConn.InvokeRpcBidiStream(streamCtx, method)
+	if err != nil {
+		logger.Errorw(ctx, "stream-error", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "service": resolvedService, "error": err})
+		return
+	}
+
+	clientInfo := &common.Connection{
+		Endpoint:          c.clientEndpoint,
+		ContextInfo:       c.clientContextData,
+		KeepAliveInterval: int64(c.monitorInterval),
+	}
+
+	initialConnection := true
 loop:
 	for {
-		timeoutTimer := time.NewTimer(timeout)
-		select {
+		// Let's send a keep alive message with our info
+		err := c.executeWithTimeout(
+			func(conn *common.Connection) error { return stream.SendMsg(conn) },
+			func() (interface{}, error) { return stream.RecvMsg() },
+			clientInfo,
+			c.monitorInterval)
 
-		case <-ctx.Done():
-			// Stop and drain timer
-			if !timeoutTimer.Stop() {
-				select {
-				case <-timeoutTimer.C:
-				default:
-				}
-			}
+		if err != nil {
+			// Any error means the far end is gone
+			logger.Errorw(ctx, "sending-stream-error", log.Fields{"error": err, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "context": stream.Context().Err()})
 			break loop
+		}
+		// Send a connect event
+		if initialConnection {
+			logger.Debugw(ctx, "first-stream-data-sent", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+			c.events <- eventConnected
+			initialConnection = false
+		}
+		logger.Debugw(ctx, "stream-data-sent", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		// Update liveness, if configured
+		c.livenessLock.RLock()
+		if c.livenessCallback != nil {
+			go c.livenessCallback(time.Now())
+		}
+		c.livenessLock.RUnlock()
 
-		case <-timeoutTimer.C:
-			// Trigger an activity check if the state is connected.  If the state is not connected then there is already
-			// a backoff retry mechanism in place to retry establishing connection.
-			c.stateLock.RLock()
-			grpcMonitorCheckRunningLock.RLock()
-			runCheck := (c.state == stateConnected) && !grpcMonitorCheckRunning
-			grpcMonitorCheckRunningLock.RUnlock()
-			c.stateLock.RUnlock()
-			if runCheck {
-				go func() {
-					grpcMonitorCheckRunningLock.Lock()
-					if grpcMonitorCheckRunning {
-						grpcMonitorCheckRunningLock.Unlock()
-						logger.Debugw(ctx, "connection-check-already-in-progress", log.Fields{"api-endpoint": c.serverEndPoint})
-						return
-					}
-					grpcMonitorCheckRunning = true
-					grpcMonitorCheckRunningLock.Unlock()
-
-					logger.Debugw(ctx, "connection-check-start", log.Fields{"api-endpoint": c.serverEndPoint})
-					subCtx, cancel := context.WithTimeout(ctx, c.backoffMaxInterval)
-					defer cancel()
-					subCtx = WithGrpcMonitorContext(subCtx, "grpc-monitor")
-					c.connectionLock.RLock()
-					defer c.connectionLock.RUnlock()
-					if c.connection != nil {
-						response := handler(subCtx, c.connection, &common.Connection{Endpoint: c.clientEndpoint, KeepAliveInterval: int64(c.monitorInterval)})
-						logger.Debugw(ctx, "connection-check-response", log.Fields{"api-endpoint": c.serverEndPoint, "up": response != nil})
-					}
-					grpcMonitorCheckRunningLock.Lock()
-					grpcMonitorCheckRunning = false
-					grpcMonitorCheckRunningLock.Unlock()
-				}()
-			}
+		// Wait to send the next keep alive
+		keepAliveTimer := time.NewTimer(time.Duration(clientInfo.KeepAliveInterval))
+		select {
+		case <-ctx.Done():
+			logger.Warnw(ctx, "context-done", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+			break loop
+		case <-stream.Context().Done():
+			logger.Debugw(ctx, "stream-context-done", log.Fields{"api-endpoint": c.serverEndPoint, "stream-info": stream.Context(), "client": c.clientEndpoint})
+			break loop
+		case <-keepAliveTimer.C:
+			continue
 		}
 	}
-	logger.Infow(ctx, "activity-monitor-stopping", log.Fields{"endpoint": c.serverEndPoint})
+	if stream != nil {
+		if err := stream.CloseSend(); err != nil {
+			logger.Warnw(ctx, "closing-stream-error", log.Fields{"error": err, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		}
+	}
 }
 
 // Start kicks off the adapter agent by trying to connect to the adapter
-func (c *Client) Start(ctx context.Context, handler SetAndTestServiceHandler) {
+func (c *Client) Start(ctx context.Context, handler GetServiceClient) {
 	logger.Debugw(ctx, "Starting GRPC - Client", log.Fields{"api-endpoint": c.serverEndPoint})
 
 	// If the context contains a k8s probe then register services
@@ -340,8 +403,8 @@
 		p.RegisterService(ctx, c.serverEndPoint)
 	}
 
-	// Enable activity check
-	go c.monitorActivity(ctx, handler)
+	var monitorConnectionCtx context.Context
+	var monitorConnectionDone func()
 
 	initialConnection := true
 	c.events <- eventConnecting
@@ -351,38 +414,45 @@
 	for {
 		select {
 		case <-ctx.Done():
-			logger.Debugw(ctx, "context-closing", log.Fields{"endpoint": c.serverEndPoint})
-			break loop
+			logger.Warnw(ctx, "context-closing", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "context": ctx})
+			c.connectionLock.Lock()
+			if !c.done {
+				c.done = true
+				c.events <- eventStopped
+				close(c.events)
+			}
+			c.connectionLock.Unlock()
+			// break loop
 		case event := <-c.events:
-			logger.Debugw(ctx, "received-event", log.Fields{"event": event, "endpoint": c.serverEndPoint})
+			logger.Debugw(ctx, "received-event", log.Fields{"event": event, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 			c.connectionLock.RLock()
 			// On a client stopped, just allow the stop event to go through
 			if c.done && event != eventStopped {
 				c.connectionLock.RUnlock()
-				logger.Debugw(ctx, "ignoring-event-on-client-stop", log.Fields{"event": event, "endpoint": c.serverEndPoint})
+				logger.Debugw(ctx, "ignoring-event-on-client-stop", log.Fields{"event": event, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 				continue
 			}
 			c.connectionLock.RUnlock()
 			switch event {
 			case eventConnecting:
 				c.stateLock.Lock()
-				logger.Debugw(ctx, "connection-start", log.Fields{"endpoint": c.serverEndPoint, "attempts": attempt, "curr-state": c.state})
+				logger.Debugw(ctx, "connection-start", log.Fields{"api-endpoint": c.serverEndPoint, "attempts": attempt, "curr-state": c.state, "client": c.clientEndpoint})
 				if c.state == stateConnected {
 					c.state = stateDisconnected
 				}
 				if c.state != stateConnecting {
 					c.state = stateConnecting
 					go func() {
-						if err := c.connectToEndpoint(ctx, handler, p); err != nil {
+						if err := c.connectToEndpoint(ctx, p); err != nil {
 							c.stateLock.Lock()
 							c.state = stateDisconnected
 							c.stateLock.Unlock()
-							logger.Errorw(ctx, "connection-failed", log.Fields{"endpoint": c.serverEndPoint, "attempt": attempt, "error": err})
+							logger.Errorw(ctx, "connection-failed", log.Fields{"api-endpoint": c.serverEndPoint, "attempt": attempt, "client": c.clientEndpoint, "error": err})
 
 							// Retry connection after a delay
 							if err = backoff.Backoff(ctx); err != nil {
 								// Context has closed or reached maximum elapsed time, if set
-								logger.Errorw(ctx, "retry-aborted", log.Fields{"endpoint": c.serverEndPoint, "error": err})
+								logger.Errorw(ctx, "retry-aborted", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": err})
 								return
 							}
 							attempt += 1
@@ -391,29 +461,57 @@
 								c.events <- eventConnecting
 							}
 							c.connectionLock.RUnlock()
-						} else {
-							backoff.Reset()
 						}
 					}()
 				}
 				c.stateLock.Unlock()
 
+			case eventValidatingConnection:
+				logger.Debugw(ctx, "connection-validation", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+				c.stateLock.Lock()
+				if c.state != stateConnected {
+					c.state = stateValidatingConnection
+				}
+				c.stateLock.Unlock()
+				monitorConnectionCtx, monitorConnectionDone = context.WithCancel(context.Background())
+				go c.monitorConnection(monitorConnectionCtx)
+
 			case eventConnected:
 				attempt = 1
+				backoff.Reset()
 				c.stateLock.Lock()
-				logger.Debugw(ctx, "endpoint-connected", log.Fields{"endpoint": c.serverEndPoint, "curr-state": c.state})
+				logger.Debugw(ctx, "endpoint-connected", log.Fields{"api-endpoint": c.serverEndPoint, "curr-state": c.state, "client": c.clientEndpoint})
 				if c.state != stateConnected {
+					// Setup the service
+					c.connectionLock.RLock()
+					conn := c.connection
+					c.connectionLock.RUnlock()
+
+					subCtx, cancel := context.WithTimeout(ctx, c.backoffMaxInterval)
+					svc := handler(subCtx, conn)
+					if svc != nil {
+						c.service = svc
+						if p != nil {
+							p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusRunning)
+						}
+						logger.Infow(ctx, "connected-to-endpoint", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+					} else {
+						// Should never happen, but just in case
+						logger.Warnw(ctx, "service-is-nil", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+						c.events <- eventDisconnected
+					}
+					cancel()
 					c.state = stateConnected
 					if initialConnection {
-						logger.Debugw(ctx, "initial-endpoint-connection", log.Fields{"endpoint": c.serverEndPoint})
+						logger.Debugw(ctx, "initial-endpoint-connection", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 						initialConnection = false
 					} else {
-						logger.Debugw(ctx, "endpoint-reconnection", log.Fields{"endpoint": c.serverEndPoint})
+						logger.Debugw(ctx, "endpoint-reconnection", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 						// Trigger any callback on a restart
 						go func() {
 							err := c.onRestart(log.WithSpanFromContext(context.Background(), ctx), c.serverEndPoint)
 							if err != nil {
-								logger.Errorw(ctx, "unable-to-restart-endpoint", log.Fields{"error": err, "endpoint": c.serverEndPoint})
+								logger.Errorw(ctx, "unable-to-restart-endpoint", log.Fields{"error": err, "api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 							}
 						}()
 					}
@@ -424,32 +522,64 @@
 				if p != nil {
 					p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusNotReady)
 				}
-				c.stateLock.RLock()
-				logger.Debugw(ctx, "endpoint-disconnected", log.Fields{"endpoint": c.serverEndPoint, "curr-state": c.state})
-				c.stateLock.RUnlock()
+				connectionValidationFail := false
+				c.stateLock.Lock()
+				logger.Debugw(ctx, "endpoint-disconnected", log.Fields{"api-endpoint": c.serverEndPoint, "curr-state": c.state, "client": c.clientEndpoint})
+				if c.state == stateValidatingConnection {
+					connectionValidationFail = true
+					c.state = stateDisconnected
+				}
+				c.stateLock.Unlock()
 
-				// Try to connect again
-				c.events <- eventConnecting
+				// Stop the streaming connection
+				if monitorConnectionDone != nil {
+					monitorConnectionDone()
+					monitorConnectionDone = nil
+				}
+
+				if connectionValidationFail {
+					// Retry connection after a delay
+					if err := backoff.Backoff(ctx); err != nil {
+						// Context has closed or reached maximum elapsed time, if set
+						logger.Errorw(ctx, "retry-aborted", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": err})
+						return
+					}
+				}
+				c.connectionLock.RLock()
+				if !c.done {
+					c.events <- eventConnecting
+				}
+				c.connectionLock.RUnlock()
 
 			case eventStopped:
-				logger.Debugw(ctx, "endPoint-stopped", log.Fields{"adapter": c.serverEndPoint})
-				go func() {
-					if err := c.closeConnection(ctx, p); err != nil {
-						logger.Errorw(ctx, "endpoint-closing-connection-failed", log.Fields{"endpoint": c.serverEndPoint, "error": err})
-					}
-				}()
+				logger.Debugw(ctx, "endpoint-stopped", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+
+				if monitorConnectionDone != nil {
+					monitorConnectionDone()
+					monitorConnectionDone = nil
+				}
+				if err := c.closeConnection(ctx, p); err != nil {
+					logger.Errorw(ctx, "endpoint-closing-connection-failed", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": err})
+				}
 				break loop
 			case eventError:
-				logger.Errorw(ctx, "endpoint-error-event", log.Fields{"endpoint": c.serverEndPoint})
+				logger.Errorw(ctx, "endpoint-error-event", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 			default:
-				logger.Errorw(ctx, "endpoint-unknown-event", log.Fields{"endpoint": c.serverEndPoint, "error": event})
+				logger.Errorw(ctx, "endpoint-unknown-event", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": event})
 			}
 		}
 	}
-	logger.Infow(ctx, "endpoint-stopped", log.Fields{"endpoint": c.serverEndPoint})
+
+	// Stop the streaming connection
+	if monitorConnectionDone != nil {
+		logger.Debugw(ctx, "closing-connection-monitoring", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
+		monitorConnectionDone()
+	}
+
+	logger.Infow(ctx, "client-stopped", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 }
 
-func (c *Client) connectToEndpoint(ctx context.Context, handler SetAndTestServiceHandler, p *probe.Probe) error {
+func (c *Client) connectToEndpoint(ctx context.Context, p *probe.Probe) error {
 	if p != nil {
 		p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusPreparing)
 	}
@@ -476,52 +606,34 @@
 		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
 			grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
 		)),
-		grpc.WithUnaryInterceptor(c.clientInterceptor),
-		// Set keealive parameter - use default grpc values
-		grpc.WithKeepaliveParams(keepalive.ClientParameters{
-			Time:                c.monitorInterval,
-			Timeout:             c.backoffMaxInterval,
-			PermitWithoutStream: true,
-		}),
 	)
 
 	if err == nil {
-		subCtx, cancel := context.WithTimeout(ctx, c.backoffMaxInterval)
-		defer cancel()
-		svc := handler(subCtx, conn, &common.Connection{Endpoint: c.clientEndpoint, KeepAliveInterval: int64(c.monitorInterval)})
-		if svc != nil {
-			c.connection = conn
-			c.service = svc
-			if p != nil {
-				p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusRunning)
-			}
-			logger.Infow(ctx, "connected-to-endpoint", log.Fields{"endpoint": c.serverEndPoint})
-			c.events <- eventConnected
-			return nil
-		}
+		c.connection = conn
+		c.events <- eventValidatingConnection
+		return nil
+	} else {
+		logger.Warnw(ctx, "no-connection-to-endpoint", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint, "error": err})
 	}
-	logger.Warnw(ctx, "Failed to connect to endpoint",
-		log.Fields{
-			"endpoint": c.serverEndPoint,
-			"error":    err,
-		})
 
 	if p != nil {
 		p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusFailed)
 	}
-	return fmt.Errorf("no connection to endpoint %s", c.serverEndPoint)
+	return fmt.Errorf("no connection to api endpoint %s", c.serverEndPoint)
 }
 
 func (c *Client) closeConnection(ctx context.Context, p *probe.Probe) error {
 	if p != nil {
 		p.UpdateStatus(ctx, c.serverEndPoint, probe.ServiceStatusStopped)
 	}
+	logger.Infow(ctx, "client-closing-connection", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 
 	c.connectionLock.Lock()
 	defer c.connectionLock.Unlock()
 
 	if c.connection != nil {
 		err := c.connection.Close()
+		c.service = nil
 		c.connection = nil
 		return err
 	}
@@ -530,6 +642,7 @@
 }
 
 func (c *Client) Stop(ctx context.Context) {
+	logger.Infow(ctx, "client-stop-request-event-received", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 	c.connectionLock.Lock()
 	defer c.connectionLock.Unlock()
 	if !c.done {
@@ -537,7 +650,7 @@
 		c.events <- eventStopped
 		close(c.events)
 	}
-	logger.Infow(ctx, "client-stopped", log.Fields{"endpoint": c.serverEndPoint})
+	logger.Infow(ctx, "client-stop-request-event-sent", log.Fields{"api-endpoint": c.serverEndPoint, "client": c.clientEndpoint})
 }
 
 // SetService is used for testing only
@@ -548,5 +661,7 @@
 }
 
 func (c *Client) SubscribeForLiveness(callback func(timestamp time.Time)) {
+	c.livenessLock.Lock()
+	defer c.livenessLock.Unlock()
 	c.livenessCallback = callback
 }
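
Illustrative sketch (not part of the change set): how a consumer might wire up the reworked client above. The package name, endpoints and remote service name below are placeholders, and the handler simply wraps the live connection in a generated service client, since liveness is now driven by the keep-alive stream in monitorConnection rather than by a unary probe.

    package example // hypothetical consumer of the reworked client

    import (
        "context"

        vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
        "github.com/opencord/voltha-protos/v5/go/adapter_service"
        "google.golang.org/grpc"
    )

    // getAdapterClient matches the GetServiceClient signature: wrap the live
    // connection in a concrete client, or return nil if there is none.
    func getAdapterClient(ctx context.Context, conn *grpc.ClientConn) interface{} {
        if conn == nil {
            return nil
        }
        return adapter_service.NewAdapterServiceClient(conn)
    }

    func onPeerRestart(ctx context.Context, endpoint string) error {
        // Re-sync any state with the far end after a reconnection, if needed.
        return nil
    }

    func runClient(ctx context.Context) error {
        // "adapter_service.AdapterService" is assumed to be the fully qualified
        // name the remote server exposes through gRPC reflection.
        client, err := vgrpc.NewClient(
            "my-client:9443",   // clientEndpoint (placeholder)
            "remote-peer:9444", // serverEndpoint (placeholder)
            "adapter_service.AdapterService",
            onPeerRestart)
        if err != nil {
            return err
        }
        go client.Start(ctx, getAdapterClient)

        // Typed calls then go through the helper added above, e.g.
        //   svc, err := client.GetAdapterServiceClient()
        <-ctx.Done()
        client.Stop(context.Background())
        return nil
    }
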
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go
index 22becce..8365956 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go
@@ -17,18 +17,36 @@
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"time"
 
 	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 	"github.com/opencord/voltha-protos/v5/go/common"
 	ca "github.com/opencord/voltha-protos/v5/go/core_adapter"
+	"github.com/opencord/voltha-protos/v5/go/core_service"
 	"github.com/opencord/voltha-protos/v5/go/health"
 	"github.com/opencord/voltha-protos/v5/go/voltha"
 )
 
 //MockCoreServiceHandler implements the methods in the core service
-type MockCoreServiceHandler struct{}
+type MockCoreServiceHandler struct {
+	exitChannel chan struct{}
+}
+
+func NewMockCoreServiceHandler() *MockCoreServiceHandler {
+	return &MockCoreServiceHandler{exitChannel: make(chan struct{})}
+}
+
+func (handler *MockCoreServiceHandler) Start() {
+	logger.Debug(context.Background(), "starting-mock-core-service")
+}
+
+func (handler *MockCoreServiceHandler) Stop() {
+	logger.Debug(context.Background(), "stopping-mock-core-service")
+	close(handler.exitChannel)
+}
 
 func (handler *MockCoreServiceHandler) RegisterAdapter(ctx context.Context, reg *ca.AdapterRegistration) (*empty.Empty, error) {
 	//logger.Debugw(ctx, "registration-received", log.Fields{"input": reg})
@@ -131,6 +149,41 @@
 	return &empty.Empty{}, nil
 }
 
-func (handler *MockCoreServiceHandler) GetHealthStatus(ctx context.Context, conn *common.Connection) (*health.HealthStatus, error) {
-	return &health.HealthStatus{State: health.HealthStatus_HEALTHY}, nil
+func (handler *MockCoreServiceHandler) GetHealthStatus(stream core_service.CoreService_GetHealthStatusServer) error {
+	logger.Debugw(context.Background(), "keep-alive-connection", log.Fields{"stream": stream})
+	if stream == nil {
+		return fmt.Errorf("stream-is-nil %v", stream)
+	}
+	var err error
+	var remoteClient *common.Connection
+	var tempClient *common.Connection
+	ctx := context.Background()
+loop:
+	for {
+		tempClient, err = stream.Recv()
+		if err != nil {
+			logger.Warnw(ctx, "received-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+		// Send a response back
+		err = stream.Send(&health.HealthStatus{State: health.HealthStatus_HEALTHY})
+		if err != nil {
+			logger.Warnw(ctx, "sending-stream-error", log.Fields{"remote-client": remoteClient, "error": err})
+			break loop
+		}
+
+		remoteClient = tempClient
+		logger.Debugw(ctx, "received-keep-alive", log.Fields{"remote-client": remoteClient})
+		select {
+		case <-stream.Context().Done():
+			logger.Infow(ctx, "stream-keep-alive-context-done", log.Fields{"remote-client": remoteClient, "error": stream.Context().Err()})
+			break loop
+		case <-handler.exitChannel:
+			logger.Warnw(ctx, "received-stop", log.Fields{"remote-client": remoteClient})
+			break loop
+		default:
+		}
+	}
+	logger.Errorw(context.Background(), "connection-down", log.Fields{"remote-client": remoteClient, "error": err})
+	return err
 }
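
Illustrative sketch (not part of the change set): serving the mock handler above in a test. Because the client-side monitor resolves GetHealthStatus through gRPC server reflection, the test server also registers the reflection service; the registration helper names follow the usual generated-code conventions and are assumptions here.

    package example // hypothetical test helper package

    import (
        "net"
        "testing"

        vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
        "github.com/opencord/voltha-protos/v5/go/core_service"
        "google.golang.org/grpc"
        "google.golang.org/grpc/reflection"
    )

    // Hypothetical helper: start the mock core service plus server reflection,
    // which the streaming connection monitor needs to find GetHealthStatus.
    func startMockCoreServer(t *testing.T) (addr string, stop func()) {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            t.Fatal(err)
        }
        s := grpc.NewServer()
        handler := vgrpc.NewMockCoreServiceHandler()
        handler.Start()
        core_service.RegisterCoreServiceServer(s, handler)
        reflection.Register(s)
        go func() { _ = s.Serve(lis) }()
        return lis.Addr().String(), func() {
            handler.Stop()
            s.GracefulStop()
        }
    }
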
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
index 84a2d5f..7ba1a57 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -275,6 +275,8 @@
 }
 
 func (p *Probe) IsReady() bool {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
 	return p.isReady
 }
 
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/adapter_service/adapter_service.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/adapter_service/adapter_service.pb.go
index e8aa01c..31f5673 100644
--- a/vendor/github.com/opencord/voltha-protos/v5/go/adapter_service/adapter_service.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/adapter_service/adapter_service.pb.go
@@ -36,60 +36,61 @@
 }
 
 var fileDescriptor_038e6ec340f67698 = []byte{
-	// 846 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x6f, 0x6f, 0xdb, 0x36,
-	0x10, 0xc6, 0xb1, 0x0d, 0xd8, 0x8b, 0x4b, 0x6c, 0x27, 0x6c, 0x96, 0x0e, 0x0a, 0x56, 0x74, 0xed,
-	0xeb, 0xca, 0x40, 0x8b, 0xb6, 0x1b, 0x8a, 0x01, 0xf3, 0xbf, 0xba, 0xde, 0x5a, 0x38, 0xb0, 0xe6,
-	0x62, 0xd8, 0x9b, 0x80, 0x96, 0xcf, 0x32, 0x51, 0x8a, 0xd4, 0xc8, 0x93, 0xd3, 0x7e, 0x83, 0x7d,
-	0x80, 0x7d, 0xe0, 0x42, 0x12, 0xe5, 0x58, 0xaa, 0xdd, 0xd6, 0x49, 0xde, 0x59, 0xf7, 0xf0, 0xf9,
-	0xf1, 0x78, 0x47, 0x93, 0x84, 0x87, 0x2b, 0x2d, 0x69, 0xc9, 0x2f, 0x12, 0xa3, 0x49, 0xdb, 0x36,
-	0x9f, 0xf3, 0x84, 0xd0, 0x5c, 0x58, 0x34, 0x2b, 0x11, 0xa2, 0x9f, 0x87, 0x59, 0xab, 0x16, 0xf6,
-	0xce, 0x22, 0xad, 0x23, 0x89, 0xed, 0x5c, 0x9e, 0xa5, 0x8b, 0x36, 0xc6, 0x09, 0x7d, 0x28, 0x46,
-	0x7b, 0x5e, 0x15, 0x19, 0xea, 0x38, 0xd6, 0xca, 0x69, 0xf7, 0xeb, 0x9a, 0xc1, 0x0b, 0x07, 0xdf,
-	0xee, 0x9e, 0xe3, 0x55, 0x1e, 0xde, 0xbd, 0xaa, 0x86, 0xef, 0x09, 0x95, 0x15, 0x5a, 0xd9, 0xed,
-	0xde, 0x25, 0x72, 0x49, 0xcb, 0xed, 0x5a, 0xf1, 0xe5, 0xb4, 0x9f, 0xaa, 0x9a, 0x8e, 0x43, 0x71,
-	0x41, 0x68, 0x69, 0xbb, 0x15, 0x57, 0xa8, 0xc8, 0x4d, 0xf9, 0xf8, 0xff, 0x3b, 0xd0, 0xec, 0x14,
-	0x0b, 0x08, 0x8a, 0xe2, 0xb0, 0x17, 0xd0, 0x1a, 0x22, 0xbd, 0xca, 0x27, 0x0f, 0x88, 0x53, 0x6a,
-	0x19, 0xf3, 0x5d, 0x15, 0x7a, 0x5a, 0x29, 0x0c, 0x49, 0x68, 0xe5, 0x9d, 0xf8, 0x2e, 0xbf, 0xca,
-	0xc8, 0xa7, 0x70, 0xd0, 0x99, 0xeb, 0x84, 0xfa, 0xf9, 0xba, 0x59, 0xd3, 0x77, 0x05, 0x28, 0xbe,
-	0xbd, 0x53, 0xbf, 0xa8, 0xbc, 0x5f, 0x56, 0xde, 0x1f, 0x64, 0x95, 0x67, 0xbf, 0x42, 0x6b, 0x82,
-	0xa1, 0x56, 0xa1, 0x90, 0xb8, 0xa7, 0xf5, 0x19, 0x1c, 0xf6, 0x51, 0x22, 0xed, 0xeb, 0x7b, 0x0e,
-	0x8d, 0xbe, 0xb0, 0x7c, 0xb6, 0xf7, 0x84, 0xbf, 0x40, 0x73, 0x82, 0x03, 0x75, 0x0d, 0xe7, 0x33,
-	0x38, 0x9c, 0xe0, 0x4c, 0x6b, 0xda, 0x7f, 0xc6, 0x00, 0xe5, 0xe2, 0x2f, 0xb4, 0xfb, 0x3a, 0xbb,
-	0x70, 0x34, 0x44, 0x1a, 0x2f, 0x92, 0x62, 0xdc, 0x48, 0x2d, 0xf4, 0x27, 0xde, 0x7b, 0x7e, 0x65,
-	0x1b, 0x07, 0x97, 0x82, 0xc2, 0x65, 0x8f, 0x27, 0x7c, 0x26, 0xa4, 0x28, 0x7a, 0xd3, 0x5b, 0x0a,
-	0x39, 0x2f, 0x86, 0xbf, 0xd6, 0x96, 0xbe, 0x7a, 0xfa, 0xc7, 0x00, 0x45, 0xa1, 0xce, 0xb5, 0x21,
-	0x76, 0x58, 0xba, 0xb2, 0xaf, 0x9d, 0x9e, 0x27, 0x70, 0xe0, 0xfa, 0xb2, 0x87, 0xa9, 0x0b, 0xad,
-	0x69, 0x32, 0xe7, 0x84, 0x2f, 0xa5, 0xbe, 0xb4, 0xdd, 0x54, 0xbe, 0x63, 0x77, 0xab, 0xcb, 0xca,
-	0x62, 0xb9, 0xb8, 0x93, 0x31, 0x81, 0x1f, 0x37, 0x18, 0x23, 0x15, 0x1a, 0x8c, 0x51, 0x11, 0x97,
-	0xf2, 0x03, 0xab, 0xd5, 0x68, 0x43, 0xfc, 0x3c, 0xf3, 0x77, 0x68, 0x04, 0xa8, 0xe6, 0xe7, 0x3c,
-	0x7c, 0x87, 0x34, 0x4e, 0xa9, 0x9e, 0xd5, 0x5a, 0xd8, 0x49, 0x18, 0x40, 0xb3, 0xc8, 0xea, 0x3c,
-	0xee, 0x69, 0xb5, 0x10, 0x11, 0x3b, 0xab, 0x21, 0x5c, 0xdc, 0x66, 0xcd, 0xdd, 0x89, 0x09, 0xe0,
-	0xa8, 0xaf, 0x2f, 0x95, 0xd4, 0x7c, 0x3e, 0x56, 0xe9, 0x28, 0xe6, 0x11, 0xb2, 0x07, 0xd5, 0x2e,
-	0xe6, 0xc1, 0x72, 0xd0, 0x04, 0xff, 0x4d, 0xd1, 0x92, 0x77, 0xb6, 0x65, 0xcc, 0x04, 0x6d, 0xa2,
-	0x95, 0x45, 0xf6, 0x1a, 0x8e, 0xb3, 0xdd, 0xe5, 0x78, 0xee, 0x04, 0xf0, 0xb6, 0x3a, 0xbe, 0x82,
-	0x36, 0x86, 0x93, 0xce, 0x4c, 0x9b, 0x35, 0x6f, 0x9a, 0x44, 0x86, 0xcf, 0xf1, 0xfa, 0xc0, 0x47,
-	0x70, 0xb8, 0x91, 0x9e, 0x65, 0x50, 0x9e, 0x62, 0xa3, 0xbe, 0x77, 0x5c, 0x1a, 0xaf, 0xe4, 0x3f,
-	0xe1, 0xa8, 0x13, 0x92, 0x58, 0x71, 0xc2, 0x75, 0x89, 0xae, 0x3d, 0xf7, 0x08, 0x9a, 0x3d, 0x1d,
-	0xc7, 0x82, 0x6e, 0x8e, 0x1a, 0x43, 0xa3, 0xec, 0x4a, 0xd9, 0xb7, 0xea, 0x66, 0xdc, 0xec, 0xdb,
-	0x1b, 0xb4, 0x96, 0x47, 0xe8, 0xfd, 0x50, 0x12, 0x2b, 0xea, 0x83, 0xef, 0xfe, 0xfb, 0xf6, 0x1b,
-	0xf6, 0x37, 0x9c, 0x0e, 0x91, 0x2a, 0x82, 0xeb, 0xdd, 0x4d, 0xc9, 0x53, 0xb8, 0xd3, 0xe3, 0x2a,
-	0x44, 0x59, 0xd1, 0x6e, 0x03, 0x5b, 0x76, 0xc6, 0xed, 0x8c, 0xec, 0x0f, 0x71, 0x63, 0x6c, 0x00,
-	0xc7, 0x13, 0x5c, 0xa1, 0xa1, 0xdb, 0x84, 0xbe, 0x80, 0x46, 0x40, 0xdc, 0xd0, 0x38, 0x0e, 0x45,
-	0x76, 0x60, 0xb3, 0xd3, 0x2a, 0x70, 0xfc, 0xa6, 0x37, 0xca, 0xe2, 0x1e, 0xf3, 0xb3, 0x7b, 0xda,
-	0xcf, 0x7e, 0xaf, 0x5b, 0xfd, 0x07, 0x34, 0x02, 0x11, 0xa7, 0x92, 0x13, 0x76, 0x24, 0x37, 0x71,
-	0x3d, 0x9b, 0x8a, 0x78, 0x95, 0x8d, 0xdb, 0xd6, 0xe3, 0x04, 0x0d, 0xcf, 0xee, 0xe6, 0x8c, 0x97,
-	0x27, 0x92, 0x26, 0x89, 0x41, 0x6b, 0x07, 0xd9, 0x8d, 0xcf, 0x98, 0x9f, 0xdf, 0xfc, 0x7e, 0xfe,
-	0xf5, 0x52, 0x48, 0x42, 0xb3, 0xf3, 0xb8, 0xf8, 0x0d, 0x5a, 0x53, 0x75, 0x7d, 0xfb, 0x2b, 0x38,
-	0x18, 0x22, 0x0d, 0xde, 0xd3, 0x5b, 0x2e, 0x53, 0x64, 0xf7, 0xab, 0xab, 0xd8, 0x90, 0xca, 0x35,
-	0xdc, 0xf5, 0xd7, 0x8f, 0x21, 0x7f, 0x82, 0x94, 0x1a, 0x95, 0xcb, 0x96, 0x0d, 0xe1, 0x20, 0xd8,
-	0x4d, 0x0a, 0x3e, 0x25, 0xed, 0x4a, 0x69, 0x0a, 0xcd, 0x21, 0x52, 0x20, 0x54, 0x24, 0xb1, 0x64,
-	0x5d, 0xcd, 0x59, 0xc4, 0x87, 0x58, 0xb0, 0xca, 0xbf, 0xe5, 0xcf, 0x9f, 0x19, 0xe1, 0x3a, 0x36,
-	0xcd, 0xae, 0xe6, 0x2f, 0x60, 0x83, 0x2f, 0x62, 0x83, 0x1a, 0xb6, 0x4b, 0xf0, 0x50, 0x9b, 0xc8,
-	0xd7, 0x09, 0xaa, 0x50, 0x9b, 0xb9, 0xef, 0x9e, 0x7b, 0xb5, 0x77, 0x6c, 0xf7, 0xe4, 0x6d, 0x1e,
-	0xaf, 0x3e, 0xe0, 0xfe, 0x79, 0x1e, 0x09, 0x5a, 0xa6, 0xb3, 0x6c, 0x5b, 0xb4, 0x4b, 0x82, 0x7b,
-	0x30, 0x3e, 0x2a, 0x9f, 0x8f, 0x4f, 0xdb, 0x91, 0xae, 0xbf, 0x96, 0x67, 0xdf, 0xe7, 0xea, 0x93,
-	0x8f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x51, 0x44, 0xf1, 0x55, 0x0b, 0x00, 0x00,
+	// 853 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdf, 0x6e, 0xdb, 0x36,
+	0x14, 0xc6, 0x91, 0x0d, 0xd8, 0xc5, 0x49, 0x6c, 0x27, 0x6c, 0x96, 0x0e, 0x0a, 0x56, 0x74, 0xed,
+	0xcd, 0x6e, 0x2a, 0x0f, 0x2d, 0xda, 0x6e, 0x18, 0x86, 0xcd, 0xff, 0xea, 0x7a, 0x6b, 0xe1, 0xc0,
+	0x9a, 0x8b, 0x61, 0x37, 0x01, 0x2d, 0x1d, 0xcb, 0x44, 0x29, 0x52, 0x13, 0x8f, 0x9c, 0xf6, 0x0d,
+	0xf6, 0x08, 0x7b, 0xdc, 0x41, 0x12, 0xe5, 0x58, 0xaa, 0xdd, 0xd6, 0x4e, 0xee, 0xac, 0xf3, 0xf1,
+	0xfb, 0xf1, 0xf0, 0x1c, 0x9a, 0x24, 0x3c, 0x5c, 0x6a, 0x49, 0x0b, 0x7e, 0x19, 0x27, 0x9a, 0xb4,
+	0x69, 0xf3, 0x80, 0xc7, 0x84, 0xc9, 0xa5, 0xc1, 0x64, 0x29, 0x7c, 0x74, 0xf3, 0x30, 0x6b, 0xd5,
+	0xc2, 0xce, 0x79, 0xa8, 0x75, 0x28, 0xb1, 0x9d, 0xcb, 0xb3, 0x74, 0xde, 0xc6, 0x28, 0xa6, 0xf7,
+	0xc5, 0x68, 0xc7, 0xa9, 0x22, 0x7d, 0x1d, 0x45, 0x5a, 0x59, 0xed, 0x7e, 0x5d, 0x4b, 0xf0, 0xd2,
+	0xc2, 0x37, 0xbb, 0x03, 0xbc, 0xce, 0xc3, 0xb9, 0x57, 0xd5, 0xf0, 0x1d, 0xa1, 0x32, 0x42, 0x2b,
+	0xb3, 0xd9, 0xbb, 0x40, 0x2e, 0x69, 0xb1, 0x59, 0x2b, 0xbe, 0xac, 0xf6, 0x6d, 0x55, 0xd3, 0x91,
+	0x2f, 0x2e, 0x09, 0x0d, 0x6d, 0xb6, 0xe2, 0x12, 0x15, 0xd9, 0x29, 0x1f, 0xff, 0x77, 0x07, 0x9a,
+	0x9d, 0x62, 0x01, 0x5e, 0x51, 0x1c, 0xf6, 0x2b, 0xb4, 0x86, 0x48, 0x2f, 0xf3, 0xc9, 0x3d, 0xe2,
+	0x94, 0x1a, 0xc6, 0x5c, 0x5b, 0x85, 0x9e, 0x56, 0x0a, 0x7d, 0x12, 0x5a, 0x39, 0xa7, 0xae, 0xcd,
+	0x6f, 0x7d, 0xe4, 0xf7, 0x07, 0x3f, 0x1c, 0xb0, 0xa7, 0x70, 0xd8, 0x09, 0x74, 0x4c, 0xfd, 0x7c,
+	0xed, 0xac, 0xe9, 0xda, 0x22, 0x14, 0xdf, 0xce, 0x99, 0x5b, 0x54, 0xdf, 0x2d, 0xab, 0xef, 0x0e,
+	0xb2, 0xea, 0xb3, 0x9f, 0xa0, 0x35, 0x41, 0x5f, 0x2b, 0x5f, 0x48, 0xdc, 0xd1, 0xfa, 0x0c, 0x8e,
+	0xfa, 0x28, 0x91, 0x76, 0xf5, 0x3d, 0x87, 0x46, 0x5f, 0x18, 0x3e, 0xdb, 0x79, 0xc2, 0x1f, 0xa1,
+	0x39, 0xc1, 0x81, 0xda, 0xc3, 0xf9, 0x0c, 0x8e, 0x26, 0x38, 0xd3, 0x9a, 0x76, 0x9f, 0xd1, 0x43,
+	0x39, 0xff, 0x13, 0xcd, 0xae, 0xce, 0x2e, 0x1c, 0x0f, 0x91, 0xc6, 0xf3, 0xb8, 0x18, 0x37, 0x52,
+	0x73, 0xfd, 0x81, 0xf7, 0x9e, 0x5b, 0xd9, 0xca, 0xde, 0x95, 0x20, 0x7f, 0xd1, 0xe3, 0x31, 0x9f,
+	0x09, 0x29, 0x8a, 0xde, 0xf4, 0x16, 0x42, 0x06, 0xc5, 0xf0, 0x57, 0xda, 0xd0, 0x67, 0x4f, 0xff,
+	0x18, 0xa0, 0x28, 0xd4, 0x85, 0x4e, 0x88, 0x1d, 0x95, 0xae, 0xec, 0x6b, 0xab, 0xe7, 0x09, 0x1c,
+	0xda, 0xbe, 0xec, 0x60, 0xea, 0x42, 0x6b, 0x1a, 0x07, 0x9c, 0xf0, 0x85, 0xd4, 0x57, 0xa6, 0x9b,
+	0xca, 0xb7, 0xec, 0x6e, 0x75, 0x59, 0x59, 0x2c, 0x17, 0xb7, 0x32, 0x26, 0xf0, 0xcd, 0x1a, 0x63,
+	0xa4, 0xfc, 0x04, 0x23, 0x54, 0xc4, 0xa5, 0x7c, 0xcf, 0x6a, 0x35, 0x5a, 0x13, 0x3f, 0xce, 0xfc,
+	0x0d, 0x1a, 0x1e, 0xaa, 0xe0, 0x82, 0xfb, 0x6f, 0x91, 0xc6, 0x29, 0xd5, 0xb3, 0x5a, 0x09, 0x5b,
+	0x09, 0x03, 0x68, 0x16, 0x59, 0x5d, 0x44, 0x3d, 0xad, 0xe6, 0x22, 0x64, 0xe7, 0x35, 0x84, 0x8d,
+	0x9b, 0xac, 0xb9, 0x5b, 0x31, 0x1e, 0x1c, 0xf7, 0xf5, 0x95, 0x92, 0x9a, 0x07, 0x63, 0x95, 0x8e,
+	0x22, 0x1e, 0x22, 0x7b, 0x50, 0xed, 0x62, 0x1e, 0x2c, 0x07, 0x4d, 0xf0, 0x9f, 0x14, 0x0d, 0x39,
+	0xe7, 0x1b, 0xc6, 0x4c, 0xd0, 0xc4, 0x5a, 0x19, 0x64, 0xaf, 0xe0, 0x24, 0xdb, 0x5d, 0x96, 0x67,
+	0xcf, 0x0b, 0x67, 0xa3, 0xe3, 0x33, 0x68, 0x63, 0x38, 0xed, 0xcc, 0x74, 0xb2, 0xe2, 0x4d, 0xe3,
+	0x30, 0xe1, 0x01, 0xee, 0x0f, 0x7c, 0x04, 0x47, 0x6b, 0xe9, 0x19, 0x06, 0xe5, 0x49, 0x36, 0xea,
+	0x3b, 0x27, 0xa5, 0xf1, 0x5a, 0xfe, 0x03, 0x8e, 0x3b, 0x3e, 0x89, 0x25, 0x27, 0x5c, 0x95, 0x68,
+	0xef, 0xb9, 0x47, 0xd0, 0xec, 0xe9, 0x28, 0x12, 0x74, 0x73, 0xd4, 0x18, 0x1a, 0x65, 0x57, 0xca,
+	0xbe, 0x55, 0x37, 0xe3, 0x7a, 0xdf, 0x5e, 0xa3, 0x31, 0x3c, 0x44, 0xe7, 0xeb, 0x92, 0x58, 0x51,
+	0x1f, 0x7c, 0xf9, 0xef, 0x17, 0x07, 0xec, 0x2f, 0x38, 0x1b, 0x22, 0x55, 0x04, 0xdb, 0xbb, 0x9b,
+	0x92, 0xa7, 0x70, 0xa7, 0xc7, 0x95, 0x8f, 0xb2, 0xa2, 0xdd, 0x06, 0xb6, 0xec, 0x8c, 0xdd, 0x19,
+	0xd9, 0x1f, 0xe2, 0xc6, 0x58, 0x0f, 0x4e, 0x26, 0xb8, 0xc4, 0x84, 0x6e, 0x13, 0xfa, 0x33, 0x34,
+	0x3c, 0xe2, 0x09, 0x8d, 0x23, 0x5f, 0x64, 0x07, 0x36, 0x3b, 0xab, 0x02, 0xc7, 0xaf, 0x7b, 0xa3,
+	0x2c, 0xee, 0x30, 0x37, 0xbb, 0xab, 0xdd, 0xec, 0xf7, 0xaa, 0xd5, 0xbf, 0x43, 0xc3, 0x13, 0x51,
+	0x2a, 0x39, 0x61, 0x47, 0xf2, 0x24, 0xaa, 0x67, 0x53, 0x11, 0xaf, 0xb3, 0xb1, 0xdb, 0x7a, 0x1c,
+	0x63, 0xc2, 0xb3, 0xfb, 0x39, 0xe3, 0xe5, 0x89, 0xa4, 0x71, 0x9c, 0xa0, 0x31, 0x83, 0xec, 0xd6,
+	0x67, 0xcc, 0xcd, 0x6f, 0x7f, 0x37, 0xff, 0x7a, 0x21, 0x24, 0x61, 0xb2, 0xf5, 0xb8, 0xf8, 0x05,
+	0x5a, 0x53, 0xb5, 0xbf, 0xfd, 0x25, 0x1c, 0x0e, 0x91, 0x06, 0xef, 0xe8, 0x0d, 0x97, 0x29, 0xb2,
+	0xfb, 0xd5, 0x55, 0xac, 0x49, 0xe5, 0x1a, 0xee, 0xba, 0xab, 0x07, 0x91, 0x3b, 0x41, 0x4a, 0x13,
+	0x95, 0xcb, 0x86, 0x0d, 0xe1, 0xd0, 0xdb, 0x4e, 0xf2, 0x3e, 0x24, 0x6d, 0x4b, 0x69, 0x0a, 0xcd,
+	0x21, 0x92, 0x27, 0x54, 0x28, 0xb1, 0x64, 0x5d, 0xcf, 0x59, 0xc4, 0x87, 0x58, 0xb0, 0xca, 0xbf,
+	0xe5, 0x77, 0x1f, 0x19, 0x61, 0x3b, 0x36, 0xcd, 0xae, 0xe6, 0x4f, 0x60, 0xbd, 0x4f, 0x62, 0xbd,
+	0x1a, 0xb6, 0x4b, 0xf0, 0x50, 0x27, 0xa1, 0xab, 0x63, 0x54, 0xbe, 0x4e, 0x02, 0xd7, 0x3e, 0xf9,
+	0x6a, 0x6f, 0xd9, 0xee, 0xe9, 0x9b, 0x3c, 0x5e, 0x7d, 0xc4, 0xfd, 0xfd, 0x3c, 0x14, 0xb4, 0x48,
+	0x67, 0xd9, 0xb6, 0x68, 0x97, 0x04, 0xfb, 0x68, 0x7c, 0x54, 0x3e, 0x21, 0x9f, 0xb6, 0x43, 0x5d,
+	0x7f, 0x31, 0xcf, 0xbe, 0xca, 0xd5, 0x27, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x54, 0xa5,
+	0xf6, 0x59, 0x0b, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -106,7 +107,7 @@
 type AdapterServiceClient interface {
 	// GetHealthStatus is used by an AdapterService client to verify connectivity
 	// to the gRPC server hosting the AdapterService service
-	GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error)
+	GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (AdapterService_GetHealthStatusClient, error)
 	// Device
 	AdoptDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
 	ReconcileDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
@@ -161,13 +162,35 @@
 	return &adapterServiceClient{cc}
 }
 
-func (c *adapterServiceClient) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	out := new(health.HealthStatus)
-	err := c.cc.Invoke(ctx, "/adapter_service.AdapterService/GetHealthStatus", in, out, opts...)
+func (c *adapterServiceClient) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (AdapterService_GetHealthStatusClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_AdapterService_serviceDesc.Streams[0], "/adapter_service.AdapterService/GetHealthStatus", opts...)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	x := &adapterServiceGetHealthStatusClient{stream}
+	return x, nil
+}
+
+type AdapterService_GetHealthStatusClient interface {
+	Send(*common.Connection) error
+	Recv() (*health.HealthStatus, error)
+	grpc.ClientStream
+}
+
+type adapterServiceGetHealthStatusClient struct {
+	grpc.ClientStream
+}
+
+func (x *adapterServiceGetHealthStatusClient) Send(m *common.Connection) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *adapterServiceGetHealthStatusClient) Recv() (*health.HealthStatus, error) {
+	m := new(health.HealthStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
 }
 
 func (c *adapterServiceClient) AdoptDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
@@ -485,7 +508,7 @@
 type AdapterServiceServer interface {
 	// GetHealthStatus is used by an AdapterService client to verify connectivity
 	// to the gRPC server hosting the AdapterService service
-	GetHealthStatus(context.Context, *common.Connection) (*health.HealthStatus, error)
+	GetHealthStatus(AdapterService_GetHealthStatusServer) error
 	// Device
 	AdoptDevice(context.Context, *voltha.Device) (*empty.Empty, error)
 	ReconcileDevice(context.Context, *voltha.Device) (*empty.Empty, error)
@@ -536,8 +559,8 @@
 type UnimplementedAdapterServiceServer struct {
 }
 
-func (*UnimplementedAdapterServiceServer) GetHealthStatus(ctx context.Context, req *common.Connection) (*health.HealthStatus, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+func (*UnimplementedAdapterServiceServer) GetHealthStatus(srv AdapterService_GetHealthStatusServer) error {
+	return status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
 }
 func (*UnimplementedAdapterServiceServer) AdoptDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method AdoptDevice not implemented")
@@ -646,22 +669,30 @@
 	s.RegisterService(&_AdapterService_serviceDesc, srv)
 }
 
-func _AdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(common.Connection)
-	if err := dec(in); err != nil {
+func _AdapterService_GetHealthStatus_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(AdapterServiceServer).GetHealthStatus(&adapterServiceGetHealthStatusServer{stream})
+}
+
+type AdapterService_GetHealthStatusServer interface {
+	Send(*health.HealthStatus) error
+	Recv() (*common.Connection, error)
+	grpc.ServerStream
+}
+
+type adapterServiceGetHealthStatusServer struct {
+	grpc.ServerStream
+}
+
+func (x *adapterServiceGetHealthStatusServer) Send(m *health.HealthStatus) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *adapterServiceGetHealthStatusServer) Recv() (*common.Connection, error) {
+	m := new(common.Connection)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AdapterServiceServer).GetHealthStatus(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/adapter_service.AdapterService/GetHealthStatus",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AdapterServiceServer).GetHealthStatus(ctx, req.(*common.Connection))
-	}
-	return interceptor(ctx, in, info, handler)
+	return m, nil
 }
 
 func _AdapterService_AdoptDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -1281,10 +1312,6 @@
 	HandlerType: (*AdapterServiceServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
-			MethodName: "GetHealthStatus",
-			Handler:    _AdapterService_GetHealthStatus_Handler,
-		},
-		{
 			MethodName: "AdoptDevice",
 			Handler:    _AdapterService_AdoptDevice_Handler,
 		},
@@ -1421,6 +1448,13 @@
 			Handler:    _AdapterService_SetSingleValue_Handler,
 		},
 	},
-	Streams:  []grpc.StreamDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "GetHealthStatus",
+			Handler:       _AdapterService_GetHealthStatus_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
 	Metadata: "voltha_protos/adapter_service.proto",
 }
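
The regenerated stub above replaces the unary GetHealthStatus call with a bidirectional stream, so connection health is now inferred from the stream itself: the client sends Connection messages, reads HealthStatus replies, and treats any Send/Recv failure as a lost peer. A minimal client-side sketch of that monitoring loop (the keep-alive period, the onLost callback, and the empty Connection payload are illustrative assumptions, not part of this change):

package healthmon

import (
	"context"
	"io"
	"log"
	"time"

	"github.com/opencord/voltha-protos/v5/go/adapter_service"
	"github.com/opencord/voltha-protos/v5/go/common"
	"google.golang.org/grpc"
)

// monitorAdapterService holds a GetHealthStatus stream open against an
// AdapterService endpoint and invokes onLost once the stream breaks.
func monitorAdapterService(ctx context.Context, conn *grpc.ClientConn, onLost func(error)) {
	client := adapter_service.NewAdapterServiceClient(conn)

	stream, err := client.GetHealthStatus(ctx)
	if err != nil {
		onLost(err)
		return
	}

	// Periodically announce ourselves so the server can track liveness.
	go func() {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// A real caller would populate its endpoint identity here.
				if err := stream.Send(&common.Connection{}); err != nil {
					return // the Recv loop below observes the same failure
				}
			}
		}
	}()

	// Block on Recv; any error (including io.EOF) means the link is gone.
	for {
		if _, err := stream.Recv(); err != nil {
			if err != io.EOF {
				log.Printf("health stream error: %v", err)
			}
			onLost(err)
			return
		}
	}
}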
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/core_service/core_services.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/core_service/core_services.pb.go
index 5816ad2..265d0c8 100644
--- a/vendor/github.com/opencord/voltha-protos/v5/go/core_service/core_services.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/core_service/core_services.pb.go
@@ -32,46 +32,47 @@
 func init() { proto.RegisterFile("voltha_protos/core_services.proto", fileDescriptor_979c43850713f141) }
 
 var fileDescriptor_979c43850713f141 = []byte{
-	// 619 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xdf, 0x4f, 0x14, 0x31,
-	0x10, 0x7e, 0x33, 0x3a, 0x1c, 0x1c, 0x94, 0x1f, 0x9a, 0x25, 0x28, 0x3c, 0xf9, 0xa2, 0xdd, 0x44,
-	0x10, 0x13, 0x49, 0x4c, 0x8e, 0x5b, 0x3d, 0x2e, 0x01, 0x43, 0x20, 0x6a, 0xe2, 0x0b, 0x29, 0xbb,
-	0xc3, 0xee, 0x86, 0xbd, 0xf6, 0xd2, 0x96, 0x53, 0xfe, 0x74, 0xdf, 0xcc, 0x6e, 0xbb, 0x97, 0x76,
-	0xb9, 0x46, 0x1e, 0x7c, 0x9d, 0x99, 0xef, 0xfb, 0x66, 0xbe, 0xe9, 0xa4, 0xb0, 0x37, 0x13, 0x95,
-	0x2e, 0xd8, 0xd5, 0x54, 0x0a, 0x2d, 0x54, 0x9c, 0x0a, 0x89, 0x57, 0x0a, 0xe5, 0xac, 0x4c, 0x51,
-	0xd1, 0x26, 0x48, 0x7a, 0x6e, 0x30, 0xda, 0xce, 0x85, 0xc8, 0x2b, 0x8c, 0x9b, 0xdc, 0xf5, 0xdd,
-	0x4d, 0x8c, 0x93, 0xa9, 0xbe, 0x37, 0xa5, 0xd1, 0xee, 0x02, 0x36, 0x96, 0xb1, 0xa9, 0x46, 0x69,
-	0x2b, 0xa2, 0x6e, 0xc5, 0x64, 0x22, 0xf8, 0xe2, 0x5c, 0x86, 0xb5, 0xe0, 0xe2, 0x5c, 0x81, 0xac,
-	0xd2, 0x85, 0xc9, 0xbd, 0xfb, 0xb3, 0x04, 0x4b, 0x43, 0x21, 0xf1, 0xd2, 0xb4, 0x48, 0x8e, 0xa0,
-	0x3f, 0x42, 0x7d, 0xd2, 0x94, 0x5c, 0x6a, 0xa6, 0xef, 0x14, 0x21, 0xd4, 0x2a, 0x0d, 0x05, 0xe7,
-	0x98, 0xea, 0x52, 0xf0, 0x68, 0x83, 0x5a, 0x16, 0xaf, 0xf2, 0x14, 0xfa, 0x17, 0x98, 0x97, 0x4a,
-	0xa3, 0x1c, 0x98, 0xce, 0xc9, 0x1e, 0xf5, 0x06, 0xb1, 0x61, 0x53, 0x25, 0x59, 0xc3, 0xb5, 0x45,
-	0x8d, 0x2d, 0xb4, 0xb5, 0x85, 0x7e, 0xae, 0x6d, 0x21, 0x87, 0xd0, 0x4b, 0x9a, 0x31, 0xbe, 0x4d,
-	0x33, 0xa6, 0x91, 0xac, 0x50, 0x3b, 0x95, 0x89, 0x06, 0x71, 0xfb, 0xb0, 0x74, 0x2e, 0xa4, 0x1e,
-	0x4a, 0x64, 0x1a, 0x33, 0xd2, 0x6b, 0x61, 0x75, 0x30, 0x08, 0x1a, 0xc3, 0x6a, 0x9d, 0x57, 0xf5,
-	0x24, 0xad, 0xe0, 0x8e, 0xdf, 0x7b, 0x9d, 0x6f, 0xd2, 0x5f, 0xca, 0x4a, 0xa3, 0x0c, 0x52, 0x1d,
-	0xc0, 0x4a, 0x82, 0x15, 0x6a, 0x1c, 0x54, 0x55, 0xc3, 0x49, 0xa0, 0x75, 0x70, 0x9c, 0x04, 0x51,
-	0x1f, 0x60, 0x79, 0x84, 0xda, 0x8c, 0x56, 0xa3, 0xc8, 0x8b, 0x87, 0xea, 0x56, 0xd8, 0x9b, 0x88,
-	0xbc, 0x81, 0xfe, 0x69, 0xa9, 0x1c, 0xa4, 0xaf, 0xb7, 0xec, 0x16, 0xd7, 0x2b, 0x5a, 0x33, 0x95,
-	0xee, 0xa0, 0xaf, 0x7c, 0x29, 0xa7, 0xe0, 0x1f, 0xa3, 0x0e, 0x60, 0xc3, 0xea, 0x9e, 0x0d, 0x05,
-	0xbf, 0x29, 0x73, 0x4b, 0xb8, 0x36, 0x17, 0x9d, 0x98, 0xb8, 0x0a, 0x52, 0x24, 0xb0, 0x3e, 0x2c,
-	0xca, 0x2a, 0x33, 0x3c, 0x09, 0x6a, 0x4c, 0xeb, 0xad, 0xed, 0x2c, 0x6a, 0x29, 0x29, 0x55, 0x2a,
-	0x66, 0x28, 0xef, 0xa3, 0xce, 0x5b, 0x20, 0x87, 0xb0, 0xea, 0xb0, 0xa8, 0x53, 0xa1, 0xf4, 0xa3,
-	0x5c, 0xff, 0x08, 0x1b, 0x2e, 0x6e, 0x2e, 0xff, 0x18, 0xec, 0x6b, 0x78, 0x36, 0xdf, 0x98, 0x07,
-	0xe8, 0x36, 0x37, 0x80, 0x95, 0x11, 0x6a, 0x47, 0xa7, 0x6b, 0xb8, 0x93, 0xb2, 0x86, 0x77, 0x29,
-	0x68, 0x73, 0x96, 0x6e, 0xab, 0x9e, 0x62, 0xdf, 0x2f, 0x57, 0xe4, 0x13, 0xf4, 0x2e, 0x91, 0x67,
-	0xe7, 0x2c, 0xbd, 0x45, 0x3d, 0xe6, 0x64, 0xab, 0xf3, 0x98, 0x6c, 0x3c, 0x38, 0xdb, 0x09, 0x10,
-	0x43, 0x75, 0x81, 0x4c, 0x09, 0x6e, 0xd7, 0x1a, 0x2d, 0x5a, 0x8a, 0xa9, 0x08, 0x32, 0x1d, 0x43,
-	0x7f, 0x7e, 0x38, 0x96, 0xe6, 0x79, 0xe0, 0xae, 0x82, 0x1c, 0x47, 0xb0, 0x79, 0x81, 0xa9, 0xe0,
-	0x69, 0x59, 0x61, 0xd0, 0x83, 0x10, 0xf8, 0x0c, 0x5e, 0xfa, 0xd6, 0xfd, 0x28, 0x75, 0x71, 0x2e,
-	0xc5, 0xef, 0xfb, 0x41, 0x96, 0x49, 0x54, 0x8a, 0x6c, 0xfb, 0xee, 0x51, 0x37, 0xf9, 0x60, 0x13,
-	0xfb, 0xf0, 0x74, 0x84, 0xda, 0x1c, 0x53, 0xf8, 0x44, 0x3b, 0x57, 0xf7, 0xd5, 0x3e, 0x72, 0x89,
-	0xfc, 0xbf, 0xdc, 0x5d, 0x02, 0xeb, 0x86, 0x62, 0x3c, 0x61, 0x39, 0x26, 0xe2, 0x17, 0xaf, 0x04,
-	0xcb, 0xc8, 0x66, 0xab, 0xea, 0x85, 0x43, 0x2c, 0xc7, 0xb7, 0xb0, 0x2b, 0x64, 0x4e, 0xc5, 0x14,
-	0x79, 0x2a, 0x64, 0x46, 0xcd, 0x37, 0x41, 0xdd, 0x2f, 0xeb, 0x78, 0xed, 0x7b, 0x13, 0x74, 0xbe,
-	0x88, 0x9f, 0x07, 0x79, 0xa9, 0x8b, 0xbb, 0xeb, 0xda, 0xfa, 0xb8, 0xc5, 0xc6, 0x06, 0xfb, 0xd6,
-	0x7e, 0x31, 0xb3, 0xf7, 0x71, 0x2e, 0xbc, 0x0f, 0xf1, 0xfa, 0x49, 0x93, 0xda, 0xff, 0x1b, 0x00,
-	0x00, 0xff, 0xff, 0xe7, 0xe2, 0x9c, 0x32, 0x35, 0x07, 0x00, 0x00,
+	// 625 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x4f, 0x14, 0x31,
+	0x14, 0x0f, 0x17, 0xa3, 0x8f, 0x85, 0x85, 0xf2, 0xa1, 0x59, 0x82, 0xc2, 0x49, 0x0f, 0xda, 0x35,
+	0x82, 0x98, 0x68, 0xa2, 0x59, 0x76, 0x74, 0xd9, 0x64, 0x31, 0x04, 0xa2, 0x26, 0x5e, 0x48, 0x99,
+	0x79, 0xcc, 0x4c, 0x98, 0x6d, 0x37, 0x6d, 0x59, 0xdd, 0x3f, 0xde, 0xc4, 0xcc, 0xb4, 0xb3, 0x69,
+	0x87, 0x6d, 0xe4, 0xe0, 0xf5, 0xbd, 0xdf, 0x47, 0xdf, 0xaf, 0x7d, 0x29, 0xec, 0x4f, 0x45, 0xa1,
+	0x33, 0x76, 0x39, 0x91, 0x42, 0x0b, 0xd5, 0x8d, 0x85, 0xc4, 0x4b, 0x85, 0x72, 0x9a, 0xc7, 0xa8,
+	0x68, 0x55, 0x24, 0x2d, 0xb7, 0xd8, 0xd9, 0x49, 0x85, 0x48, 0x0b, 0xec, 0x56, 0xbd, 0xab, 0xdb,
+	0xeb, 0x2e, 0x8e, 0x27, 0x7a, 0x66, 0xa0, 0x9d, 0xbd, 0x05, 0x6a, 0x2c, 0x61, 0x13, 0x8d, 0xd2,
+	0x22, 0x3a, 0x4d, 0xc4, 0x78, 0x2c, 0xf8, 0xe2, 0x5e, 0x82, 0xa5, 0xe1, 0xe2, 0x5e, 0x86, 0xac,
+	0xd0, 0x99, 0xe9, 0xbd, 0xf9, 0xb3, 0x0c, 0xcb, 0x7d, 0x21, 0xf1, 0xc2, 0x1c, 0x91, 0x7c, 0x82,
+	0xf6, 0x00, 0xf5, 0x49, 0x05, 0xb9, 0xd0, 0x4c, 0xdf, 0x2a, 0x42, 0xa8, 0x75, 0xea, 0x0b, 0xce,
+	0x31, 0xd6, 0xb9, 0xe0, 0x9d, 0x4d, 0x6a, 0x55, 0x5c, 0xe4, 0x8b, 0xa5, 0xd7, 0x4b, 0x64, 0x04,
+	0xed, 0x73, 0x4c, 0x73, 0xa5, 0x51, 0xf6, 0xcc, 0xe9, 0xc9, 0x3e, 0xf5, 0x86, 0xb1, 0x65, 0x83,
+	0x92, 0xac, 0xd2, 0xdb, 0xa6, 0x26, 0x1a, 0x5a, 0x47, 0x43, 0x3f, 0x97, 0xd1, 0x90, 0x23, 0x68,
+	0x45, 0xd5, 0x28, 0xdf, 0x26, 0x09, 0xd3, 0x48, 0x56, 0xa9, 0x9d, 0xcc, 0x54, 0x83, 0xbc, 0x03,
+	0x58, 0x3e, 0x13, 0x52, 0xf7, 0x25, 0x32, 0x8d, 0x09, 0x69, 0xd5, 0xb4, 0xb2, 0x18, 0x24, 0x0d,
+	0x61, 0xad, 0xec, 0xab, 0x72, 0x9a, 0xda, 0x70, 0xd7, 0x3f, 0x7b, 0xd9, 0xaf, 0xda, 0x5f, 0xf2,
+	0x42, 0xa3, 0x0c, 0x4a, 0x1d, 0xc2, 0x6a, 0x84, 0x05, 0x6a, 0xec, 0x15, 0x45, 0xa5, 0x49, 0xa0,
+	0x4e, 0x71, 0x18, 0x05, 0x59, 0xef, 0x60, 0x65, 0x80, 0xda, 0x8c, 0x56, 0xb2, 0xc8, 0x93, 0xbb,
+	0xee, 0xd6, 0xd8, 0x9b, 0x88, 0xbc, 0x84, 0xf6, 0x28, 0x57, 0x0e, 0xd3, 0xf7, 0x5b, 0x71, 0xc1,
+	0x8a, 0x8c, 0x60, 0xdd, 0x20, 0xdd, 0x41, 0x9f, 0xf9, 0x56, 0x0e, 0xe0, 0x1f, 0xa3, 0xf6, 0x60,
+	0xd3, 0xfa, 0x9e, 0xf6, 0x05, 0xbf, 0xce, 0x53, 0x2b, 0xb8, 0x3e, 0x37, 0x1d, 0x9b, 0xba, 0x0a,
+	0x4a, 0x44, 0xb0, 0xd1, 0xcf, 0xf2, 0x22, 0x31, 0x3a, 0x11, 0x6a, 0x8c, 0xcb, 0x5b, 0xdb, 0x5d,
+	0x74, 0xa4, 0x28, 0x57, 0xb1, 0x98, 0xa2, 0x9c, 0x75, 0x1a, 0x6f, 0x81, 0x1c, 0xc1, 0x9a, 0xa3,
+	0xa2, 0x46, 0x42, 0xe9, 0x7b, 0xa5, 0xfe, 0x1e, 0x36, 0x5d, 0xde, 0xdc, 0xfe, 0x3e, 0xdc, 0xe7,
+	0xf0, 0x68, 0x7e, 0x63, 0x1e, 0xa1, 0x79, 0xb8, 0x1e, 0xac, 0x0e, 0x50, 0x3b, 0x3e, 0xcd, 0xc0,
+	0x9d, 0x96, 0x0d, 0xbc, 0x29, 0x41, 0xab, 0xd5, 0x74, 0x8f, 0xea, 0x39, 0xb6, 0x7d, 0xb8, 0x22,
+	0x1f, 0xa1, 0x75, 0x81, 0x3c, 0x39, 0x63, 0xf1, 0x0d, 0xea, 0x21, 0x27, 0xdb, 0x8d, 0xc7, 0x64,
+	0xeb, 0xc1, 0xd9, 0x4e, 0x80, 0x18, 0xa9, 0x73, 0x64, 0x4a, 0x70, 0x7b, 0xad, 0x9d, 0x45, 0x97,
+	0x62, 0x10, 0x41, 0xa5, 0x63, 0x68, 0xcf, 0x17, 0xc7, 0xca, 0x3c, 0x0e, 0xec, 0x55, 0x50, 0xe3,
+	0x03, 0x6c, 0x9d, 0x63, 0x2c, 0x78, 0x9c, 0x17, 0x18, 0xcc, 0x20, 0x44, 0x3e, 0x85, 0xa7, 0x7e,
+	0x74, 0x3f, 0x72, 0x9d, 0x9d, 0x49, 0xf1, 0x7b, 0xd6, 0x4b, 0x12, 0x89, 0x4a, 0x91, 0x1d, 0x3f,
+	0x3d, 0xea, 0x36, 0xef, 0xdc, 0xc4, 0x01, 0x3c, 0x1c, 0xa0, 0x36, 0xcb, 0x14, 0x5e, 0xd1, 0xc6,
+	0xd6, 0x7d, 0xb5, 0x8f, 0x5c, 0x22, 0xff, 0x2f, 0x7b, 0x17, 0xc1, 0x86, 0x91, 0x18, 0x8e, 0x59,
+	0x8a, 0x91, 0xf8, 0xc5, 0x0b, 0xc1, 0x12, 0xb2, 0x55, 0xbb, 0x7a, 0xe5, 0x90, 0xca, 0xf1, 0x0d,
+	0xec, 0x09, 0x99, 0x52, 0x31, 0x41, 0x1e, 0x0b, 0x99, 0x50, 0xf3, 0x55, 0x50, 0xf7, 0xdb, 0x3a,
+	0x5e, 0xff, 0x5e, 0x15, 0x9d, 0x6f, 0xe2, 0xe7, 0x61, 0x9a, 0xeb, 0xec, 0xf6, 0xaa, 0x8c, 0xbe,
+	0x5b, 0x73, 0xbb, 0x86, 0xfb, 0xca, 0x7e, 0x33, 0xd3, 0xb7, 0xdd, 0x54, 0x78, 0x9f, 0xe2, 0xd5,
+	0x83, 0xaa, 0x75, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x44, 0xa5, 0x7d, 0x39, 0x07, 0x00,
+	0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -86,8 +87,9 @@
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type CoreServiceClient interface {
-	//	 in coreProxy interface
-	GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error)
+	// GetHealthStatus is used by a CoreService client to verify connectivity
+	// to the gRPC server hosting the CoreService service
+	GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (CoreService_GetHealthStatusClient, error)
 	RegisterAdapter(ctx context.Context, in *core_adapter.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error)
 	DeviceUpdate(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
 	PortCreated(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error)
@@ -122,13 +124,35 @@
 	return &coreServiceClient{cc}
 }
 
-func (c *coreServiceClient) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	out := new(health.HealthStatus)
-	err := c.cc.Invoke(ctx, "/core_service.CoreService/GetHealthStatus", in, out, opts...)
+func (c *coreServiceClient) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (CoreService_GetHealthStatusClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_CoreService_serviceDesc.Streams[0], "/core_service.CoreService/GetHealthStatus", opts...)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	x := &coreServiceGetHealthStatusClient{stream}
+	return x, nil
+}
+
+type CoreService_GetHealthStatusClient interface {
+	Send(*common.Connection) error
+	Recv() (*health.HealthStatus, error)
+	grpc.ClientStream
+}
+
+type coreServiceGetHealthStatusClient struct {
+	grpc.ClientStream
+}
+
+func (x *coreServiceGetHealthStatusClient) Send(m *common.Connection) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *coreServiceGetHealthStatusClient) Recv() (*health.HealthStatus, error) {
+	m := new(health.HealthStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
 }
 
 func (c *coreServiceClient) RegisterAdapter(ctx context.Context, in *core_adapter.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error) {
@@ -340,8 +364,9 @@
 
 // CoreServiceServer is the server API for CoreService service.
 type CoreServiceServer interface {
-	//	 in coreProxy interface
-	GetHealthStatus(context.Context, *common.Connection) (*health.HealthStatus, error)
+	// GetHealthStatus is used by a CoreService client to verify connectivity
+	// to the gRPC server hosting the CoreService service
+	GetHealthStatus(CoreService_GetHealthStatusServer) error
 	RegisterAdapter(context.Context, *core_adapter.AdapterRegistration) (*empty.Empty, error)
 	DeviceUpdate(context.Context, *voltha.Device) (*empty.Empty, error)
 	PortCreated(context.Context, *voltha.Port) (*empty.Empty, error)
@@ -372,8 +397,8 @@
 type UnimplementedCoreServiceServer struct {
 }
 
-func (*UnimplementedCoreServiceServer) GetHealthStatus(ctx context.Context, req *common.Connection) (*health.HealthStatus, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+func (*UnimplementedCoreServiceServer) GetHealthStatus(srv CoreService_GetHealthStatusServer) error {
+	return status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
 }
 func (*UnimplementedCoreServiceServer) RegisterAdapter(ctx context.Context, req *core_adapter.AdapterRegistration) (*empty.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method RegisterAdapter not implemented")
@@ -449,22 +474,30 @@
 	s.RegisterService(&_CoreService_serviceDesc, srv)
 }
 
-func _CoreService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(common.Connection)
-	if err := dec(in); err != nil {
+func _CoreService_GetHealthStatus_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(CoreServiceServer).GetHealthStatus(&coreServiceGetHealthStatusServer{stream})
+}
+
+type CoreService_GetHealthStatusServer interface {
+	Send(*health.HealthStatus) error
+	Recv() (*common.Connection, error)
+	grpc.ServerStream
+}
+
+type coreServiceGetHealthStatusServer struct {
+	grpc.ServerStream
+}
+
+func (x *coreServiceGetHealthStatusServer) Send(m *health.HealthStatus) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *coreServiceGetHealthStatusServer) Recv() (*common.Connection, error) {
+	m := new(common.Connection)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(CoreServiceServer).GetHealthStatus(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/core_service.CoreService/GetHealthStatus",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(CoreServiceServer).GetHealthStatus(ctx, req.(*common.Connection))
-	}
-	return interceptor(ctx, in, info, handler)
+	return m, nil
 }
 
 func _CoreService_RegisterAdapter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -886,10 +919,6 @@
 	HandlerType: (*CoreServiceServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
-			MethodName: "GetHealthStatus",
-			Handler:    _CoreService_GetHealthStatus_Handler,
-		},
-		{
 			MethodName: "RegisterAdapter",
 			Handler:    _CoreService_RegisterAdapter_Handler,
 		},
@@ -982,6 +1011,13 @@
 			Handler:    _CoreService_UpdateImageDownload_Handler,
 		},
 	},
-	Streams:  []grpc.StreamDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "GetHealthStatus",
+			Handler:       _CoreService_GetHealthStatus_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
 	Metadata: "voltha_protos/core_services.proto",
 }
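
On the server side the generated handler hands the raw stream to the CoreServiceServer implementation. One plausible handler, shown only as an illustration of the generated CoreService_GetHealthStatusServer API rather than the project's actual logic, echoes a HEALTHY status for every Connection it receives, so a broken or silent stream becomes the failure signal on both ends:

package healthsrv

import (
	"io"

	"github.com/opencord/voltha-protos/v5/go/core_service"
	"github.com/opencord/voltha-protos/v5/go/health"
)

// healthServer is a sketch; only the streaming health method is shown, the
// remaining CoreServiceServer methods come from the embedded Unimplemented type.
type healthServer struct {
	core_service.UnimplementedCoreServiceServer
}

// GetHealthStatus answers every client Connection message with a HEALTHY
// status. When the client disappears, Recv returns an error and the stream
// (and this server-side call) terminates.
func (s *healthServer) GetHealthStatus(stream core_service.CoreService_GetHealthStatusServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil // client closed its send side cleanly
			}
			return err
		}
		if err := stream.Send(&health.HealthStatus{State: health.HealthStatus_HEALTHY}); err != nil {
			return err
		}
	}
}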
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service/olt_inter_adapter_service.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service/olt_inter_adapter_service.pb.go
index 93dade6..1e5c21c 100644
--- a/vendor/github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service/olt_inter_adapter_service.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service/olt_inter_adapter_service.pb.go
@@ -33,28 +33,29 @@
 }
 
 var fileDescriptor_3ddb40a5aae0f6e1 = []byte{
-	// 330 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x3f, 0x4f, 0xf3, 0x30,
-	0x10, 0x87, 0xf5, 0xbe, 0x03, 0x83, 0x17, 0xc0, 0x42, 0x95, 0x48, 0x59, 0x18, 0x19, 0xea, 0x20,
-	0x10, 0x13, 0x53, 0xcb, 0x9f, 0xb6, 0x03, 0x6a, 0x45, 0x11, 0x03, 0x4b, 0xe5, 0xba, 0xd7, 0x24,
-	0x92, 0xe3, 0x0b, 0xf6, 0xa5, 0xd0, 0x6f, 0xc1, 0x27, 0xe4, 0xb3, 0xa0, 0xc4, 0xee, 0x90, 0xa8,
-	0x99, 0xa2, 0xe8, 0x9e, 0xdf, 0xe3, 0x3b, 0xdd, 0xb1, 0xc1, 0x16, 0x35, 0xa5, 0x72, 0x59, 0x58,
-	0x24, 0x74, 0x31, 0x6a, 0x5a, 0x66, 0x86, 0xc0, 0x2e, 0xe5, 0x5a, 0x16, 0xd5, 0xd7, 0x81, 0xdd,
-	0x66, 0x0a, 0x44, 0x0d, 0xf0, 0xf3, 0x4e, 0x20, 0x8a, 0x9a, 0x26, 0x85, 0x79, 0x8e, 0xc6, 0xc7,
-	0xa2, 0x7e, 0x82, 0x98, 0x68, 0x88, 0xeb, 0xbf, 0x55, 0xb9, 0x89, 0x21, 0x2f, 0x68, 0x17, 0x8a,
-	0x97, 0xcd, 0x60, 0xc3, 0x1e, 0x90, 0x96, 0x3b, 0x05, 0xa9, 0x29, 0xf5, 0xb5, 0x9b, 0xdf, 0xff,
-	0xac, 0x37, 0xd3, 0x34, 0xad, 0x62, 0x43, 0x9f, 0x5a, 0xf8, 0x96, 0xf8, 0x3d, 0x3b, 0x1e, 0x03,
-	0x4d, 0x6a, 0x7a, 0x41, 0x92, 0x4a, 0xc7, 0xb9, 0x08, 0x8d, 0x3d, 0xa0, 0x31, 0xa0, 0x28, 0x43,
-	0x13, 0x9d, 0x89, 0x20, 0x6c, 0x90, 0xcf, 0xec, 0x64, 0x6e, 0xf1, 0x7b, 0x37, 0xcb, 0x55, 0xf6,
-	0x0a, 0x9f, 0x25, 0x38, 0xe2, 0x91, 0x68, 0x76, 0x57, 0xd5, 0x5e, 0xc0, 0x39, 0x99, 0x40, 0xd4,
-	0x13, 0x7e, 0x48, 0xb1, 0x1f, 0x52, 0x3c, 0x55, 0x43, 0xf2, 0x09, 0x3b, 0x6d, 0x7b, 0x1c, 0xef,
-	0x77, 0x8b, 0x5c, 0xa7, 0xa9, 0x64, 0xbd, 0x31, 0xd0, 0x1b, 0xa8, 0x74, 0x6e, 0x71, 0x93, 0x69,
-	0x98, 0x1a, 0x47, 0xd2, 0x28, 0xe0, 0xd7, 0x2d, 0xdd, 0x01, 0x26, 0x3c, 0xbd, 0xef, 0xf6, 0xaa,
-	0x3b, 0xf1, 0x88, 0x5f, 0x46, 0xa3, 0x5c, 0x07, 0x74, 0xf4, 0xf3, 0x8f, 0x0d, 0xd0, 0x26, 0x02,
-	0x0b, 0x30, 0x0a, 0xed, 0x5a, 0xf8, 0x65, 0x88, 0xce, 0x53, 0x18, 0x5d, 0xbc, 0xd7, 0xc4, 0xe1,
-	0xad, 0x7c, 0x0c, 0x93, 0x8c, 0xd2, 0x72, 0x55, 0x2d, 0x22, 0xde, 0x3b, 0x63, 0xef, 0x1c, 0x84,
-	0x05, 0x6f, 0xef, 0xe2, 0x04, 0xbb, 0x8f, 0x71, 0x75, 0x54, 0x73, 0xb7, 0x7f, 0x01, 0x00, 0x00,
-	0xff, 0xff, 0xac, 0x65, 0xa9, 0x8b, 0xbe, 0x02, 0x00, 0x00,
+	// 337 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x4e, 0xe3, 0x30,
+	0x10, 0x87, 0xd5, 0x3d, 0xec, 0xc1, 0x97, 0xdd, 0xb5, 0x56, 0x95, 0x48, 0xb9, 0x70, 0x83, 0x43,
+	0x9d, 0x0a, 0xc4, 0x19, 0xb5, 0xfc, 0x69, 0x7b, 0x40, 0xad, 0x28, 0xe2, 0xc0, 0xa5, 0x72, 0xdd,
+	0x69, 0x12, 0xc9, 0xf1, 0x04, 0x7b, 0x52, 0xe8, 0x5b, 0xf0, 0x8c, 0x3c, 0x09, 0x4a, 0xec, 0x4a,
+	0xa4, 0x6a, 0x4e, 0x51, 0x34, 0xdf, 0xef, 0xf3, 0x8c, 0x66, 0x58, 0x7f, 0x8b, 0x9a, 0x52, 0xb9,
+	0x2c, 0x2c, 0x12, 0xba, 0x18, 0x35, 0x2d, 0x33, 0x43, 0x60, 0x97, 0x72, 0x2d, 0x8b, 0xea, 0xeb,
+	0xc0, 0x6e, 0x33, 0x05, 0xa2, 0x06, 0xf8, 0x49, 0x2b, 0x10, 0x45, 0x4d, 0x93, 0xc2, 0x3c, 0x47,
+	0xe3, 0x63, 0x51, 0x2f, 0x41, 0x4c, 0x34, 0xc4, 0xf5, 0xdf, 0xaa, 0xdc, 0xc4, 0x90, 0x17, 0xb4,
+	0x0b, 0xc5, 0xb3, 0x66, 0xb0, 0x61, 0x0f, 0xc8, 0x81, 0x3b, 0x05, 0xa9, 0x29, 0xf5, 0xb5, 0xcb,
+	0xaf, 0x5f, 0xac, 0x3b, 0xd3, 0x34, 0xad, 0x62, 0x43, 0x9f, 0x5a, 0xf8, 0x96, 0xf8, 0x0d, 0xfb,
+	0x33, 0x06, 0x9a, 0xd4, 0xf4, 0x82, 0x24, 0x95, 0x8e, 0x73, 0x11, 0x1a, 0xbb, 0x45, 0x63, 0x40,
+	0x51, 0x86, 0x26, 0xfa, 0x2f, 0x82, 0xf0, 0x27, 0x79, 0xde, 0x19, 0x74, 0xf8, 0x03, 0xfb, 0x3b,
+	0xb7, 0xf8, 0xb1, 0x9b, 0xe5, 0x2a, 0x7b, 0x82, 0xb7, 0x12, 0x1c, 0xf1, 0x48, 0x34, 0x3b, 0xac,
+	0x6a, 0x8f, 0xe0, 0x9c, 0x4c, 0x20, 0xea, 0x0a, 0x3f, 0xa8, 0xd8, 0x0f, 0x2a, 0xee, 0xab, 0x41,
+	0xf9, 0x84, 0xfd, 0x3b, 0xf4, 0x38, 0xde, 0x6b, 0x17, 0xb9, 0x56, 0x53, 0xc9, 0xba, 0x63, 0xa0,
+	0x67, 0x50, 0xe9, 0xdc, 0xe2, 0x26, 0xd3, 0x30, 0x35, 0x8e, 0xa4, 0x51, 0xc0, 0x07, 0x07, 0xba,
+	0x23, 0x4c, 0x78, 0x7a, 0xdf, 0xed, 0x45, 0x7b, 0xe2, 0x0e, 0xdf, 0x8d, 0x46, 0xb9, 0x0e, 0xe8,
+	0xe8, 0xb3, 0xc3, 0xfa, 0x68, 0x13, 0x81, 0x05, 0x18, 0x85, 0x76, 0x2d, 0xfc, 0x42, 0x44, 0xeb,
+	0x39, 0x8c, 0x4e, 0x5f, 0x6a, 0xe2, 0xf8, 0x66, 0x5e, 0x87, 0x49, 0x46, 0x69, 0xb9, 0xaa, 0x96,
+	0x11, 0xef, 0x9d, 0xb1, 0x77, 0xf6, 0xc3, 0x92, 0xb7, 0xd7, 0x71, 0x82, 0xed, 0x07, 0xb9, 0xfa,
+	0x5d, 0x73, 0x57, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x9f, 0xd5, 0x8e, 0xc2, 0x02, 0x00,
+	0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -69,9 +70,9 @@
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type OltInterAdapterServiceClient interface {
-	// GetHealthStatus is used by an OltInterAdapterService client to verify connectivity
-	// to the gRPC server hosting the OltInterAdapterService service
-	GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error)
+	// GetHealthStatus is used by an OltInterAdapterService client to detect a lost
+	// connection to the gRPC server hosting the OltInterAdapterService service
+	GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (OltInterAdapterService_GetHealthStatusClient, error)
 	ProxyOmciRequest(ctx context.Context, in *inter_adapter.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error)
 	ProxyOmciRequests(ctx context.Context, in *inter_adapter.OmciMessages, opts ...grpc.CallOption) (*empty.Empty, error)
 	GetTechProfileInstance(ctx context.Context, in *inter_adapter.TechProfileInstanceRequestMessage, opts ...grpc.CallOption) (*inter_adapter.TechProfileDownloadMessage, error)
@@ -85,13 +86,35 @@
 	return &oltInterAdapterServiceClient{cc}
 }
 
-func (c *oltInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	out := new(health.HealthStatus)
-	err := c.cc.Invoke(ctx, "/olt_inter_adapter_service.OltInterAdapterService/GetHealthStatus", in, out, opts...)
+func (c *oltInterAdapterServiceClient) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (OltInterAdapterService_GetHealthStatusClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_OltInterAdapterService_serviceDesc.Streams[0], "/olt_inter_adapter_service.OltInterAdapterService/GetHealthStatus", opts...)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	x := &oltInterAdapterServiceGetHealthStatusClient{stream}
+	return x, nil
+}
+
+type OltInterAdapterService_GetHealthStatusClient interface {
+	Send(*common.Connection) error
+	Recv() (*health.HealthStatus, error)
+	grpc.ClientStream
+}
+
+type oltInterAdapterServiceGetHealthStatusClient struct {
+	grpc.ClientStream
+}
+
+func (x *oltInterAdapterServiceGetHealthStatusClient) Send(m *common.Connection) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *oltInterAdapterServiceGetHealthStatusClient) Recv() (*health.HealthStatus, error) {
+	m := new(health.HealthStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
 }
 
 func (c *oltInterAdapterServiceClient) ProxyOmciRequest(ctx context.Context, in *inter_adapter.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
@@ -123,9 +146,9 @@
 
 // OltInterAdapterServiceServer is the server API for OltInterAdapterService service.
 type OltInterAdapterServiceServer interface {
-	// GetHealthStatus is used by an OltInterAdapterService client to verify connectivity
-	// to the gRPC server hosting the OltInterAdapterService service
-	GetHealthStatus(context.Context, *common.Connection) (*health.HealthStatus, error)
+	// GetHealthStatus is used by an OltInterAdapterService client to detect a lost
+	// connection to the gRPC server hosting the OltInterAdapterService service
+	GetHealthStatus(OltInterAdapterService_GetHealthStatusServer) error
 	ProxyOmciRequest(context.Context, *inter_adapter.OmciMessage) (*empty.Empty, error)
 	ProxyOmciRequests(context.Context, *inter_adapter.OmciMessages) (*empty.Empty, error)
 	GetTechProfileInstance(context.Context, *inter_adapter.TechProfileInstanceRequestMessage) (*inter_adapter.TechProfileDownloadMessage, error)
@@ -135,8 +158,8 @@
 type UnimplementedOltInterAdapterServiceServer struct {
 }
 
-func (*UnimplementedOltInterAdapterServiceServer) GetHealthStatus(ctx context.Context, req *common.Connection) (*health.HealthStatus, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+func (*UnimplementedOltInterAdapterServiceServer) GetHealthStatus(srv OltInterAdapterService_GetHealthStatusServer) error {
+	return status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
 }
 func (*UnimplementedOltInterAdapterServiceServer) ProxyOmciRequest(ctx context.Context, req *inter_adapter.OmciMessage) (*empty.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ProxyOmciRequest not implemented")
@@ -152,22 +175,30 @@
 	s.RegisterService(&_OltInterAdapterService_serviceDesc, srv)
 }
 
-func _OltInterAdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(common.Connection)
-	if err := dec(in); err != nil {
+func _OltInterAdapterService_GetHealthStatus_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(OltInterAdapterServiceServer).GetHealthStatus(&oltInterAdapterServiceGetHealthStatusServer{stream})
+}
+
+type OltInterAdapterService_GetHealthStatusServer interface {
+	Send(*health.HealthStatus) error
+	Recv() (*common.Connection, error)
+	grpc.ServerStream
+}
+
+type oltInterAdapterServiceGetHealthStatusServer struct {
+	grpc.ServerStream
+}
+
+func (x *oltInterAdapterServiceGetHealthStatusServer) Send(m *health.HealthStatus) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *oltInterAdapterServiceGetHealthStatusServer) Recv() (*common.Connection, error) {
+	m := new(common.Connection)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(OltInterAdapterServiceServer).GetHealthStatus(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/olt_inter_adapter_service.OltInterAdapterService/GetHealthStatus",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(OltInterAdapterServiceServer).GetHealthStatus(ctx, req.(*common.Connection))
-	}
-	return interceptor(ctx, in, info, handler)
+	return m, nil
 }
 
 func _OltInterAdapterService_ProxyOmciRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -229,10 +260,6 @@
 	HandlerType: (*OltInterAdapterServiceServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
-			MethodName: "GetHealthStatus",
-			Handler:    _OltInterAdapterService_GetHealthStatus_Handler,
-		},
-		{
 			MethodName: "ProxyOmciRequest",
 			Handler:    _OltInterAdapterService_ProxyOmciRequest_Handler,
 		},
@@ -245,6 +272,13 @@
 			Handler:    _OltInterAdapterService_GetTechProfileInstance_Handler,
 		},
 	},
-	Streams:  []grpc.StreamDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "GetHealthStatus",
+			Handler:       _OltInterAdapterService_GetHealthStatus_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
 	Metadata: "voltha_protos/olt_inter_adapter_service.proto",
 }
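
Because the stream is now the only liveness signal, a caller of the OltInterAdapterService would typically wrap it in a redial loop with backoff. A rough sketch, with an assumed endpoint, insecure transport credentials, and arbitrary backoff bounds:

package redial

import (
	"context"
	"time"

	"github.com/opencord/voltha-protos/v5/go/common"
	"github.com/opencord/voltha-protos/v5/go/olt_inter_adapter_service"
	"google.golang.org/grpc"
)

// watchOltInterAdapter re-opens the GetHealthStatus stream whenever it breaks,
// backing off between attempts. Dial errors and refused connections surface on
// the first Recv, which sends us back around the loop.
func watchOltInterAdapter(ctx context.Context, endpoint string) {
	backoff := time.Second
	for ctx.Err() == nil {
		conn, err := grpc.Dial(endpoint, grpc.WithInsecure())
		if err == nil {
			client := olt_inter_adapter_service.NewOltInterAdapterServiceClient(conn)
			if stream, serr := client.GetHealthStatus(ctx); serr == nil {
				_ = stream.Send(&common.Connection{}) // identity fields omitted in this sketch
				for {
					if _, rerr := stream.Recv(); rerr != nil {
						break // stream broken: fall through and redial
					}
					backoff = time.Second // healthy traffic resets the backoff
				}
			}
			_ = conn.Close()
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(backoff):
			if backoff < 30*time.Second {
				backoff *= 2
			}
		}
	}
}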
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service/onu_inter_adapter_service.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service/onu_inter_adapter_service.pb.go
index a2cf537..9094c10 100644
--- a/vendor/github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service/onu_inter_adapter_service.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service/onu_inter_adapter_service.pb.go
@@ -33,29 +33,30 @@
 }
 
 var fileDescriptor_f951f30caeee9ccd = []byte{
-	// 351 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0x4f, 0x4b, 0xf3, 0x40,
-	0x10, 0xc6, 0x79, 0x79, 0xc1, 0xc3, 0x4a, 0x15, 0x56, 0x29, 0x18, 0xbd, 0x14, 0x4f, 0x1e, 0xba,
-	0x01, 0xc5, 0x93, 0xa7, 0xfe, 0x91, 0x56, 0xa8, 0xb4, 0xd0, 0x22, 0xe2, 0xa5, 0x6c, 0x37, 0xd3,
-	0x64, 0x21, 0xd9, 0x09, 0xc9, 0xa4, 0xe2, 0xb7, 0xf0, 0xfb, 0xf9, 0x65, 0x24, 0xd9, 0x14, 0xba,
-	0xb5, 0x25, 0xa7, 0x10, 0x9e, 0xdf, 0xfc, 0x98, 0x81, 0x67, 0x59, 0x77, 0x83, 0x31, 0x45, 0x72,
-	0x99, 0x66, 0x48, 0x98, 0xfb, 0x68, 0x8a, 0xa5, 0x36, 0x04, 0xd9, 0x52, 0x06, 0x32, 0x2d, 0xbf,
-	0x39, 0x64, 0x1b, 0xad, 0x40, 0x54, 0x00, 0xbf, 0x3a, 0x0a, 0x78, 0x9e, 0x6b, 0x52, 0x98, 0x24,
-	0x68, 0xec, 0x98, 0x77, 0x1d, 0x22, 0x86, 0x31, 0xf8, 0xd5, 0xdf, 0xaa, 0x58, 0xfb, 0x90, 0xa4,
-	0xf4, 0x55, 0x87, 0x1d, 0x77, 0xd0, 0xb1, 0xd7, 0xc8, 0x9e, 0x3b, 0x02, 0x19, 0x53, 0x64, 0xb3,
-	0xfb, 0x9f, 0xff, 0xac, 0x3d, 0x35, 0xc5, 0x4b, 0x39, 0xd6, 0xb3, 0x53, 0x73, 0xbb, 0x12, 0x7f,
-	0x62, 0xe7, 0x23, 0xa0, 0x71, 0x45, 0xcf, 0x49, 0x52, 0x91, 0x73, 0x2e, 0xea, 0xc5, 0x06, 0x68,
-	0x0c, 0x28, 0xd2, 0x68, 0xbc, 0x4b, 0x51, 0x0b, 0x1d, 0x72, 0xc2, 0x5a, 0x95, 0x36, 0xd0, 0x4a,
-	0x96, 0x18, 0xbf, 0x15, 0xee, 0x6a, 0x4e, 0xfa, 0x0a, 0x79, 0x2e, 0x43, 0xf0, 0xda, 0xc2, 0x9e,
-	0x2a, 0xb6, 0xa7, 0x8a, 0xe7, 0xf2, 0x54, 0x3e, 0x64, 0x67, 0xd3, 0x44, 0xe9, 0x1d, 0x9d, 0xb7,
-	0xaf, 0x4b, 0x94, 0x6e, 0xb2, 0xbc, 0xb3, 0x8b, 0x21, 0x7e, 0x9a, 0x18, 0x65, 0xb0, 0x00, 0x15,
-	0xcd, 0x32, 0x5c, 0xeb, 0x18, 0xf8, 0xdd, 0x9e, 0x6a, 0x27, 0xdb, 0xe2, 0x4d, 0xe6, 0x09, 0x6b,
-	0x0d, 0x21, 0x06, 0x82, 0x11, 0x24, 0x33, 0xcc, 0xe8, 0xcf, 0xb5, 0x4e, 0xda, 0x64, 0x1b, 0xb3,
-	0x53, 0xcb, 0x2f, 0x06, 0x68, 0x88, 0x77, 0x0e, 0xba, 0x16, 0x0a, 0x4d, 0x93, 0xa9, 0xff, 0xfd,
-	0x8f, 0x75, 0x31, 0x0b, 0x05, 0xa6, 0x60, 0x14, 0x66, 0x81, 0xb0, 0x4d, 0x10, 0x47, 0x7b, 0xd8,
-	0xbf, 0x79, 0xab, 0x88, 0xc3, 0x95, 0xf8, 0xe8, 0x85, 0x9a, 0xa2, 0x62, 0x55, 0xb6, 0xc0, 0xdf,
-	0x3a, 0x7d, 0xeb, 0xec, 0xd6, 0xed, 0xda, 0x3c, 0xfa, 0x21, 0x1e, 0x7f, 0x09, 0xab, 0x93, 0x8a,
-	0x7b, 0xf8, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x10, 0x75, 0x34, 0x3b, 0x03, 0x00, 0x00,
+	// 354 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x4a, 0xc3, 0x40,
+	0x10, 0xc6, 0x29, 0x82, 0x87, 0x95, 0x2a, 0xac, 0x52, 0x30, 0x7a, 0x29, 0x5e, 0xf4, 0xd0, 0x8d,
+	0x28, 0x9e, 0xa5, 0x7f, 0xa4, 0x15, 0x2a, 0x2d, 0xb4, 0x88, 0x78, 0x29, 0xdb, 0xcd, 0x34, 0x59,
+	0x48, 0x76, 0x42, 0x32, 0xa9, 0xf8, 0x16, 0xbe, 0xa1, 0xaf, 0x22, 0xc9, 0xa6, 0xd0, 0xd4, 0x96,
+	0x9c, 0x42, 0xf8, 0x7e, 0xf3, 0x63, 0x06, 0xbe, 0x65, 0x9d, 0x35, 0x86, 0x14, 0xc8, 0x45, 0x9c,
+	0x20, 0x61, 0xea, 0xa2, 0xc9, 0x16, 0xda, 0x10, 0x24, 0x0b, 0xe9, 0xc9, 0x38, 0xff, 0xa6, 0x90,
+	0xac, 0xb5, 0x02, 0x51, 0x00, 0xfc, 0xf2, 0x20, 0xe0, 0x38, 0x55, 0x93, 0xc2, 0x28, 0x42, 0x63,
+	0xc7, 0x9c, 0x2b, 0x1f, 0xd1, 0x0f, 0xc1, 0x2d, 0xfe, 0x96, 0xd9, 0xca, 0x85, 0x28, 0xa6, 0xef,
+	0x32, 0x6c, 0x57, 0x07, 0x2b, 0xf6, 0x12, 0xd9, 0x71, 0x07, 0x20, 0x43, 0x0a, 0x6c, 0xf6, 0xf0,
+	0x7b, 0xc4, 0x5a, 0x13, 0x93, 0xbd, 0xe6, 0x63, 0x5d, 0x3b, 0x35, 0xb3, 0x2b, 0xf1, 0x67, 0x76,
+	0x36, 0x04, 0x1a, 0x15, 0xf4, 0x8c, 0x24, 0x65, 0x29, 0xe7, 0xa2, 0x5c, 0xac, 0x8f, 0xc6, 0x80,
+	0x22, 0x8d, 0xc6, 0xb9, 0x10, 0xa5, 0x70, 0x9b, 0xbc, 0x6d, 0xdc, 0x37, 0xf8, 0x98, 0x35, 0x0b,
+	0xb5, 0xa7, 0x95, 0xcc, 0x51, 0x7e, 0x23, 0xaa, 0xeb, 0x55, 0xd2, 0x37, 0x48, 0x53, 0xe9, 0x83,
+	0xd3, 0x12, 0xf6, 0x5c, 0xb1, 0x39, 0x57, 0xbc, 0xe4, 0xe7, 0xf2, 0x01, 0x3b, 0x9d, 0x44, 0x4a,
+	0x6f, 0xe9, 0x9c, 0x5d, 0x5d, 0xa4, 0x74, 0x9d, 0xe5, 0x83, 0x9d, 0x0f, 0xf0, 0xcb, 0x84, 0x28,
+	0xbd, 0x39, 0xa8, 0x60, 0x9a, 0xe0, 0x4a, 0x87, 0xc0, 0xef, 0x76, 0x54, 0x5b, 0xd9, 0x06, 0xaf,
+	0x33, 0x8f, 0x59, 0x73, 0x00, 0x21, 0x10, 0x0c, 0x21, 0x9a, 0x62, 0x42, 0xff, 0xae, 0xad, 0xa4,
+	0x75, 0xb6, 0x11, 0x3b, 0xb1, 0xfc, 0xbc, 0x8f, 0x86, 0x78, 0x7b, 0xaf, 0x6b, 0xae, 0xd0, 0xd4,
+	0x99, 0x7a, 0x3f, 0x0d, 0xd6, 0xc1, 0xc4, 0x17, 0x18, 0x83, 0x51, 0x98, 0x78, 0xc2, 0xb6, 0x41,
+	0x1c, 0xec, 0x62, 0xef, 0xfa, 0xbd, 0x20, 0xf6, 0xd7, 0xe2, 0xb3, 0xeb, 0x6b, 0x0a, 0xb2, 0x65,
+	0xde, 0x04, 0x77, 0xe3, 0x74, 0xad, 0xb3, 0x53, 0x36, 0x6c, 0xfd, 0xe4, 0xfa, 0x78, 0xf8, 0x35,
+	0x2c, 0x8f, 0x0b, 0xee, 0xf1, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x5f, 0x8c, 0x88, 0x3f, 0x03,
+	0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -70,9 +71,9 @@
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type OnuInterAdapterServiceClient interface {
-	// GetHealthStatus is used by an OnuInterAdapterService client to verify connectivity
-	// to the gRPC server hosting the OnuInterAdapterService service
-	GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error)
+	// GetHealthStatus is used by an OnuInterAdapterService client to detect a lost
+	// connection to the gRPC server hosting the OnuInterAdapterService service
+	GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (OnuInterAdapterService_GetHealthStatusClient, error)
 	OnuIndication(ctx context.Context, in *inter_adapter.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error)
 	OmciIndication(ctx context.Context, in *inter_adapter.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error)
 	DownloadTechProfile(ctx context.Context, in *inter_adapter.TechProfileDownloadMessage, opts ...grpc.CallOption) (*empty.Empty, error)
@@ -88,13 +89,35 @@
 	return &onuInterAdapterServiceClient{cc}
 }
 
-func (c *onuInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *common.Connection, opts ...grpc.CallOption) (*health.HealthStatus, error) {
-	out := new(health.HealthStatus)
-	err := c.cc.Invoke(ctx, "/onu_inter_adapter_service.OnuInterAdapterService/GetHealthStatus", in, out, opts...)
+func (c *onuInterAdapterServiceClient) GetHealthStatus(ctx context.Context, opts ...grpc.CallOption) (OnuInterAdapterService_GetHealthStatusClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_OnuInterAdapterService_serviceDesc.Streams[0], "/onu_inter_adapter_service.OnuInterAdapterService/GetHealthStatus", opts...)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	x := &onuInterAdapterServiceGetHealthStatusClient{stream}
+	return x, nil
+}
+
+type OnuInterAdapterService_GetHealthStatusClient interface {
+	Send(*common.Connection) error
+	Recv() (*health.HealthStatus, error)
+	grpc.ClientStream
+}
+
+type onuInterAdapterServiceGetHealthStatusClient struct {
+	grpc.ClientStream
+}
+
+func (x *onuInterAdapterServiceGetHealthStatusClient) Send(m *common.Connection) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *onuInterAdapterServiceGetHealthStatusClient) Recv() (*health.HealthStatus, error) {
+	m := new(health.HealthStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
 }
 
 func (c *onuInterAdapterServiceClient) OnuIndication(ctx context.Context, in *inter_adapter.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
@@ -144,9 +167,9 @@
 
 // OnuInterAdapterServiceServer is the server API for OnuInterAdapterService service.
 type OnuInterAdapterServiceServer interface {
-	// GetHealthStatus is used by an OnuInterAdapterService client to verify connectivity
-	// to the gRPC server hosting the OnuInterAdapterService service
-	GetHealthStatus(context.Context, *common.Connection) (*health.HealthStatus, error)
+	// GetHealthStatus is used by an OnuInterAdapterService client to detect a lost
+	// connection to the gRPC server hosting the OnuInterAdapterService service
+	GetHealthStatus(OnuInterAdapterService_GetHealthStatusServer) error
 	OnuIndication(context.Context, *inter_adapter.OnuIndicationMessage) (*empty.Empty, error)
 	OmciIndication(context.Context, *inter_adapter.OmciMessage) (*empty.Empty, error)
 	DownloadTechProfile(context.Context, *inter_adapter.TechProfileDownloadMessage) (*empty.Empty, error)
@@ -158,8 +181,8 @@
 type UnimplementedOnuInterAdapterServiceServer struct {
 }
 
-func (*UnimplementedOnuInterAdapterServiceServer) GetHealthStatus(ctx context.Context, req *common.Connection) (*health.HealthStatus, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+func (*UnimplementedOnuInterAdapterServiceServer) GetHealthStatus(srv OnuInterAdapterService_GetHealthStatusServer) error {
+	return status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
 }
 func (*UnimplementedOnuInterAdapterServiceServer) OnuIndication(ctx context.Context, req *inter_adapter.OnuIndicationMessage) (*empty.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method OnuIndication not implemented")
@@ -181,22 +204,30 @@
 	s.RegisterService(&_OnuInterAdapterService_serviceDesc, srv)
 }
 
-func _OnuInterAdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(common.Connection)
-	if err := dec(in); err != nil {
+func _OnuInterAdapterService_GetHealthStatus_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(OnuInterAdapterServiceServer).GetHealthStatus(&onuInterAdapterServiceGetHealthStatusServer{stream})
+}
+
+type OnuInterAdapterService_GetHealthStatusServer interface {
+	Send(*health.HealthStatus) error
+	Recv() (*common.Connection, error)
+	grpc.ServerStream
+}
+
+type onuInterAdapterServiceGetHealthStatusServer struct {
+	grpc.ServerStream
+}
+
+func (x *onuInterAdapterServiceGetHealthStatusServer) Send(m *health.HealthStatus) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *onuInterAdapterServiceGetHealthStatusServer) Recv() (*common.Connection, error) {
+	m := new(common.Connection)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(OnuInterAdapterServiceServer).GetHealthStatus(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/onu_inter_adapter_service.OnuInterAdapterService/GetHealthStatus",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(OnuInterAdapterServiceServer).GetHealthStatus(ctx, req.(*common.Connection))
-	}
-	return interceptor(ctx, in, info, handler)
+	return m, nil
 }
 
 func _OnuInterAdapterService_OnuIndication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -294,10 +325,6 @@
 	HandlerType: (*OnuInterAdapterServiceServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
-			MethodName: "GetHealthStatus",
-			Handler:    _OnuInterAdapterService_GetHealthStatus_Handler,
-		},
-		{
 			MethodName: "OnuIndication",
 			Handler:    _OnuInterAdapterService_OnuIndication_Handler,
 		},
@@ -318,6 +345,13 @@
 			Handler:    _OnuInterAdapterService_DeleteTCont_Handler,
 		},
 	},
-	Streams:  []grpc.StreamDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "GetHealthStatus",
+			Handler:       _OnuInterAdapterService_GetHealthStatus_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
 	Metadata: "voltha_protos/onu_inter_adapter_service.proto",
 }
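
On shutdown the monitoring stream should be torn down deliberately rather than abandoned. A tiny sketch, assuming the stream was opened with a cancellable context:

package teardown

import (
	"context"

	"github.com/opencord/voltha-protos/v5/go/onu_inter_adapter_service"
)

// stopHealthStream closes our half of the GetHealthStatus stream and then
// cancels its context so a concurrent Recv unblocks promptly.
func stopHealthStream(cancel context.CancelFunc, stream onu_inter_adapter_service.OnuInterAdapterService_GetHealthStatusClient) {
	_ = stream.CloseSend() // no more keep-alives from our side
	cancel()               // any blocked Recv returns with a cancellation error
}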
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..a3c021d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context.  https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).  TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
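
On Go 1.9+ the files added below alias these types to the standard library, so a context produced through this vendored package is interchangeable with a standard context.Context. A small illustration (the package and function names here are mine):

package ctxcompat

import (
	"time"

	xcontext "golang.org/x/net/context"
)

// newRequestContext builds a context through the vendored package; on modern
// Go the returned value is a standard context.Context (type alias) and can be
// handed to any API expecting one.
func newRequestContext(timeout time.Duration) (xcontext.Context, xcontext.CancelFunc) {
	return xcontext.WithTimeout(xcontext.Background(), timeout)
}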
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 0000000..344bd14
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+var (
+	todo       = context.TODO()
+	background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	ctx, f := context.WithCancel(parent)
+	return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	ctx, f := context.WithDeadline(parent, deadline)
+	return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 0000000..64d31ec
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 0000000..5270db5
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,301 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case background:
+		return "context.Background"
+	case todo:
+		return "context.TODO"
+	}
+	return "unknown empty Context"
+}
+
+var (
+	background = new(emptyCtx)
+	todo       = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	c := newCancelCtx(parent)
+	propagateCancel(parent, c)
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+	return &cancelCtx{
+		Context: parent,
+		done:    make(chan struct{}),
+	}
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+	if parent.Done() == nil {
+		return // parent is never canceled
+	}
+	if p, ok := parentCancelCtx(parent); ok {
+		p.mu.Lock()
+		if p.err != nil {
+			// parent has already been canceled
+			child.cancel(false, p.err)
+		} else {
+			if p.children == nil {
+				p.children = make(map[canceler]bool)
+			}
+			p.children[child] = true
+		}
+		p.mu.Unlock()
+	} else {
+		go func() {
+			select {
+			case <-parent.Done():
+				child.cancel(false, parent.Err())
+			case <-child.Done():
+			}
+		}()
+	}
+}
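+
+// A minimal sketch of the guarantee propagateCancel provides (illustrative
+// only): canceling a parent closes the Done channel of every descendant.
+//
+// 	parent, cancelParent := WithCancel(Background())
+// 	child, cancelChild := WithCancel(parent)
+// 	defer cancelChild()
+//
+// 	cancelParent()
+// 	<-child.Done()  // returns promptly; the parent's cancelation reached the child
+// 	_ = child.Err() // Canceled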
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+	for {
+		switch c := parent.(type) {
+		case *cancelCtx:
+			return c, true
+		case *timerCtx:
+			return c.cancelCtx, true
+		case *valueCtx:
+			parent = c.Context
+		default:
+			return nil, false
+		}
+	}
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+	p, ok := parentCancelCtx(parent)
+	if !ok {
+		return
+	}
+	p.mu.Lock()
+	if p.children != nil {
+		delete(p.children, child)
+	}
+	p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+	cancel(removeFromParent bool, err error)
+	Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+	Context
+
+	done chan struct{} // closed by the first cancel call.
+
+	mu       sync.Mutex
+	children map[canceler]bool // set to nil by the first cancel call
+	err      error             // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+	return c.done
+}
+
+func (c *cancelCtx) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *cancelCtx) String() string {
+	return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+	if err == nil {
+		panic("context: internal error: missing cancel error")
+	}
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return // already canceled
+	}
+	c.err = err
+	close(c.done)
+	for child := range c.children {
+		// NOTE: acquiring the child's lock while holding parent's lock.
+		child.cancel(false, err)
+	}
+	c.children = nil
+	c.mu.Unlock()
+
+	if removeFromParent {
+		removeChild(c.Context, c)
+	}
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+		// The current deadline is already sooner than the new one.
+		return WithCancel(parent)
+	}
+	c := &timerCtx{
+		cancelCtx: newCancelCtx(parent),
+		deadline:  deadline,
+	}
+	propagateCancel(parent, c)
+	d := deadline.Sub(time.Now())
+	if d <= 0 {
+		c.cancel(true, DeadlineExceeded) // deadline has already passed
+		return c, func() { c.cancel(true, Canceled) }
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err == nil {
+		c.timer = time.AfterFunc(d, func() {
+			c.cancel(true, DeadlineExceeded)
+		})
+	}
+	return c, func() { c.cancel(true, Canceled) }
+}
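+
+// A minimal sketch of the two paths above (illustrative only): a deadline in
+// the past cancels the returned context immediately with DeadlineExceeded,
+// while a future deadline arms a timer that cancels it later.
+//
+// 	ctx, cancel := WithDeadline(Background(), time.Now().Add(-time.Second))
+// 	defer cancel()
+// 	<-ctx.Done()  // already closed
+// 	_ = ctx.Err() // DeadlineExceeded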
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+	*cancelCtx
+	timer *time.Timer // Under cancelCtx.mu.
+
+	deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+	return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+	c.cancelCtx.cancel(false, err)
+	if removeFromParent {
+		// Remove this timerCtx from its parent cancelCtx's children.
+		removeChild(c.cancelCtx.Context, c)
+	}
+	c.mu.Lock()
+	if c.timer != nil {
+		c.timer.Stop()
+		c.timer = nil
+	}
+	c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}
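+
+// A minimal lookup sketch (key types below are illustrative only): each
+// WithValue call wraps the previous context, and Value walks that chain
+// outward until a key matches or the root emptyCtx returns nil.
+//
+// 	type keyA struct{}
+// 	type keyB struct{}
+//
+// 	ctx := WithValue(WithValue(Background(), keyA{}, "a"), keyB{}, "b")
+// 	_ = ctx.Value(keyB{})  // "b", found on the outermost valueCtx
+// 	_ = ctx.Value(keyA{})  // "a", found one level up the chain
+// 	_ = ctx.Value("other") // nil, the lookup reaches the root emptyCtx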
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 0000000..1f97153
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,110 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
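+
+// A minimal sketch of the CancelFunc contract above (doWork is a placeholder,
+// illustrative only): because repeat calls are no-ops, cancel can be deferred
+// unconditionally and still be called early on the success path.
+//
+// 	ctx, cancel := WithCancel(Background())
+// 	defer cancel() // always runs; any later call is a no-op
+//
+// 	if err := doWork(ctx); err == nil {
+// 		cancel() // release resources as soon as the work is done
+// 	}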
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
index 3571ad6..6515668 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -163,11 +163,7 @@
 //       // For Kubernetes resources, the format is {api group}/{kind}.
 //       option (google.api.resource) = {
 //         type: "pubsub.googleapis.com/Topic"
-//         name_descriptor: {
-//           pattern: "projects/{project}/topics/{topic}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//           parent_name_extractor: "projects/{project}"
-//         }
+//         pattern: "projects/{project}/topics/{topic}"
 //       };
 //     }
 //
@@ -175,10 +171,7 @@
 //
 //     resources:
 //     - type: "pubsub.googleapis.com/Topic"
-//       name_descriptor:
-//         - pattern: "projects/{project}/topics/{topic}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//           parent_name_extractor: "projects/{project}"
+//       pattern: "projects/{project}/topics/{topic}"
 //
 // Sometimes, resources have multiple patterns, typically because they can
 // live under multiple parents.
@@ -188,26 +181,10 @@
 //     message LogEntry {
 //       option (google.api.resource) = {
 //         type: "logging.googleapis.com/LogEntry"
-//         name_descriptor: {
-//           pattern: "projects/{project}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//           parent_name_extractor: "projects/{project}"
-//         }
-//         name_descriptor: {
-//           pattern: "folders/{folder}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Folder"
-//           parent_name_extractor: "folders/{folder}"
-//         }
-//         name_descriptor: {
-//           pattern: "organizations/{organization}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Organization"
-//           parent_name_extractor: "organizations/{organization}"
-//         }
-//         name_descriptor: {
-//           pattern: "billingAccounts/{billing_account}/logs/{log}"
-//           parent_type: "billing.googleapis.com/BillingAccount"
-//           parent_name_extractor: "billingAccounts/{billing_account}"
-//         }
+//         pattern: "projects/{project}/logs/{log}"
+//         pattern: "folders/{folder}/logs/{log}"
+//         pattern: "organizations/{organization}/logs/{log}"
+//         pattern: "billingAccounts/{billing_account}/logs/{log}"
 //       };
 //     }
 //
@@ -215,48 +192,10 @@
 //
 //     resources:
 //     - type: 'logging.googleapis.com/LogEntry'
-//       name_descriptor:
-//         - pattern: "projects/{project}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//           parent_name_extractor: "projects/{project}"
-//         - pattern: "folders/{folder}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Folder"
-//           parent_name_extractor: "folders/{folder}"
-//         - pattern: "organizations/{organization}/logs/{log}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Organization"
-//           parent_name_extractor: "organizations/{organization}"
-//         - pattern: "billingAccounts/{billing_account}/logs/{log}"
-//           parent_type: "billing.googleapis.com/BillingAccount"
-//           parent_name_extractor: "billingAccounts/{billing_account}"
-//
-// For flexible resources, the resource name doesn't contain parent names, but
-// the resource itself has parents for policy evaluation.
-//
-// Example:
-//
-//     message Shelf {
-//       option (google.api.resource) = {
-//         type: "library.googleapis.com/Shelf"
-//         name_descriptor: {
-//           pattern: "shelves/{shelf}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//         }
-//         name_descriptor: {
-//           pattern: "shelves/{shelf}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Folder"
-//         }
-//       };
-//     }
-//
-// The ResourceDescriptor Yaml config will look like:
-//
-//     resources:
-//     - type: 'library.googleapis.com/Shelf'
-//       name_descriptor:
-//         - pattern: "shelves/{shelf}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Project"
-//         - pattern: "shelves/{shelf}"
-//           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+//       pattern: "projects/{project}/logs/{log}"
+//       pattern: "folders/{folder}/logs/{log}"
+//       pattern: "organizations/{organization}/logs/{log}"
+//       pattern: "billingAccounts/{billing_account}/logs/{log}"
 type ResourceDescriptor struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
new file mode 100644
index 0000000..5866905
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -0,0 +1,810 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+// Package structpb contains generated types for google/protobuf/struct.proto.
+//
+// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+// used to represent arbitrary JSON. The Value message represents a JSON value,
+// the Struct message represents a JSON object, and the ListValue message
+// represents a JSON array. See https://json.org for more information.
+//
+// The Value, Struct, and ListValue types have generated MarshalJSON and
+// UnmarshalJSON methods such that they serialize JSON equivalent to what the
+// messages themselves represent. Use of these types with the
+// "google.golang.org/protobuf/encoding/protojson" package
+// ensures that they will be serialized as their JSON equivalent.
+//
+//
+// Conversion to and from a Go interface
+//
+// The standard Go "encoding/json" package has functionality to serialize
+// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+// ListValue.AsSlice methods can convert the protobuf message representation into
+// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// This form can be used with other packages that operate on such data structures
+// and also directly with the standard json package.
+//
+// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// forms back as Value, Struct, and ListValue messages, use the NewStruct,
+// NewList, and NewValue constructor functions.
+//
+//
+// Example usage
+//
+// Consider the following example JSON object:
+//
+//	{
+//		"firstName": "John",
+//		"lastName": "Smith",
+//		"isAlive": true,
+//		"age": 27,
+//		"address": {
+//			"streetAddress": "21 2nd Street",
+//			"city": "New York",
+//			"state": "NY",
+//			"postalCode": "10021-3100"
+//		},
+//		"phoneNumbers": [
+//			{
+//				"type": "home",
+//				"number": "212 555-1234"
+//			},
+//			{
+//				"type": "office",
+//				"number": "646 555-4567"
+//			}
+//		],
+//		"children": [],
+//		"spouse": null
+//	}
+//
+// To construct a Value message representing the above JSON object:
+//
+//	m, err := structpb.NewValue(map[string]interface{}{
+//		"firstName": "John",
+//		"lastName":  "Smith",
+//		"isAlive":   true,
+//		"age":       27,
+//		"address": map[string]interface{}{
+//			"streetAddress": "21 2nd Street",
+//			"city":          "New York",
+//			"state":         "NY",
+//			"postalCode":    "10021-3100",
+//		},
+//		"phoneNumbers": []interface{}{
+//			map[string]interface{}{
+//				"type":   "home",
+//				"number": "212 555-1234",
+//			},
+//			map[string]interface{}{
+//				"type":   "office",
+//				"number": "646 555-4567",
+//			},
+//		},
+//		"children": []interface{}{},
+//		"spouse":   nil,
+//	})
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of m as a *structpb.Value
+//
+package structpb
+
+import (
+	base64 "encoding/base64"
+	protojson "google.golang.org/protobuf/encoding/protojson"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	utf8 "unicode/utf8"
+)
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+// Enum value maps for NullValue.
+var (
+	NullValue_name = map[int32]string{
+		0: "NULL_VALUE",
+	}
+	NullValue_value = map[string]int32{
+		"NULL_VALUE": 0,
+	}
+)
+
+func (x NullValue) Enum() *NullValue {
+	p := new(NullValue)
+	*p = x
+	return p
+}
+
+func (x NullValue) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NullValue) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
+}
+
+func (NullValue) Type() protoreflect.EnumType {
+	return &file_google_protobuf_struct_proto_enumTypes[0]
+}
+
+func (x NullValue) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NullValue.Descriptor instead.
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Unordered map of dynamically typed values.
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+// NewStruct constructs a Struct from a general-purpose Go map.
+// The map keys must be valid UTF-8.
+// The map values are converted using NewValue.
+func NewStruct(v map[string]interface{}) (*Struct, error) {
+	x := &Struct{Fields: make(map[string]*Value, len(v))}
+	for k, v := range v {
+		if !utf8.ValidString(k) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
+		}
+		var err error
+		x.Fields[k], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsMap converts x to a general-purpose Go map.
+// The map values are converted by calling Value.AsInterface.
+func (x *Struct) AsMap() map[string]interface{} {
+	vs := make(map[string]interface{})
+	for k, v := range x.GetFields() {
+		vs[k] = v.AsInterface()
+	}
+	return vs
+}
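+
+// A minimal round-trip sketch (map contents are illustrative only): NewStruct
+// and AsMap are inverses for JSON-compatible maps, modulo the NewValue
+// conversions documented below (for example, Go integers come back out as
+// float64).
+//
+//	in := map[string]interface{}{"name": "olt-1", "ports": 16}
+//	s, err := NewStruct(in)
+//	if err != nil {
+//		// invalid UTF-8 key or unsupported value type
+//	}
+//	out := s.AsMap() // out["ports"] is float64(16), not int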
+
+func (x *Struct) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Struct) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *Struct) Reset() {
+	*x = Struct{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Struct) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Struct) ProtoMessage() {}
+
+func (x *Struct) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
+func (*Struct) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Struct) GetFields() map[string]*Value {
+	if x != nil {
+		return x.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of these
+// variants; the absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The kind of value.
+	//
+	// Types that are assignable to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+// NewValue constructs a Value from a general-purpose Go interface.
+//
+//	╔════════════════════════╤════════════════════════════════════════════╗
+//	║ Go type                │ Conversion                                 ║
+//	╠════════════════════════╪════════════════════════════════════════════╣
+//	║ nil                    │ stored as NullValue                        ║
+//	║ bool                   │ stored as BoolValue                        ║
+//	║ int, int32, int64      │ stored as NumberValue                      ║
+//	║ uint, uint32, uint64   │ stored as NumberValue                      ║
+//	║ float32, float64       │ stored as NumberValue                      ║
+//	║ string                 │ stored as StringValue; must be valid UTF-8 ║
+//	║ []byte                 │ stored as StringValue; base64-encoded      ║
+//	║ map[string]interface{} │ stored as StructValue                      ║
+//	║ []interface{}          │ stored as ListValue                        ║
+//	╚════════════════════════╧════════════════════════════════════════════╝
+//
+// When converting an int64 or uint64 to a NumberValue, numeric precision loss
+// is possible since they are stored as a float64.
+func NewValue(v interface{}) (*Value, error) {
+	switch v := v.(type) {
+	case nil:
+		return NewNullValue(), nil
+	case bool:
+		return NewBoolValue(v), nil
+	case int:
+		return NewNumberValue(float64(v)), nil
+	case int32:
+		return NewNumberValue(float64(v)), nil
+	case int64:
+		return NewNumberValue(float64(v)), nil
+	case uint:
+		return NewNumberValue(float64(v)), nil
+	case uint32:
+		return NewNumberValue(float64(v)), nil
+	case uint64:
+		return NewNumberValue(float64(v)), nil
+	case float32:
+		return NewNumberValue(float64(v)), nil
+	case float64:
+		return NewNumberValue(float64(v)), nil
+	case string:
+		if !utf8.ValidString(v) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
+		}
+		return NewStringValue(v), nil
+	case []byte:
+		s := base64.StdEncoding.EncodeToString(v)
+		return NewStringValue(s), nil
+	case map[string]interface{}:
+		v2, err := NewStruct(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewStructValue(v2), nil
+	case []interface{}:
+		v2, err := NewList(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewListValue(v2), nil
+	default:
+		return nil, protoimpl.X.NewError("invalid type: %T", v)
+	}
+}
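+
+// A minimal sketch of the conversions above (values are illustrative only):
+// large integers lose precision once stored as a float64, []byte is carried
+// as a base64-encoded StringValue, and unsupported Go types are rejected.
+//
+//	v, _ := NewValue(int64(1)<<53 + 1)   // NumberValue; the +1 is lost in float64
+//	b, _ := NewValue([]byte{0xde, 0xad}) // StringValue holding base64 "3q0="
+//	_, err := NewValue(struct{}{})       // error: invalid type: struct {}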
+
+// NewNullValue constructs a new null Value.
+func NewNullValue() *Value {
+	return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
+}
+
+// NewBoolValue constructs a new boolean Value.
+func NewBoolValue(v bool) *Value {
+	return &Value{Kind: &Value_BoolValue{BoolValue: v}}
+}
+
+// NewNumberValue constructs a new number Value.
+func NewNumberValue(v float64) *Value {
+	return &Value{Kind: &Value_NumberValue{NumberValue: v}}
+}
+
+// NewStringValue constructs a new string Value.
+func NewStringValue(v string) *Value {
+	return &Value{Kind: &Value_StringValue{StringValue: v}}
+}
+
+// NewStructValue constructs a new struct Value.
+func NewStructValue(v *Struct) *Value {
+	return &Value{Kind: &Value_StructValue{StructValue: v}}
+}
+
+// NewListValue constructs a new list Value.
+func NewListValue(v *ListValue) *Value {
+	return &Value{Kind: &Value_ListValue{ListValue: v}}
+}
+
+// AsInterface converts x to a general-purpose Go interface.
+//
+// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
+// semantically equivalent JSON (assuming no errors occur).
+//
+// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
+// converted as strings to remain compatible with MarshalJSON.
+func (x *Value) AsInterface() interface{} {
+	switch v := x.GetKind().(type) {
+	case *Value_NumberValue:
+		if v != nil {
+			switch {
+			case math.IsNaN(v.NumberValue):
+				return "NaN"
+			case math.IsInf(v.NumberValue, +1):
+				return "Infinity"
+			case math.IsInf(v.NumberValue, -1):
+				return "-Infinity"
+			default:
+				return v.NumberValue
+			}
+		}
+	case *Value_StringValue:
+		if v != nil {
+			return v.StringValue
+		}
+	case *Value_BoolValue:
+		if v != nil {
+			return v.BoolValue
+		}
+	case *Value_StructValue:
+		if v != nil {
+			return v.StructValue.AsMap()
+		}
+	case *Value_ListValue:
+		if v != nil {
+			return v.ListValue.AsSlice()
+		}
+	}
+	return nil
+}
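+
+// A minimal sketch of the special cases above (illustrative only): non-finite
+// numbers come back as strings so that a later "encoding/json".Marshal call
+// does not fail on them.
+//
+//	v := NewNumberValue(math.NaN())
+//	_ = v.AsInterface() // the string "NaN", not a float64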
+
+func (x *Value) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Value) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *Value) Reset() {
+	*x = Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *Value) GetNullValue() NullValue {
+	if x, ok := x.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (x *Value) GetNumberValue() float64 {
+	if x, ok := x.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (x *Value) GetStringValue() string {
+	if x, ok := x.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *Value) GetBoolValue() bool {
+	if x, ok := x.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *Value) GetStructValue() *Struct {
+	if x, ok := x.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+	if x, ok := x.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	// Represents a null value.
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	// Represents a double value.
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	// Represents a string value.
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	// Represents a boolean value.
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	// Represents a structured value.
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	// Represents a repeated `Value`.
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Repeated field of dynamically typed values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// NewList constructs a ListValue from a general-purpose Go slice.
+// The slice elements are converted using NewValue.
+func NewList(v []interface{}) (*ListValue, error) {
+	x := &ListValue{Values: make([]*Value, len(v))}
+	for i, v := range v {
+		var err error
+		x.Values[i], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsSlice converts x to a general-purpose Go slice.
+// The slice elements are converted by calling Value.AsInterface.
+func (x *ListValue) AsSlice() []interface{} {
+	vs := make([]interface{}, len(x.GetValues()))
+	for i, v := range x.GetValues() {
+		vs[i] = v.AsInterface()
+	}
+	return vs
+}
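+
+// A minimal sketch mirroring Struct above (slice contents are illustrative
+// only): NewList and AsSlice round-trip a []interface{} through the same
+// NewValue conversions.
+//
+//	l, _ := NewList([]interface{}{"pon-0", 1, true})
+//	_ = l.AsSlice() // []interface{}{"pon-0", float64(1), true}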
+
+func (x *ListValue) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *ListValue) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *ListValue) Reset() {
+	*x = ListValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+var File_google_protobuf_struct_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_struct_proto_rawDesc = []byte{
+	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
+	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
+	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
+	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
+	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
+	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
+	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
+	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
+	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
+	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
+	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
+	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
+	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
+	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
+	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_struct_proto_rawDescOnce sync.Once
+	file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+)
+
+func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
+	file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+	})
+	return file_google_protobuf_struct_proto_rawDescData
+}
+
+var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_protobuf_struct_proto_goTypes = []interface{}{
+	(NullValue)(0),    // 0: google.protobuf.NullValue
+	(*Struct)(nil),    // 1: google.protobuf.Struct
+	(*Value)(nil),     // 2: google.protobuf.Value
+	(*ListValue)(nil), // 3: google.protobuf.ListValue
+	nil,               // 4: google.protobuf.Struct.FieldsEntry
+}
+var file_google_protobuf_struct_proto_depIdxs = []int32{
+	4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
+	0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
+	1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
+	3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
+	2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
+	2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_struct_proto_init() }
+func file_google_protobuf_struct_proto_init() {
+	if File_google_protobuf_struct_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Struct); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   4,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_struct_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
+		EnumInfos:         file_google_protobuf_struct_proto_enumTypes,
+		MessageInfos:      file_google_protobuf_struct_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_struct_proto = out.File
+	file_google_protobuf_struct_proto_rawDesc = nil
+	file_google_protobuf_struct_proto_goTypes = nil
+	file_google_protobuf_struct_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
new file mode 100644
index 0000000..895a804
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -0,0 +1,760 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrapperspb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Double stores v in a new DoubleValue and returns a pointer to it.
+func Double(v float64) *DoubleValue {
+	return &DoubleValue{Value: v}
+}
+
+func (x *DoubleValue) Reset() {
+	*x = DoubleValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DoubleValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleValue) ProtoMessage() {}
+
+func (x *DoubleValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DoubleValue) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
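+
+// A minimal sketch of the presence distinction described in the package
+// comment (the message and field names are illustrative only): a nil wrapper
+// means the field is absent, while Double(0) is an explicit zero.
+//
+//	// message Config { google.protobuf.DoubleValue threshold = 1; }
+//	var unset *DoubleValue      // field not set on the wire
+//	explicitZero := Double(0)   // field present with value 0
+//	_ = unset.GetValue()        // 0, only because the getter is nil-safe
+//	_ = explicitZero.GetValue() // 0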
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Float stores v in a new FloatValue and returns a pointer to it.
+func Float(v float32) *FloatValue {
+	return &FloatValue{Value: v}
+}
+
+func (x *FloatValue) Reset() {
+	*x = FloatValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FloatValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FloatValue) ProtoMessage() {}
+
+func (x *FloatValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FloatValue) GetValue() float32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int64 stores v in a new Int64Value and returns a pointer to it.
+func Int64(v int64) *Int64Value {
+	return &Int64Value{Value: v}
+}
+
+func (x *Int64Value) Reset() {
+	*x = Int64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Value) ProtoMessage() {}
+
+func (x *Int64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Int64Value) GetValue() int64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt64 stores v in a new UInt64Value and returns a pointer to it.
+func UInt64(v uint64) *UInt64Value {
+	return &UInt64Value{Value: v}
+}
+
+func (x *UInt64Value) Reset() {
+	*x = UInt64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt64Value) ProtoMessage() {}
+
+func (x *UInt64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UInt64Value) GetValue() uint64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int32 stores v in a new Int32Value and returns a pointer to it.
+func Int32(v int32) *Int32Value {
+	return &Int32Value{Value: v}
+}
+
+func (x *Int32Value) Reset() {
+	*x = Int32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Value) ProtoMessage() {}
+
+func (x *Int32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Int32Value) GetValue() int32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt32 stores v in a new UInt32Value and returns a pointer to it.
+func UInt32(v uint32) *UInt32Value {
+	return &UInt32Value{Value: v}
+}
+
+func (x *UInt32Value) Reset() {
+	*x = UInt32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt32Value) ProtoMessage() {}
+
+func (x *UInt32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UInt32Value) GetValue() uint32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bool stores v in a new BoolValue and returns a pointer to it.
+func Bool(v bool) *BoolValue {
+	return &BoolValue{Value: v}
+}
+
+func (x *BoolValue) Reset() {
+	*x = BoolValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BoolValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BoolValue) ProtoMessage() {}
+
+func (x *BoolValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *BoolValue) GetValue() bool {
+	if x != nil {
+		return x.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// String stores v in a new StringValue and returns a pointer to it.
+func String(v string) *StringValue {
+	return &StringValue{Value: v}
+}
+
+func (x *StringValue) Reset() {
+	*x = StringValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StringValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringValue) ProtoMessage() {}
+
+func (x *StringValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *StringValue) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bytes stores v in a new BytesValue and returns a pointer to it.
+func Bytes(v []byte) *BytesValue {
+	return &BytesValue{Value: v}
+}
+
+func (x *BytesValue) Reset() {
+	*x = BytesValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BytesValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesValue) ProtoMessage() {}
+
+func (x *BytesValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BytesValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
+	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
+	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
+	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
+	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
+	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+)
+
+func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
+	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+	})
+	return file_google_protobuf_wrappers_proto_rawDescData
+}
+
+var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
+	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
+	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
+	(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
+	(*Int32Value)(nil),  // 4: google.protobuf.Int32Value
+	(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
+	(*BoolValue)(nil),   // 6: google.protobuf.BoolValue
+	(*StringValue)(nil), // 7: google.protobuf.StringValue
+	(*BytesValue)(nil),  // 8: google.protobuf.BytesValue
+}
+var file_google_protobuf_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_wrappers_proto_init() }
+func file_google_protobuf_wrappers_proto_init() {
+	if File_google_protobuf_wrappers_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DoubleValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FloatValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BoolValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StringValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BytesValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   9,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_wrappers_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_wrappers_proto = out.File
+	file_google_protobuf_wrappers_proto_rawDesc = nil
+	file_google_protobuf_wrappers_proto_goTypes = nil
+	file_google_protobuf_wrappers_proto_depIdxs = nil
+}
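
Note on the generated wrapper types above (editorial, not part of the patch): each type follows the same pattern, a typed constructor (`Bool`, `UInt32`, `String`, ...) that returns a pointer, and a nil-safe `GetValue` accessor, so a field can distinguish "unset" (nil) from a genuine zero value. As a hedged illustration only, not taken from the adapter code, a caller could exercise them as below, importing the `google.golang.org/protobuf/types/known/wrapperspb` package that is vendored in the modules.txt change that follows:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		// Constructors return pointers, so nil means "not set".
		enabled := wrapperspb.Bool(true)
		port := wrapperspb.UInt32(9191)

		// GetValue is nil-safe: a nil *UInt32Value yields the zero value.
		var unset *wrapperspb.UInt32Value
		fmt.Println(enabled.GetValue(), port.GetValue(), unset.GetValue()) // true 9191 0
	}
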
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f92a375..152fe64 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -53,7 +53,9 @@
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/empty
+github.com/golang/protobuf/ptypes/struct
 github.com/golang/protobuf/ptypes/timestamp
+github.com/golang/protobuf/ptypes/wrappers
 # github.com/golang/snappy v0.0.3
 github.com/golang/snappy
 # github.com/google/uuid v1.3.0
@@ -107,12 +109,21 @@
 # github.com/jcmturner/rpc/v2 v2.0.3
 github.com/jcmturner/rpc/v2/mstypes
 github.com/jcmturner/rpc/v2/ndr
+# github.com/jhump/protoreflect v1.10.2
+github.com/jhump/protoreflect/codec
+github.com/jhump/protoreflect/desc
+github.com/jhump/protoreflect/desc/internal
+github.com/jhump/protoreflect/dynamic
+github.com/jhump/protoreflect/dynamic/grpcdynamic
+github.com/jhump/protoreflect/grpcreflect
+github.com/jhump/protoreflect/internal
+github.com/jhump/protoreflect/internal/codec
 # github.com/klauspost/compress v1.12.2
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/opencord/voltha-lib-go/v7 v7.1.3
+# github.com/opencord/voltha-lib-go/v7 v7.1.5
 ## explicit
 github.com/opencord/voltha-lib-go/v7/pkg/config
 github.com/opencord/voltha-lib-go/v7/pkg/db
@@ -131,7 +142,7 @@
 github.com/opencord/voltha-lib-go/v7/pkg/probe
 github.com/opencord/voltha-lib-go/v7/pkg/techprofile
 github.com/opencord/voltha-lib-go/v7/pkg/version
-# github.com/opencord/voltha-protos/v5 v5.2.1
+# github.com/opencord/voltha-protos/v5 v5.2.2
 ## explicit
 github.com/opencord/voltha-protos/v5/go/adapter_service
 github.com/opencord/voltha-protos/v5/go/common
@@ -207,6 +218,7 @@
 golang.org/x/crypto/md4
 golang.org/x/crypto/pbkdf2
 # golang.org/x/net v0.0.0-20210614182718-04defd469f4e
+golang.org/x/net/context
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
@@ -223,10 +235,10 @@
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# google.golang.org/genproto v0.0.0-20211207154714-918901c715cf
+# google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.42.0 => google.golang.org/grpc v1.25.1
+# google.golang.org/grpc v1.44.0 => google.golang.org/grpc v1.25.1
 ## explicit
 google.golang.org/grpc
 google.golang.org/grpc/backoff
@@ -300,7 +312,9 @@
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/emptypb
+google.golang.org/protobuf/types/known/structpb
 google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 gopkg.in/yaml.v3
 # github.com/coreos/bbolt v1.3.4 => go.etcd.io/bbolt v1.3.4
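
Note on the new vendor entries above (editorial, not part of the patch): github.com/jhump/protoreflect v1.10.2 adds, among others, the grpcreflect and dynamic/grpcdynamic packages. Purely as an illustrative sketch under stated assumptions (the endpoint address is a placeholder and none of these calls are taken from the adapter code), a reflection-based probe of a live gRPC server could look like this:

	package main

	import (
		"context"
		"fmt"
		"log"
		"time"

		"github.com/jhump/protoreflect/grpcreflect"
		"google.golang.org/grpc"
		rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// Placeholder endpoint for illustration only.
		conn, err := grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		// Ask the server, via the reflection service, which services it exposes.
		rc := grpcreflect.NewClient(ctx, rpb.NewServerReflectionClient(conn))
		defer rc.Reset()

		services, err := rc.ListServices()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(services)
	}
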