Import of https://github.com/ciena/voltctl at commit 40d61fbf3f910ed4017cf67c9c79e8e1f82a33a5
Change-Id: I8464c59e60d76cb8612891db3303878975b5416c
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..6573c90
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,216 @@
+package desc
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+ intn "github.com/jhump/protoreflect/internal"
+)
+
+// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
+// The file's direct dependencies must be provided. If the given dependencies do not include
+// all of the file's dependencies or if the contents of the descriptors are internally
+// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
+func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+ return createFileDescriptor(fd, deps, nil)
+}
+
+func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+ ret := &FileDescriptor{
+ proto: fd,
+ symbols: map[string]Descriptor{},
+ fieldIndex: map[string]map[int32]*FieldDescriptor{},
+ }
+ pkg := fd.GetPackage()
+
+ // populate references to file descriptor dependencies
+ files := map[string]*FileDescriptor{}
+ for _, f := range deps {
+ files[f.proto.GetName()] = f
+ }
+ ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+ for i, d := range fd.GetDependency() {
+ resolved := r.ResolveImport(fd.GetName(), d)
+ ret.deps[i] = files[resolved]
+ if ret.deps[i] == nil {
+ if resolved != d {
+ ret.deps[i] = files[d]
+ }
+ if ret.deps[i] == nil {
+ return nil, intn.ErrNoSuchFile(d)
+ }
+ }
+ }
+ ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+ for i, pd := range fd.GetPublicDependency() {
+ ret.publicDeps[i] = ret.deps[pd]
+ }
+ ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+ for i, wd := range fd.GetWeakDependency() {
+ ret.weakDeps[i] = ret.deps[wd]
+ }
+ ret.isProto3 = fd.GetSyntax() == "proto3"
+
+ // populate all tables of child descriptors
+ for _, m := range fd.GetMessageType() {
+ md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
+ ret.symbols[n] = md
+ ret.messages = append(ret.messages, md)
+ }
+ for _, e := range fd.GetEnumType() {
+ ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
+ ret.symbols[n] = ed
+ ret.enums = append(ret.enums, ed)
+ }
+ for _, ex := range fd.GetExtension() {
+ exd, n := createFieldDescriptor(ret, ret, pkg, ex)
+ ret.symbols[n] = exd
+ ret.extensions = append(ret.extensions, exd)
+ }
+ for _, s := range fd.GetService() {
+ sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
+ ret.symbols[n] = sd
+ ret.services = append(ret.services, sd)
+ }
+
+ ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+ ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+ // now we can resolve all type references and source code info
+ scopes := []scope{fileScope(ret)}
+ path := make([]int32, 1, 8)
+ path[0] = internal.File_messagesTag
+ for i, md := range ret.messages {
+ if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_enumsTag
+ for i, ed := range ret.enums {
+ ed.resolve(append(path, int32(i)))
+ }
+ path[0] = internal.File_extensionsTag
+ for i, exd := range ret.extensions {
+ if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_servicesTag
+ for i, sd := range ret.services {
+ if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+
+ return ret, nil
+}
+
+// CreateFileDescriptors constructs a set of descriptors, one for each of the
+// given descriptor protos. The given set of descriptor protos must include all
+// transitive dependencies for every file.
+func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+ return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+ if len(fds) == 0 {
+ return nil, nil
+ }
+ files := map[string]*dpb.FileDescriptorProto{}
+ resolved := map[string]*FileDescriptor{}
+ var name string
+ for _, fd := range fds {
+ name = fd.GetName()
+ files[name] = fd
+ }
+ for _, fd := range fds {
+ _, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return resolved, nil
+}
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
+ var fdps []*dpb.FileDescriptorProto
+ addAllFiles(fds, &fdps, map[string]struct{}{})
+ return &dpb.FileDescriptorSet{File: fdps}
+}
+
+func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+ for _, fd := range src {
+ if _, ok := seen[fd.GetName()]; ok {
+ continue
+ }
+ seen[fd.GetName()] = struct{}{}
+ addAllFiles(fd.GetDependencies(), results, seen)
+ *results = append(*results, fd.AsFileDescriptorProto())
+ }
+}
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+ return createFileDescriptorFromSet(fds, nil)
+}
+
+func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+ files := fds.GetFile()
+ if len(files) == 0 {
+ return nil, errors.New("file descriptor set is empty")
+ }
+ resolved, err := createFileDescriptors(files, r)
+ if err != nil {
+ return nil, err
+ }
+ lastFilename := files[len(files)-1].GetName()
+ return resolved[lastFilename], nil
+}
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+ for _, s := range seen {
+ if filename == s {
+ return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
+ }
+ }
+ seen = append(seen, filename)
+
+ if d, ok := resolved[filename]; ok {
+ return d, nil
+ }
+ fdp := files[filename]
+ if fdp == nil {
+ return nil, intn.ErrNoSuchFile(filename)
+ }
+ deps := make([]*FileDescriptor, len(fdp.GetDependency()))
+ for i, depName := range fdp.GetDependency() {
+ resolvedDep := r.ResolveImport(filename, depName)
+ dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
+ if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
+ dep, err = createFromSet(depName, r, seen, files, resolved)
+ }
+ if err != nil {
+ return nil, err
+ }
+ deps[i] = dep
+ }
+ d, err := createFileDescriptor(fdp, deps, r)
+ if err != nil {
+ return nil, err
+ }
+ resolved[filename] = d
+ return d, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..ab235a3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1666 @@
+package desc
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+ // GetName returns the name of the object described by the descriptor. This will
+ // be a base name that does not include enclosing message names or the package name.
+ // For file descriptors, this indicates the path and name to the described file.
+ GetName() string
+ // GetFullyQualifiedName returns the fully-qualified name of the object described by
+ // the descriptor. This will include the package name and any enclosing message names.
+ // For file descriptors, this returns the path and name to the described file (same as
+ // GetName).
+ GetFullyQualifiedName() string
+ // GetParent returns the enclosing element in a proto source file. If the described
+ // object is a top-level object, this returns the file descriptor. Otherwise, it returns
+ // the element in which the described object was declared. File descriptors have no
+ // parent and return nil.
+ GetParent() Descriptor
+ // GetFile returns the file descriptor in which this element was declared. File
+ // descriptors return themselves.
+ GetFile() *FileDescriptor
+ // GetOptions returns the options proto containing options for the described element.
+ GetOptions() proto.Message
+ // GetSourceInfo returns any source code information that was present in the file
+ // descriptor. Source code info is optional. If no source code info is available for
+ // the element (including if there is none at all in the file descriptor) then this
+ // returns nil
+ GetSourceInfo() *dpb.SourceCodeInfo_Location
+ // AsProto returns the underlying descriptor proto for this descriptor.
+ AsProto() proto.Message
+}
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+ proto *dpb.FileDescriptorProto
+ symbols map[string]Descriptor
+ deps []*FileDescriptor
+ publicDeps []*FileDescriptor
+ weakDeps []*FileDescriptor
+ messages []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ services []*ServiceDescriptor
+ fieldIndex map[string]map[int32]*FieldDescriptor
+ isProto3 bool
+ sourceInfo internal.SourceInfoMap
+ sourceInfoRecomputeFunc
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+ internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+ fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+ if fields == nil {
+ fields = map[int32]*FieldDescriptor{}
+ fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+ }
+ fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+ return fd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+ return fd.proto.GetName()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+ return fd.proto.GetPackage()
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// Is it present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+ return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+ return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+ return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+ return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+ return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+ return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+ return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+func (fd *FileDescriptor) IsProto3() bool {
+ return fd.isProto3
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+ return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+ return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+ return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+ return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+ return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+ return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+ return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+ if symbol[0] == '.' {
+ symbol = symbol[1:]
+ }
+ return fd.symbols[symbol]
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+ if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+ return md
+ } else {
+ return nil
+ }
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+ if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+ return ed
+ } else {
+ return nil
+ }
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+ if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+ return sd
+ } else {
+ return nil
+ }
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+ if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+ if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+ proto *dpb.DescriptorProto
+ parent Descriptor
+ file *FileDescriptor
+ fields []*FieldDescriptor
+ nested []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ oneOfs []*OneOfDescriptor
+ extRanges extRanges
+ fqn string
+ sourceInfoPath []int32
+ jsonNames jsonNameMap
+ isProto3 bool
+ isMapEntry bool
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
+ msgName := merge(enclosing, md.GetName())
+ ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
+ for _, f := range md.GetField() {
+ fld, n := createFieldDescriptor(fd, ret, msgName, f)
+ symbols[n] = fld
+ ret.fields = append(ret.fields, fld)
+ }
+ for _, nm := range md.NestedType {
+ nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
+ symbols[n] = nmd
+ ret.nested = append(ret.nested, nmd)
+ }
+ for _, e := range md.EnumType {
+ ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
+ symbols[n] = ed
+ ret.enums = append(ret.enums, ed)
+ }
+ for _, ex := range md.GetExtension() {
+ exd, n := createFieldDescriptor(fd, ret, msgName, ex)
+ symbols[n] = exd
+ ret.extensions = append(ret.extensions, exd)
+ }
+ for i, o := range md.GetOneofDecl() {
+ od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
+ symbols[n] = od
+ ret.oneOfs = append(ret.oneOfs, od)
+ }
+ for _, r := range md.GetExtensionRange() {
+ // proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code).
+ // but protoc converts range to exclusive end in descriptor, so we must convert back
+ end := r.GetEnd() - 1
+ ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
+ Start: r.GetStart(),
+ End: end})
+ }
+ sort.Sort(ret.extRanges)
+ ret.isProto3 = fd.isProto3
+ ret.isMapEntry = md.GetOptions().GetMapEntry() &&
+ len(ret.fields) == 2 &&
+ ret.fields[0].GetNumber() == 1 &&
+ ret.fields[1].GetNumber() == 2
+
+ return ret, msgName
+}
+
+func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
+ md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ path = append(path, internal.Message_nestedMessagesTag)
+ scopes = append(scopes, messageScope(md))
+ for i, nmd := range md.nested {
+ if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+ return err
+ }
+ }
+ path[len(path)-1] = internal.Message_enumsTag
+ for i, ed := range md.enums {
+ ed.resolve(append(path, int32(i)))
+ }
+ path[len(path)-1] = internal.Message_fieldsTag
+ for i, fld := range md.fields {
+ if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+ return err
+ }
+ }
+ path[len(path)-1] = internal.Message_extensionsTag
+ for i, exd := range md.extensions {
+ if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+ return err
+ }
+ }
+ path[len(path)-1] = internal.Message_oneOfsTag
+ for i, od := range md.oneOfs {
+ od.resolve(append(path, int32(i)))
+ }
+ return nil
+}
+
+// GetName returns the simple (unqualified) name of the message.
+func (md *MessageDescriptor) GetName() string {
+ return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the message. This
+// includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (md *MessageDescriptor) GetFullyQualifiedName() string {
+ return md.fqn
+}
+
+// GetParent returns the message's enclosing descriptor. For top-level messages,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (md *MessageDescriptor) GetParent() Descriptor {
+ return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this message is defined.
+func (md *MessageDescriptor) GetFile() *FileDescriptor {
+ return md.file
+}
+
+// GetOptions returns the message's options. Most usages will be more interested
+// in GetMessageOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) GetOptions() proto.Message {
+ return md.proto.GetOptions()
+}
+
+// GetMessageOptions returns the message's options.
+func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+ return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the message, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// message was defined and also contains comments associated with the message
+// definition.
+func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) AsProto() proto.Message {
+ return md.proto
+}
+
+// AsDescriptorProto returns the underlying descriptor proto.
+func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+ return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MessageDescriptor) String() string {
+ return md.proto.String()
+}
+
+// IsMapEntry returns true if this is a synthetic message type that represents an entry
+// in a map field.
+func (md *MessageDescriptor) IsMapEntry() bool {
+ return md.isMapEntry
+}
+
+// GetFields returns all of the fields for this message.
+func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
+ return md.fields
+}
+
+// GetNestedMessageTypes returns all of the message types declared inside this message.
+func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
+ return md.nested
+}
+
+// GetNestedEnumTypes returns all of the enums declared inside this message.
+func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
+ return md.enums
+}
+
+// GetNestedExtensions returns all of the extensions declared inside this message.
+func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
+ return md.extensions
+}
+
+// GetOneOfs returns all of the one-of field sets declared inside this message.
+func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
+ return md.oneOfs
+}
+
+// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
+func (md *MessageDescriptor) IsProto3() bool {
+ return md.isProto3
+}
+
+// GetExtensionRanges returns the ranges of extension field numbers for this message.
+func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
+ return md.extRanges
+}
+
+// IsExtendable returns true if this message has any extension ranges.
+func (md *MessageDescriptor) IsExtendable() bool {
+ return len(md.extRanges) > 0
+}
+
+// IsExtension returns true if the given tag number is within any of this message's
+// extension ranges.
+func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
+ return md.extRanges.IsExtension(tagNumber)
+}
+
+type extRanges []proto.ExtensionRange
+
+func (er extRanges) String() string {
+ var buf bytes.Buffer
+ first := true
+ for _, r := range er {
+ if first {
+ first = false
+ } else {
+ buf.WriteString(",")
+ }
+ fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
+ }
+ return buf.String()
+}
+
+func (er extRanges) IsExtension(tagNumber int32) bool {
+ i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
+ return i < len(er) && tagNumber >= er[i].Start
+}
+
+func (er extRanges) Len() int {
+ return len(er)
+}
+
+func (er extRanges) Less(i, j int) bool {
+ return er[i].Start < er[j].Start
+}
+
+func (er extRanges) Swap(i, j int) {
+ er[i], er[j] = er[j], er[i]
+}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+ fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+ if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+ if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
// FieldDescriptor describes a field of a protocol buffer message.
type FieldDescriptor struct {
	proto          *dpb.FieldDescriptorProto // underlying descriptor proto
	parent         Descriptor                // enclosing message, or the file/message where an extension is declared
	owner          *MessageDescriptor        // message the field belongs to (the extendee, for extensions)
	file           *FileDescriptor           // file in which the field is defined
	oneOf          *OneOfDescriptor          // one-of this field belongs to, if any (linked during resolution)
	msgType        *MessageDescriptor        // the field's message type, if it has one
	enumType       *EnumDescriptor           // the field's enum type, if it has one
	fqn            string                    // fully qualified name of the field
	sourceInfoPath []int32                   // path used to look up this field's source info
	def            memoizedDefault           // cached default value; see GetDefaultValue
	isMap          bool                      // whether this is a map field (computed during resolution)
}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
+ fldName := merge(enclosing, fld.GetName())
+ ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
+ if fld.GetExtendee() == "" {
+ ret.owner = parent.(*MessageDescriptor)
+ }
+ // owner for extensions, field type (be it message or enum), and one-ofs get resolved later
+ return ret, fldName
+}
+
+func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+ if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+ return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+ }
+ fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
+ if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ return err
+ } else {
+ fd.enumType = desc.(*EnumDescriptor)
+ }
+ }
+ if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+ if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ return err
+ } else {
+ fd.msgType = desc.(*MessageDescriptor)
+ }
+ }
+ if fd.proto.GetExtendee() != "" {
+ if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+ return err
+ } else {
+ fd.owner = desc.(*MessageDescriptor)
+ }
+ }
+ fd.file.registerField(fd)
+ fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
+ fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().IsMapEntry()
+ return nil
+}
+
// determineDefault computes the default value for this field. Maps and
// repeated fields default to nil collections, and message-typed fields
// default to nil. For scalar fields in proto2 files, any declared default
// value string is parsed and used; otherwise the zero value for the field's
// type is returned (except proto2 enums with no declared default, which use
// the first declared enum value).
func (fd *FieldDescriptor) determineDefault() interface{} {
	if fd.IsMap() {
		return map[interface{}]interface{}(nil)
	} else if fd.IsRepeated() {
		return []interface{}(nil)
	} else if fd.msgType != nil {
		return nil
	}

	// Only proto2 files can declare an explicit default value.
	proto3 := fd.file.isProto3
	if !proto3 {
		def := fd.AsFieldDescriptorProto().GetDefaultValue()
		if def != "" {
			ret := parseDefaultValue(fd, def)
			if ret != nil {
				return ret
			}
			// if we can't parse default value, fall-through to return normal default...
		}
	}

	// No usable declared default: return the zero value for the field's type.
	switch fd.GetType() {
	case dpb.FieldDescriptorProto_TYPE_FIXED32,
		dpb.FieldDescriptorProto_TYPE_UINT32:
		return uint32(0)
	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
		dpb.FieldDescriptorProto_TYPE_INT32,
		dpb.FieldDescriptorProto_TYPE_SINT32:
		return int32(0)
	case dpb.FieldDescriptorProto_TYPE_FIXED64,
		dpb.FieldDescriptorProto_TYPE_UINT64:
		return uint64(0)
	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
		dpb.FieldDescriptorProto_TYPE_INT64,
		dpb.FieldDescriptorProto_TYPE_SINT64:
		return int64(0)
	case dpb.FieldDescriptorProto_TYPE_FLOAT:
		return float32(0.0)
	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
		return float64(0.0)
	case dpb.FieldDescriptorProto_TYPE_BOOL:
		return false
	case dpb.FieldDescriptorProto_TYPE_BYTES:
		return []byte(nil)
	case dpb.FieldDescriptorProto_TYPE_STRING:
		return ""
	case dpb.FieldDescriptorProto_TYPE_ENUM:
		// Proto3 enums always default to the zero value.
		if proto3 {
			return int32(0)
		}
		// Proto2 enums default to the first declared value.
		enumVals := fd.GetEnumType().GetValues()
		if len(enumVals) > 0 {
			return enumVals[0].GetNumber()
		} else {
			return int32(0) // defensive: an enum should never have zero values
		}
	default:
		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
	}
}
+
// parseDefaultValue converts the string form of a declared default value
// (from a proto2 FieldDescriptorProto) into a Go value whose type matches the
// field's type. It returns nil when the value cannot be interpreted for the
// field's type: unknown enum value name, invalid bool literal, or a field
// type that cannot declare a default (messages, groups). Note that for
// numeric types a malformed literal yields the type's zero value, not nil.
func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
	switch fd.GetType() {
	case dpb.FieldDescriptorProto_TYPE_ENUM:
		// Enum defaults are declared by value name, not number.
		vd := fd.GetEnumType().FindValueByName(val)
		if vd != nil {
			return vd.GetNumber()
		}
		return nil
	case dpb.FieldDescriptorProto_TYPE_BOOL:
		if val == "true" {
			return true
		} else if val == "false" {
			return false
		}
		return nil
	case dpb.FieldDescriptorProto_TYPE_BYTES:
		// Bytes defaults are C-escaped by protoc; undo that escaping.
		return []byte(unescape(val))
	case dpb.FieldDescriptorProto_TYPE_STRING:
		return val
	case dpb.FieldDescriptorProto_TYPE_FLOAT:
		if f, err := strconv.ParseFloat(val, 32); err == nil {
			return float32(f)
		} else {
			return float32(0)
		}
	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
		if f, err := strconv.ParseFloat(val, 64); err == nil {
			return f
		} else {
			return float64(0)
		}
	case dpb.FieldDescriptorProto_TYPE_INT32,
		dpb.FieldDescriptorProto_TYPE_SINT32,
		dpb.FieldDescriptorProto_TYPE_SFIXED32:
		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
			return int32(i)
		} else {
			return int32(0)
		}
	case dpb.FieldDescriptorProto_TYPE_UINT32,
		dpb.FieldDescriptorProto_TYPE_FIXED32:
		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
			return uint32(i)
		} else {
			return uint32(0)
		}
	case dpb.FieldDescriptorProto_TYPE_INT64,
		dpb.FieldDescriptorProto_TYPE_SINT64,
		dpb.FieldDescriptorProto_TYPE_SFIXED64:
		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
			return i
		} else {
			return int64(0)
		}
	case dpb.FieldDescriptorProto_TYPE_UINT64,
		dpb.FieldDescriptorProto_TYPE_FIXED64:
		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
			return i
		} else {
			return uint64(0)
		}
	default:
		// Other types (message, group) cannot declare a default value.
		return nil
	}
}
+
// unescape reverses the C-style escaping that protoc applies when it encodes
// a 'bytes' field's default value as a string. It handles hex (\xNN), octal
// (\NNN), short (\uNNNN) and long (\UNNNNNNNN) unicode escapes, and the
// standard single-character escapes. Malformed escape sequences are copied
// through verbatim rather than rejected.
func unescape(s string) string {
	// protoc encodes default values for 'bytes' fields using C escaping,
	// so this function reverses that escaping
	out := make([]byte, 0, len(s))
	var buf [4]byte // scratch space for UTF-8 encoding a rune
	for len(s) > 0 {
		if s[0] != '\\' || len(s) < 2 {
			// not escape sequence, or too short to be well-formed escape
			out = append(out, s[0])
			s = s[1:]
		} else if s[1] == 'x' || s[1] == 'X' {
			// hex escape: \x followed by up to 2 hex digits
			n := matchPrefix(s[2:], 2, isHex)
			if n == 0 {
				// bad escape
				out = append(out, s[:2]...)
				s = s[2:]
			} else {
				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
				if err != nil {
					// shouldn't really happen...
					out = append(out, s[:2+n]...)
				} else {
					out = append(out, byte(c))
				}
				s = s[2+n:]
			}
		} else if s[1] >= '0' && s[1] <= '7' {
			// octal escape: backslash followed by up to 3 octal digits
			n := 1 + matchPrefix(s[2:], 2, isOctal)
			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
			if err != nil || c > 0xff {
				// value out of byte range: copy the sequence verbatim
				out = append(out, s[:1+n]...)
			} else {
				out = append(out, byte(c))
			}
			s = s[1+n:]
		} else if s[1] == 'u' {
			// short unicode escape: \u followed by exactly 4 hex digits
			if len(s) < 6 {
				// bad escape
				out = append(out, s...)
				s = s[len(s):]
			} else {
				c, err := strconv.ParseUint(s[2:6], 16, 16)
				if err != nil {
					// bad escape
					out = append(out, s[:6]...)
				} else {
					w := utf8.EncodeRune(buf[:], rune(c))
					out = append(out, buf[:w]...)
				}
				s = s[6:]
			}
		} else if s[1] == 'U' {
			// long unicode escape: \U followed by exactly 8 hex digits
			if len(s) < 10 {
				// bad escape
				out = append(out, s...)
				s = s[len(s):]
			} else {
				c, err := strconv.ParseUint(s[2:10], 16, 32)
				if err != nil || c > 0x10ffff {
					// bad escape (or beyond the valid unicode range)
					out = append(out, s[:10]...)
				} else {
					w := utf8.EncodeRune(buf[:], rune(c))
					out = append(out, buf[:w]...)
				}
				s = s[10:]
			}
		} else {
			// single-character escapes
			switch s[1] {
			case 'a':
				out = append(out, '\a')
			case 'b':
				out = append(out, '\b')
			case 'f':
				out = append(out, '\f')
			case 'n':
				out = append(out, '\n')
			case 'r':
				out = append(out, '\r')
			case 't':
				out = append(out, '\t')
			case 'v':
				out = append(out, '\v')
			case '\\':
				out = append(out, '\\')
			case '\'':
				out = append(out, '\'')
			case '"':
				out = append(out, '"')
			case '?':
				out = append(out, '?')
			default:
				// invalid escape, just copy it as-is
				out = append(out, s[:2]...)
			}
			s = s[2:]
		}
	}
	return string(out)
}
+
// isOctal reports whether b is an octal digit ('0' through '7').
func isOctal(b byte) bool { return '0' <= b && b <= '7' }

// isHex reports whether b is a hexadecimal digit (either case).
func isHex(b byte) bool {
	switch {
	case '0' <= b && b <= '9', 'a' <= b && b <= 'f', 'A' <= b && b <= 'F':
		return true
	default:
		return false
	}
}
// matchPrefix returns the length of the longest prefix of s, capped at limit
// bytes, whose bytes all satisfy fn.
func matchPrefix(s string, limit int, fn func(byte) bool) int {
	if limit > len(s) {
		limit = len(s)
	}
	for i := 0; i < limit; i++ {
		if !fn(s[i]) {
			return i
		}
	}
	return limit
}
+
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+ return fd.proto.GetName()
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+ return fd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+ return fd.fqn
+}
+
+// GetParent returns the fields's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+ return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+ return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+ return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+ return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+ return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+ return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+ return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+ if jsonName := fd.proto.GetJsonName(); jsonName != "" {
+ return jsonName
+ }
+ return fd.proto.GetName()
+}
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+ parent := fd.GetParent()
+ switch parent := parent.(type) {
+ case *FileDescriptor:
+ pkg := parent.GetPackage()
+ if pkg == "" {
+ return fd.GetJSONName()
+ }
+ return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+ default:
+ return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+ }
+}
+
// GetOwner returns the message type that this field belongs to. If this is a normal
// field then this is the same as GetParent. But for extensions, this will be the
// extendee message whereas GetParent refers to where the extension was declared.
func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
	return fd.owner
}

// IsExtension returns true if this is an extension field.
func (fd *FieldDescriptor) IsExtension() bool {
	return fd.proto.GetExtendee() != ""
}

// GetOneOf returns the one-of field set to which this field belongs. If this field
// is not part of a one-of then this method returns nil.
func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
	return fd.oneOf
}

// GetType returns the type of this field. If the type indicates an enum, the
// enum type can be queried via GetEnumType. If the type indicates a message, the
// message type can be queried via GetMessageType.
func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
	return fd.proto.GetType()
}

// GetLabel returns the label for this field. The label can be required (proto2-only),
// optional (default for proto3), or repeated.
func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
	return fd.proto.GetLabel()
}

// IsRequired returns true if this field has the "required" label.
func (fd *FieldDescriptor) IsRequired() bool {
	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
}

// IsRepeated returns true if this field has the "repeated" label.
func (fd *FieldDescriptor) IsRepeated() bool {
	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
}

// IsMap returns true if this is a map field. If so, it will have the "repeated"
// label and its type will be a message that represents a map entry. The map entry
// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
func (fd *FieldDescriptor) IsMap() bool {
	return fd.isMap
}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+ if fd.isMap {
+ return fd.msgType.FindFieldByNumber(int32(1))
+ }
+ return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+ if fd.isMap {
+ return fd.msgType.FindFieldByNumber(int32(2))
+ }
+ return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+ return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+ return fd.enumType
+}
+
// GetDefaultValue returns the default value for this field.
//
// If this field represents a message type, this method always returns nil (even though
// for proto2 files, the default value should be a default instance of the message type).
// If the field represents an enum type, this method returns an int32 corresponding to the
// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
// this field is repeated (and not a map), it returns a nil []interface{}.
//
// Otherwise, it returns the declared default value for the field or a zero value, if no
// default is declared or if the file is proto3. The type of said return value corresponds
// to the type of the field:
//  +-------------------------+-----------+
//  | Declared Type           | Go Type   |
//  +-------------------------+-----------+
//  | int32, sint32, sfixed32 | int32     |
//  | int64, sint64, sfixed64 | int64     |
//  | uint32, fixed32         | uint32    |
//  | uint64, fixed64         | uint64    |
//  | float                   | float32   |
//  | double                  | float64   |
//  | bool                    | bool      |
//  | string                  | string    |
//  | bytes                   | []byte    |
//  +-------------------------+-----------+
func (fd *FieldDescriptor) GetDefaultValue() interface{} {
	return fd.getDefaultValue()
}
+
// EnumDescriptor describes an enum declared in a proto file.
type EnumDescriptor struct {
	proto          *dpb.EnumDescriptorProto // underlying descriptor proto
	parent         Descriptor               // enclosing file or message
	file           *FileDescriptor          // file in which the enum is defined
	values         []*EnumValueDescriptor   // values in declaration order
	valuesByNum    sortedValues             // values sorted by number, for binary search
	fqn            string                   // fully qualified name of the enum
	sourceInfoPath []int32                  // path used to look up this enum's source info
}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
+ enumName := merge(enclosing, ed.GetName())
+ ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
+ for _, ev := range ed.GetValue() {
+ evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
+ symbols[n] = evd
+ ret.values = append(ret.values, evd)
+ }
+ if len(ret.values) > 0 {
+ ret.valuesByNum = make(sortedValues, len(ret.values))
+ copy(ret.valuesByNum, ret.values)
+ sort.Stable(ret.valuesByNum)
+ }
+ return ret, enumName
+}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+ return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+ return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+ sv[i], sv[j] = sv[j], sv[i]
+}
+
+func (ed *EnumDescriptor) resolve(path []int32) {
+ ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ path = append(path, internal.Enum_valuesTag)
+ for i, evd := range ed.values {
+ evd.resolve(append(path, int32(i)))
+ }
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+ return ed.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+ return ed.fqn
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+ return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+ return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+ return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+ return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+ return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+ return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+ return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+ return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+ fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+ if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+ return vd
+ } else {
+ return nil
+ }
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+ index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+ if index < len(ed.valuesByNum) {
+ vd := ed.valuesByNum[index]
+ if vd.GetNumber() == num {
+ return vd
+ }
+ }
+ return nil
+}
+
// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
type EnumValueDescriptor struct {
	proto          *dpb.EnumValueDescriptorProto // underlying descriptor proto
	parent         *EnumDescriptor               // enum in which the value is declared
	file           *FileDescriptor               // file in which the value is defined
	fqn            string                        // fully qualified name of the value
	sourceInfoPath []int32                       // path used to look up this value's source info
}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
+ valName := merge(enclosing, evd.GetName())
+ return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+ vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+ return vd.proto.GetName()
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+ return vd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+ return vd.fqn
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+ return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+ return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+ return vd.file
+}
+
+// GetOptions returns the enum value's options. Most usages will be more
+// interested in GetEnumValueOptions, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetOptions() proto.Message {
+ return vd.proto.GetOptions()
+}
+
+// GetEnumValueOptions returns the enum value's options.
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+ return vd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum value, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum value was defined and also contains comments associated with the enum
+// value definition.
+func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return vd.file.sourceInfo.Get(vd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumValueDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) AsProto() proto.Message {
+ return vd.proto
+}
+
+// AsEnumValueDescriptorProto returns the underlying descriptor proto.
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+ return vd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (vd *EnumValueDescriptor) String() string {
+ return vd.proto.String()
+}
+
// ServiceDescriptor describes an RPC service declared in a proto file.
type ServiceDescriptor struct {
	proto          *dpb.ServiceDescriptorProto // underlying descriptor proto
	file           *FileDescriptor             // file in which the service is defined
	methods        []*MethodDescriptor         // the service's methods, in declaration order
	fqn            string                      // fully qualified name of the service
	sourceInfoPath []int32                     // path used to look up this service's source info
}
+
+func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
+ serviceName := merge(enclosing, sd.GetName())
+ ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
+ for _, m := range sd.GetMethod() {
+ md, n := createMethodDescriptor(fd, ret, serviceName, m)
+ symbols[n] = md
+ ret.methods = append(ret.methods, md)
+ }
+ return ret, serviceName
+}
+
+func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
+ sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ path = append(path, internal.Service_methodsTag)
+ for i, md := range sd.methods {
+ if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+ return sd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+ return sd.fqn
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+ return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+ return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+ return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+ return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+ return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+ return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+ return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+// NOTE(review): this returns the descriptor's internal slice directly, so
+// callers presumably should not modify it.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+ return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
+//
+// The lookup consults the file's symbol table, using the service's
+// fully-qualified name as the prefix of the method's key.
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+ fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+ // Early return instead of else-after-return; nil falls out when the symbol
+ // is absent or is not a method descriptor.
+ if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+  return md
+ }
+ return nil
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+ proto *dpb.MethodDescriptorProto // underlying descriptor proto
+ parent *ServiceDescriptor // service that declares this method
+ file *FileDescriptor // file in which this method is defined
+ inType *MessageDescriptor // resolved request type; set by resolve()
+ outType *MessageDescriptor // resolved response type; set by resolve()
+ fqn string // fully-qualified name of the method
+ sourceInfoPath []int32 // path for source-info lookup; set by resolve()
+}
+
+// createMethodDescriptor constructs a descriptor for md; its request and
+// response types are filled in later by resolve. It returns the new descriptor
+// and the method's fully-qualified name.
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
+ // request and response types get resolved later
+ methodName := merge(enclosing, md.GetName())
+ return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+}
+
+// resolve records the method's source-info path and resolves the stringly-typed
+// request and response type references into rich message descriptors.
+func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
+ md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ in, err := resolve(md.file, md.proto.GetInputType(), scopes)
+ if err != nil {
+  return err
+ }
+ md.inType = in.(*MessageDescriptor)
+ out, err := resolve(md.file, md.proto.GetOutputType(), scopes)
+ if err != nil {
+  return err
+ }
+ md.outType = out.(*MessageDescriptor)
+ return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+ return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+ return md.fqn
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+ return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+ return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+ return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+ return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+ return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+ return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+ return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+ return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+ return md.proto.GetServerStreaming()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+ return md.proto.GetClientStreaming()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+// The type is populated when the method descriptor is resolved.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+ return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+// The type is populated when the method descriptor is resolved.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+ return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+ proto *dpb.OneofDescriptorProto // underlying descriptor proto
+ parent *MessageDescriptor // message that declares this one-of
+ file *FileDescriptor // file in which this one-of is defined
+ choices []*FieldDescriptor // fields belonging to this one-of
+ fqn string // fully-qualified name of the one-of
+ sourceInfoPath []int32 // path for source-info lookup; set by resolve()
+}
+
+// createOneOfDescriptor builds the descriptor for the one-of at the given
+// index within parent, linking each field of the parent whose OneofIndex
+// matches that index. It returns the descriptor and its fully-qualified name.
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
+ oneOfName := merge(enclosing, od.GetName())
+ ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+ for _, f := range parent.fields {
+ oi := f.proto.OneofIndex
+ if oi != nil && *oi == int32(index) {
+ f.oneOf = ret
+ ret.choices = append(ret.choices, f)
+ }
+ }
+ return ret, oneOfName
+}
+
+// resolve records the path used to look up this one-of's source code info.
+func (od *OneOfDescriptor) resolve(path []int32) {
+ od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+ return od.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+ return od.fqn
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+ return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+ return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+ return od.file
+}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+ return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+ return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+ return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+ return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+ return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+// NOTE(review): the internal slice is returned directly, so callers presumably
+// should not modify it.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+ return od.choices
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared. It maps a name to the descriptor declared with that name in
+// the scope, or nil if there is none.
+type scope func(string) Descriptor
+
+// fileScope returns a scope that searches symbols declared in fd as well as in
+// other files that share fd's package or any ancestor package (protobuf
+// packages form a hierarchy, like C++ namespaces).
+func fileScope(fd *FileDescriptor) scope {
+ prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
+ return func(name string) Descriptor {
+  for _, pfx := range prefixes {
+   if d := findSymbol(fd, merge(pfx, name), false); d != nil {
+    return d
+   }
+  }
+  return nil
+ }
+}
+
+// messageScope returns a scope that searches only symbols declared directly
+// within the message md (using the file's symbol table keyed by the message's
+// fully-qualified name plus the looked-up name).
+func messageScope(md *MessageDescriptor) scope {
+ return func(name string) Descriptor {
+  d, ok := md.file.symbols[merge(md.fqn, name)]
+  if !ok {
+   return nil
+  }
+  return d
+ }
+}
+
+// resolve looks up the named symbol, which may be fully-qualified (indicated
+// by a leading dot) or relative to one of the given lexical scopes. Scopes are
+// consulted from innermost (last) to outermost (first). An error is returned
+// when the name cannot be resolved.
+func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
+ if strings.HasPrefix(name, ".") {
+  // already fully-qualified: strip the dot and look it up directly
+  if d := findSymbol(fd, name[1:], false); d != nil {
+   return d, nil
+  }
+ } else {
+  // unqualified: try the enclosing (last) scope first, moving outward
+  for i := len(scopes) - 1; i >= 0; i-- {
+   if d := scopes[i](name); d != nil {
+    return d, nil
+   }
+  }
+ }
+ return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
+}
+
+func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
+ d := fd.symbols[name]
+ if d != nil {
+ return d
+ }
+
+ // When public = false, we are searching only directly imported symbols. But we
+ // also need to search transitive public imports due to semantics of public imports.
+ var deps []*FileDescriptor
+ if public {
+ deps = fd.publicDeps
+ } else {
+ deps = fd.deps
+ }
+ for _, dep := range deps {
+ d = findSymbol(dep, name, true)
+ if d != nil {
+ return d
+ }
+ }
+
+ return nil
+}
+
+// merge joins two name components with a dot separator. An empty prefix is
+// treated as absent, so merge("", "Foo") yields "Foo" while merge("pkg", "Foo")
+// yields "pkg.Foo".
+func merge(a, b string) string {
+ if a == "" {
+  return b
+ }
+ return a + "." + b
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..d8e2df0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,30 @@
+//+build appengine
+// TODO: other build tags for environments where unsafe package is inappropriate
+
+package desc
+
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: With allowed use of unsafe, we use it to atomically define an index
+ // via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index
+ // and do a linear scan of fields each time.
+ for _, f := range md.fields {
+ jn := f.proto.GetJsonName()
+ if jn == "" {
+ // a field with no explicit JSON name falls back to its proto name
+ jn = f.proto.GetName()
+ }
+ if jn == jsonName {
+ return f
+ }
+ }
+ return nil
+}
+
+// getDefaultValue computes the field's default value. In this build (no
+// unsafe), there is no memoization: the default is recomputed on every call.
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..6ff872f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,59 @@
+//+build !appengine
+// TODO: exclude other build tags for environments where unsafe package is inappropriate
+
+package desc
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// jsonNameMap is a lazily built index from JSON name to field, published
+// atomically by FindFieldByJSONName.
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+// memoizedDefault caches a field's computed default value, published
+// atomically by getDefaultValue.
+type memoizedDefault *interface{} // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: We don't want to eagerly index JSON names because many programs won't use it.
+ // So we want to do it lazily, but also make sure the result is thread-safe. So we
+ // atomically load/store the map as if it were a normal pointer. We don't use other
+ // mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+ // do this lazily because those types cannot be copied, and we'd rather not induce
+ // 'go vet' errors in programs that use descriptors and try to copy them.
+ // If multiple goroutines try to access the index at the same time, before it is
+ // built, they will all end up computing the index redundantly. Future reads of
+ // the index will use whatever was the "last one stored" by those racing goroutines.
+ // Since building the index is deterministic, this is fine: all indices computed
+ // will be the same.
+ addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+ jsonNames := atomic.LoadPointer(addrOfJsonNames)
+ var index map[string]*FieldDescriptor
+ if jsonNames == nil {
+ // slow path: compute the index
+ index = map[string]*FieldDescriptor{}
+ for _, f := range md.fields {
+ jn := f.proto.GetJsonName()
+ if jn == "" {
+ // fall back to the proto field name when no JSON name is set
+ jn = f.proto.GetName()
+ }
+ index[jn] = f
+ }
+ // publish the computed index; racing builders may overwrite each other,
+ // which is harmless since every build produces the same contents (see above)
+ atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+ } else {
+ // reinterpret the previously stored pointer back into a map header
+ *(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+ }
+ return index[jsonName]
+}
+
+// getDefaultValue returns the field's default value, computing it on first use
+// and memoizing the result via an atomic pointer store (same lock-free approach
+// as FindFieldByJSONName above).
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+ def := atomic.LoadPointer(addrOfDef)
+ if def != nil {
+ // fast path: default already computed and memoized
+ return *(*interface{})(def)
+ }
+ // slow path: compute the default, potentially involves decoding value
+ d := fd.determineDefault()
+ atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+ return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..1740dce
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,41 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through rich descriptor's
+// methods.
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..caf3277
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,306 @@
+package desc
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+var (
+ globalImportPathConf map[string]string
+ globalImportPathMu sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+func RegisterImportPath(registerPath, importPath string) {
+ if importPath == "" {
+  panic("import path cannot be empty")
+ }
+ if fileDesc := proto.FileDescriptor(registerPath); len(fileDesc) == 0 {
+  panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+ }
+ globalImportPathMu.Lock()
+ defer globalImportPathMu.Unlock()
+ if prev := globalImportPathConf[importPath]; prev != "" {
+  panic(fmt.Sprintf("import path %q already registered for %s", importPath, prev))
+ }
+ if globalImportPathConf == nil {
+  globalImportPathConf = map[string]string{}
+ }
+ globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned; otherwise
+// the (cleaned) import path is returned unchanged.
+func ResolveImport(importPath string) string {
+ importPath = clean(importPath)
+ globalImportPathMu.RLock()
+ reg := globalImportPathConf[importPath]
+ globalImportPathMu.RUnlock()
+ if reg != "" {
+  return reg
+ }
+ return importPath
+}
+
+// ImportResolver lets you work-around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+// import "foo/bar.proto";
+// However, when protoc is invoked, the command-line args look like so:
+// protoc -Ifoo/ --go_out=foo/ bar.proto
+// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+// Because the path given to protoc is just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+// fd, err := desc.LoadFileDescriptor("baz.proto")
+// // err will be non-nil, complaining that there is no such file
+// // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+// var r desc.ImportResolver
+// r.RegisterImportPath("bar.proto", "foo/bar.proto")
+// fd, err := r.LoadFileDescriptor("baz.proto")
+// // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path than how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+type ImportResolver struct {
+ children map[string]*ImportResolver // child resolvers, keyed by the first segment of a source path
+ importPaths map[string]string // alternate import path -> registered path, for this source context
+
+ // By default, an ImportResolver will fallback to consulting any paths
+ // registered via the top-level RegisterImportPath function. Setting this
+ // field to true will cause the ImportResolver to skip that fallback and
+ // only examine its own locally registered paths.
+ SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged. A nil receiver simply falls back to the package-level rules.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+ if r == nil {
+  return ResolveImport(importPath)
+ }
+ if res := r.resolveImport(clean(source), clean(importPath)); res != "" {
+  return res
+ }
+ if r.SkipFallbackRules {
+  return importPath
+ }
+ return ResolveImport(importPath)
+}
+
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+ if source == "" {
+ return r.importPaths[importPath]
+ }
+ var car, cdr string
+ idx := strings.IndexRune(source, filepath.Separator)
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch != nil {
+ if reg := ch.resolveImport(cdr, importPath); reg != "" {
+ return reg
+ }
+ }
+ return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fallback to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead. (This method is equivalent to calling
+// RegisterImportPathFrom with an empty source context.)
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+ r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must be the same
+// relative path of the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fallback to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+ importPath = clean(importPath)
+ if importPath == "" {
+  panic("import path cannot be empty")
+ }
+ registerPath = clean(registerPath)
+ if registerPath == "" {
+  panic("registered path cannot be empty")
+ }
+ r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+ if source == "" {
+ if r.importPaths == nil {
+ r.importPaths = map[string]string{}
+ } else if reg := r.importPaths[importPath]; reg != "" {
+ panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+ }
+ r.importPaths[importPath] = registerPath
+ return
+ }
+ var car, cdr string
+ idx := strings.IndexRune(source, filepath.Separator)
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch == nil {
+ if r.children == nil {
+ r.children = map[string]*ImportResolver{}
+ }
+ ch = &ImportResolver{}
+ r.children[car] = ch
+ }
+ ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// NOTE(review): the methods below are thin wrappers around the package-level
+// implementations, passing r so its registered alternate paths are honored.
+//
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+ return loadFileDescriptor(filePath, r)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+ return loadMessageDescriptor(msgName, r)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForMessage(msg, r)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForType(msgType, r)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForEnum(enum, r)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForType(enumType, r)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+ return loadFieldDescriptorForExtension(ext, r)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+ return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+ return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+ return createFileDescriptorFromSet(fds, r)
+}
+
+const dotPrefix = "." + string(filepath.Separator)
+
+// clean canonicalizes a path with filepath.Clean, mapping both the empty path
+// and "." to the empty string and stripping a leading "./" if one remains.
+func clean(path string) string {
+ if path == "" {
+  return ""
+ }
+ cleaned := filepath.Clean(path)
+ if cleaned == "." {
+  return ""
+ }
+ return strings.TrimPrefix(cleaned, dotPrefix)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..4d7dbae
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,96 @@
+package internal
+
+import (
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// SourceInfoMap is a map of paths in a descriptor to the corresponding source
+// code info. Keys are the strings produced by asMapKey from the int32 path
+// slices of SourceCodeInfo_Location values.
+type SourceInfoMap map[string]*dpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path, or nil if the path is
+// not present.
+func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+ return m[asMapKey(path)]
+}
+
+// Put stores the given source code info for the given path.
+func (m SourceInfoMap) Put(path []int32, loc *dpb.SourceCodeInfo_Location) {
+ m[asMapKey(path)] = loc
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+ k := asMapKey(path)
+ if _, ok := m[k]; ok {
+ return false
+ }
+ m[k] = loc
+ return true
+}
+
+// asMapKey converts a source-info path into a string usable as a map key by
+// encoding each int32 element as 4 little-endian bytes.
+func asMapKey(slice []int32) string {
+ // NB: arrays should be usable as map keys, but this does not
+ // work due to a bug: https://github.com/golang/go/issues/22605
+ buf := make([]byte, 0, len(slice)*4)
+ for _, v := range slice {
+ buf = append(buf, byte(v), byte(v>>8), byte(v>>16), byte(v>>24))
+ }
+ return string(buf)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+ m := make(SourceInfoMap)
+ PopulateSourceInfoMap(fd, m)
+ return m
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor, keyed by each location's path.
+func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+ for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
+ m.Put(loc.Path, loc)
+ }
+}
+
+// NB: This wonkiness allows desc.Descriptor impl to implement an interface that
+// is only usable from this package, by embedding a SourceInfoComputeFunc that
+// implements the actual logic (which must live in desc package to avoid a
+// dependency cycle).
+
+// SourceInfoComputer is a single method which will be invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+ recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+// recomputeSourceInfo implements SourceInfoComputer by invoking the function
+// value itself.
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+ f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This
+// is used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) {
+ c.recomputeSourceInfo()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..d5197f1
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,267 @@
+package internal
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ // MaxTag is the maximum allowed tag number for a field.
+ MaxTag = 536870911 // 2^29 - 1
+
+ // SpecialReservedStart is the first tag in a range (inclusive) that is
+ // reserved and not allowed for use in message definitions.
+ SpecialReservedStart = 19000
+ // SpecialReservedEnd is the last tag in a range (inclusive) that is
+ // reserved and not allowed for use in message definitions.
+ SpecialReservedEnd = 19999
+
+ // NB: It would be nice to use constants from generated code instead of
+ // hard-coding these here. But code-gen does not emit these as constants
+ // anywhere. The only places they appear in generated code are struct tags
+ // on fields of the generated descriptor protos.
+
+ // File_packageTag is the tag number of the package element in a file
+ // descriptor proto.
+ File_packageTag = 2
+ // File_dependencyTag is the tag number of the dependencies element in a
+ // file descriptor proto.
+ File_dependencyTag = 3
+ // File_messagesTag is the tag number of the messages element in a file
+ // descriptor proto.
+ File_messagesTag = 4
+ // File_enumsTag is the tag number of the enums element in a file descriptor
+ // proto.
+ File_enumsTag = 5
+ // File_servicesTag is the tag number of the services element in a file
+ // descriptor proto.
+ File_servicesTag = 6
+ // File_extensionsTag is the tag number of the extensions element in a file
+ // descriptor proto.
+ File_extensionsTag = 7
+ // File_optionsTag is the tag number of the options element in a file
+ // descriptor proto.
+ File_optionsTag = 8
+ // File_syntaxTag is the tag number of the syntax element in a file
+ // descriptor proto.
+ File_syntaxTag = 12
+ // Message_nameTag is the tag number of the name element in a message
+ // descriptor proto.
+ Message_nameTag = 1
+ // Message_fieldsTag is the tag number of the fields element in a message
+ // descriptor proto.
+ Message_fieldsTag = 2
+ // Message_nestedMessagesTag is the tag number of the nested messages
+ // element in a message descriptor proto.
+ Message_nestedMessagesTag = 3
+ // Message_enumsTag is the tag number of the enums element in a message
+ // descriptor proto.
+ Message_enumsTag = 4
+ // Message_extensionRangeTag is the tag number of the extension ranges
+ // element in a message descriptor proto.
+ Message_extensionRangeTag = 5
+ // Message_extensionsTag is the tag number of the extensions element in a
+ // message descriptor proto.
+ Message_extensionsTag = 6
+ // Message_optionsTag is the tag number of the options element in a message
+ // descriptor proto.
+ Message_optionsTag = 7
+ // Message_oneOfsTag is the tag number of the one-ofs element in a message
+ // descriptor proto.
+ Message_oneOfsTag = 8
+ // Message_reservedRangeTag is the tag number of the reserved ranges element
+ // in a message descriptor proto.
+ Message_reservedRangeTag = 9
+ // Message_reservedNameTag is the tag number of the reserved names element
+ // in a message descriptor proto.
+ Message_reservedNameTag = 10
+ // ExtensionRange_startTag is the tag number of the start index in an
+ // extension range proto.
+ ExtensionRange_startTag = 1
+ // ExtensionRange_endTag is the tag number of the end index in an
+ // extension range proto.
+ ExtensionRange_endTag = 2
+ // ExtensionRange_optionsTag is the tag number of the options element in an
+ // extension range proto.
+ ExtensionRange_optionsTag = 3
+ // ReservedRange_startTag is the tag number of the start index in a reserved
+ // range proto.
+ ReservedRange_startTag = 1
+ // ReservedRange_endTag is the tag number of the end index in a reserved
+ // range proto.
+ ReservedRange_endTag = 2
+ // Field_nameTag is the tag number of the name element in a field descriptor
+ // proto.
+ Field_nameTag = 1
+ // Field_extendeeTag is the tag number of the extendee element in a field
+ // descriptor proto.
+ Field_extendeeTag = 2
+ // Field_numberTag is the tag number of the number element in a field
+ // descriptor proto.
+ Field_numberTag = 3
+ // Field_labelTag is the tag number of the label element in a field
+ // descriptor proto.
+ Field_labelTag = 4
+ // Field_typeTag is the tag number of the type element in a field descriptor
+ // proto.
+ Field_typeTag = 5
+ // Field_defaultTag is the tag number of the default value element in a
+ // field descriptor proto.
+ Field_defaultTag = 7
+ // Field_optionsTag is the tag number of the options element in a field
+ // descriptor proto.
+ Field_optionsTag = 8
+ // Field_jsonNameTag is the tag number of the JSON name element in a field
+ // descriptor proto.
+ Field_jsonNameTag = 10
+ // OneOf_nameTag is the tag number of the name element in a one-of
+ // descriptor proto.
+ OneOf_nameTag = 1
+ // OneOf_optionsTag is the tag number of the options element in a one-of
+ // descriptor proto.
+ OneOf_optionsTag = 2
+ // Enum_nameTag is the tag number of the name element in an enum descriptor
+ // proto.
+ Enum_nameTag = 1
+ // Enum_valuesTag is the tag number of the values element in an enum
+ // descriptor proto.
+ Enum_valuesTag = 2
+ // Enum_optionsTag is the tag number of the options element in an enum
+ // descriptor proto.
+ Enum_optionsTag = 3
+ // Enum_reservedRangeTag is the tag number of the reserved ranges element in
+ // an enum descriptor proto.
+ Enum_reservedRangeTag = 4
+ // Enum_reservedNameTag is the tag number of the reserved names element in
+ // an enum descriptor proto.
+ Enum_reservedNameTag = 5
+ // EnumVal_nameTag is the tag number of the name element in an enum value
+ // descriptor proto.
+ EnumVal_nameTag = 1
+ // EnumVal_numberTag is the tag number of the number element in an enum
+ // value descriptor proto.
+ EnumVal_numberTag = 2
+ // EnumVal_optionsTag is the tag number of the options element in an enum
+ // value descriptor proto.
+ EnumVal_optionsTag = 3
+ // Service_nameTag is the tag number of the name element in a service
+ // descriptor proto.
+ Service_nameTag = 1
+ // Service_methodsTag is the tag number of the methods element in a service
+ // descriptor proto.
+ Service_methodsTag = 2
+ // Service_optionsTag is the tag number of the options element in a service
+ // descriptor proto.
+ Service_optionsTag = 3
+ // Method_nameTag is the tag number of the name element in a method
+ // descriptor proto.
+ Method_nameTag = 1
+ // Method_inputTag is the tag number of the input type element in a method
+ // descriptor proto.
+ Method_inputTag = 2
+ // Method_outputTag is the tag number of the output type element in a method
+ // descriptor proto.
+ Method_outputTag = 3
+ // Method_optionsTag is the tag number of the options element in a method
+ // descriptor proto.
+ Method_optionsTag = 4
+ // Method_inputStreamTag is the tag number of the input stream flag in a
+ // method descriptor proto.
+ Method_inputStreamTag = 5
+ // Method_outputStreamTag is the tag number of the output stream flag in a
+ // method descriptor proto.
+ Method_outputStreamTag = 6
+
+ // UninterpretedOptionsTag is the tag number of the uninterpreted options
+ // element. All *Options messages use the same tag for the field that stores
+ // uninterpreted options.
+ UninterpretedOptionsTag = 999
+
+ // Uninterpreted_nameTag is the tag number of the name element in an
+ // uninterpreted options proto.
+ Uninterpreted_nameTag = 2
+ // Uninterpreted_identTag is the tag number of the identifier value in an
+ // uninterpreted options proto.
+ Uninterpreted_identTag = 3
+ // Uninterpreted_posIntTag is the tag number of the positive int value in an
+ // uninterpreted options proto.
+ Uninterpreted_posIntTag = 4
+ // Uninterpreted_negIntTag is the tag number of the negative int value in an
+ // uninterpreted options proto.
+ Uninterpreted_negIntTag = 5
+ // Uninterpreted_doubleTag is the tag number of the double value in an
+ // uninterpreted options proto.
+ Uninterpreted_doubleTag = 6
+ // Uninterpreted_stringTag is the tag number of the string value in an
+ // uninterpreted options proto.
+ Uninterpreted_stringTag = 7
+ // Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+ // uninterpreted options proto.
+ Uninterpreted_aggregateTag = 8
+ // UninterpretedName_nameTag is the tag number of the name element in an
+ // uninterpreted option name proto.
+ UninterpretedName_nameTag = 1
+)
+
+// JsonName returns the default JSON name for a field with the given name.
+func JsonName(name string) string {
+ var js []rune
+ nextUpper := false
+ for i, r := range name {
+ if r == '_' {
+ nextUpper = true
+ continue
+ }
+ if i == 0 {
+ js = append(js, r)
+ } else if nextUpper {
+ nextUpper = false
+ js = append(js, unicode.ToUpper(r))
+ } else {
+ js = append(js, r)
+ }
+ }
+ return string(js)
+}
+
+// InitCap returns the given field name, but with the first letter capitalized.
+func InitCap(name string) string {
+ first, n := utf8.DecodeRuneInString(name)
+ return string(unicode.ToUpper(first)) + name[n:]
+}
+
+// CreatePrefixList returns a list of package prefixes to search when resolving
+// a symbol name. If the given package is blank, it returns only the empty
+// string. If the given package contains only one token, e.g. "foo", it returns
+// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
+// successively shorter prefixes of the package and then the empty string. For
+// example, for a package named "foo.bar.baz" it will return the following list:
+// ["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+ if pkg == "" {
+ return []string{""}
+ }
+ // Scan right-to-left so prefixes are appended longest-first; the final
+ // entry is always the empty (root) prefix.
+ prefixes := []string{pkg}
+ for i := len(pkg) - 1; i >= 0; i-- {
+ if pkg[i] == '.' {
+ prefixes = append(prefixes, pkg[:i])
+ }
+ }
+ return append(prefixes, "")
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..4a05830
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,341 @@
+package desc
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/internal"
+)
+
+// Package-level caches of processed descriptors. All three maps are guarded
+// by cacheMu: read via the get*FromCache helpers (read lock) and written only
+// while holding the write lock.
+var (
+ cacheMu sync.RWMutex
+ filesCache = map[string]*FileDescriptor{}
+ messagesCache = map[string]*MessageDescriptor{}
+ enumCache = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later.
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+ return loadFileDescriptor(file, nil)
+}
+
+// loadFileDescriptor checks the cache under a read lock and, on a miss, takes
+// the write lock and delegates to loadFileDescriptorLocked (which re-checks
+// the cache, so concurrent callers cannot duplicate work).
+func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
+ f := getFileFromCache(file)
+ if f != nil {
+ return f, nil
+ }
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ return loadFileDescriptorLocked(file, r)
+}
+
+// loadFileDescriptorLocked performs the actual load and caches the result;
+// cacheMu must be held for writing.
+func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
+ f := filesCache[file]
+ if f != nil {
+ return f, nil
+ }
+ fd, err := internal.LoadFileDescriptor(file)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err = toFileDescriptorLocked(fd, r)
+ if err != nil {
+ return nil, err
+ }
+ putCacheLocked(file, f)
+ return f, nil
+}
+
+// toFileDescriptorLocked recursively loads and links fd's dependencies, then
+// creates the rich descriptor. If a resolver-remapped dependency path cannot
+// be found, the original (unmapped) path is tried as a fallback.
+func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
+ deps := make([]*FileDescriptor, len(fd.GetDependency()))
+ for i, dep := range fd.GetDependency() {
+ resolvedDep := r.ResolveImport(fd.GetName(), dep)
+ var err error
+ deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
+ if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
+ // try original path
+ deps[i], err = loadFileDescriptorLocked(dep, r)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return CreateFileDescriptor(fd, deps...)
+}
+
+// getFileFromCache returns the cached descriptor for the named file, if any,
+// under a read lock.
+func getFileFromCache(file string) *FileDescriptor {
+ cacheMu.RLock()
+ defer cacheMu.RUnlock()
+ return filesCache[file]
+}
+
+// putCacheLocked caches fd and, recursively, all of its message types;
+// cacheMu must be held for writing.
+func putCacheLocked(filename string, fd *FileDescriptor) {
+ filesCache[filename] = fd
+ putMessageCacheLocked(fd.messages)
+}
+
+// putMessageCacheLocked caches the given messages and their nested messages by
+// fully-qualified name; cacheMu must be held for writing.
+func putMessageCacheLocked(mds []*MessageDescriptor) {
+ for _, md := range mds {
+ messagesCache[md.fqn] = md
+ putMessageCacheLocked(md.nested)
+ }
+}
+
+// interface implemented by generated messages, which all have a Descriptor() method in
+// addition to the methods of proto.Message
+type protoMessage interface {
+ proto.Message
+ Descriptor() ([]byte, []int)
+}
+
+// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+ return loadMessageDescriptor(message, nil)
+}
+
+// loadMessageDescriptor resolves the named message via the proto registry,
+// consulting and populating the package-level caches.
+func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
+ m := getMessageFromCache(message)
+ if m != nil {
+ return m, nil
+ }
+
+ pt := proto.MessageType(message)
+ if pt == nil {
+ // unknown message name: not an error, just no descriptor
+ return nil, nil
+ }
+ msg, err := messageFromType(pt)
+ if err != nil {
+ return nil, err
+ }
+
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ return loadMessageDescriptorForTypeLocked(message, msg, r)
+}
+
+// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForType(messageType, nil)
+}
+
+func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
+ m, err := messageFromType(messageType)
+ if err != nil {
+ return nil, err
+ }
+ return loadMessageDescriptorForMessage(m, r)
+}
+
+// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForMessage(message, nil)
+}
+
+// loadMessageDescriptorForMessage returns the descriptor for the given
+// message, using r's alternate paths (if any) when linking files.
+func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
+ // efficiently handle dynamic messages
+ type descriptorable interface {
+ GetMessageDescriptor() *MessageDescriptor
+ }
+ if d, ok := message.(descriptorable); ok {
+ return d.GetMessageDescriptor(), nil
+ }
+
+ name := proto.MessageName(message)
+ if name == "" {
+ // unregistered message type: no descriptor available
+ return nil, nil
+ }
+ m := getMessageFromCache(name)
+ if m != nil {
+ return m, nil
+ }
+
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ // BUGFIX: pass the caller-supplied resolver instead of nil; previously the
+ // configured alternate import paths were silently ignored on this path
+ // (unlike loadMessageDescriptor, which correctly threads r through).
+ return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), r)
+}
+
+// messageFromType returns a zero value of the given type (converted to a
+// pointer type if it is not one already) as a protoMessage, or an error if
+// the type does not implement that interface.
+func messageFromType(mt reflect.Type) (protoMessage, error) {
+ if mt.Kind() != reflect.Ptr {
+ mt = reflect.PtrTo(mt)
+ }
+ if m, ok := reflect.Zero(mt).Interface().(protoMessage); ok {
+ return m, nil
+ }
+ return nil, fmt.Errorf("failed to create message from type: %v", mt)
+}
+
+// loadMessageDescriptorForTypeLocked builds (or fetches from cache) the
+// descriptor for the named message; cacheMu must be held for writing.
+func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
+ m := messagesCache[name]
+ if m != nil {
+ return m, nil
+ }
+
+ fdb, _ := message.Descriptor()
+ fd, err := internal.DecodeFileDescriptor(name, fdb)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := toFileDescriptorLocked(fd, r)
+ if err != nil {
+ return nil, err
+ }
+ putCacheLocked(fd.GetName(), f)
+ // NOTE(review): this type assertion panics if the symbol is missing from
+ // the linked file or is not a message; generated descriptors should always
+ // contain it, but that invariant is assumed rather than checked here.
+ return f.FindSymbol(name).(*MessageDescriptor), nil
+}
+
+// getMessageFromCache returns the cached descriptor for the named message, if
+// any, under a read lock.
+func getMessageFromCache(message string) *MessageDescriptor {
+ cacheMu.RLock()
+ defer cacheMu.RUnlock()
+ return messagesCache[message]
+}
+
+// interface implemented by all generated enums
+type protoEnum interface {
+ EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that it is accessible for reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
+
+// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForType(enumType, nil)
+}
+
+// loadEnumDescriptorForType caches by the non-pointer enum type; see the NB
+// comment above about why enum descriptors are cached on demand.
+func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
+ // we cache descriptors using non-pointer type
+ if enumType.Kind() == reflect.Ptr {
+ enumType = enumType.Elem()
+ }
+ e := getEnumFromCache(enumType)
+ if e != nil {
+ return e, nil
+ }
+ enum, err := enumFromType(enumType)
+ if err != nil {
+ return nil, err
+ }
+
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+}
+
+// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForEnum(enum, nil)
+}
+
+// loadEnumDescriptorForEnum normalizes the given value to its non-pointer
+// type before consulting the cache.
+func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+ et := reflect.TypeOf(enum)
+ // we cache descriptors using non-pointer type
+ if et.Kind() == reflect.Ptr {
+ et = et.Elem()
+ enum = reflect.Zero(et).Interface().(protoEnum)
+ }
+ e := getEnumFromCache(et)
+ if e != nil {
+ return e, nil
+ }
+
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ return loadEnumDescriptorForTypeLocked(et, enum, r)
+}
+
+// enumFromType returns a zero value of the given type as a protoEnum; if the
+// type's kind is not int32, a pointer to the type is used instead. An error
+// is returned when the resulting type does not implement protoEnum.
+func enumFromType(et reflect.Type) (protoEnum, error) {
+ if et.Kind() != reflect.Int32 {
+ et = reflect.PtrTo(et)
+ }
+ if e, ok := reflect.Zero(et).Interface().(protoEnum); ok {
+ return e, nil
+ }
+ return nil, fmt.Errorf("failed to create enum from type: %v", et)
+}
+
+// loadEnumDescriptorForTypeLocked builds (or fetches from cache) the enum
+// descriptor for the given type; cacheMu must be held for writing. The path
+// returned by EnumDescriptor() is a sequence of indexes that findEnum uses to
+// walk to the right (possibly nested) enum.
+func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+ e := enumCache[et]
+ if e != nil {
+ return e, nil
+ }
+
+ fdb, path := enum.EnumDescriptor()
+ name := fmt.Sprintf("%v", et)
+ fd, err := internal.DecodeFileDescriptor(name, fdb)
+ if err != nil {
+ return nil, err
+ }
+ // see if we already have cached "rich" descriptor
+ f, ok := filesCache[fd.GetName()]
+ if !ok {
+ f, err = toFileDescriptorLocked(fd, r)
+ if err != nil {
+ return nil, err
+ }
+ putCacheLocked(fd.GetName(), f)
+ }
+
+ ed := findEnum(f, path)
+ enumCache[et] = ed
+ return ed, nil
+}
+
+// getEnumFromCache returns the cached descriptor for the given enum type, if
+// any, under a read lock.
+func getEnumFromCache(et reflect.Type) *EnumDescriptor {
+ cacheMu.RLock()
+ defer cacheMu.RUnlock()
+ return enumCache[et]
+}
+
+// findEnum walks the given index path: a single-element path selects a
+// top-level enum; otherwise the leading elements select a message (and its
+// nested messages) and the last element selects an enum nested therein.
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+ if len(path) == 1 {
+ return fd.GetEnumTypes()[path[0]]
+ }
+ md := fd.GetMessageTypes()[path[0]]
+ for _, i := range path[1 : len(path)-1] {
+ md = md.GetNestedMessageTypes()[i]
+ }
+ return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+ return loadFieldDescriptorForExtension(ext, nil)
+}
+
+// loadFieldDescriptorForExtension loads the file named by ext.Filename and
+// looks up ext.Name in it, verifying that the found symbol is an extension
+// field whose extendee and tag number match the given ExtensionDesc.
+func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
+ file, err := loadFileDescriptor(ext.Filename, r)
+ if err != nil {
+ return nil, err
+ }
+ field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
+ // make sure descriptor agrees with attributes of the ExtensionDesc
+ if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
+ field.GetNumber() != ext.Field {
+ return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
+ }
+ return field, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
new file mode 100644
index 0000000..2652053
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
@@ -0,0 +1 @@
+y.output
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
new file mode 100644
index 0000000..2499917
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
@@ -0,0 +1,1081 @@
+package protoparse
+
+import "fmt"
+
+// This file defines all of the nodes in the proto AST.
+
+// ErrorWithSourcePos is an error about a proto source file that includes
+// information about the location in the file that caused the error.
+type ErrorWithSourcePos struct {
+ Underlying error
+ Pos *SourcePos
+}
+
+// Error implements the error interface. It renders "file:line:col: err" when
+// line/column information is available, "file: err" when only the filename is
+// known, and just the underlying error when Pos is nil entirely.
+func (e ErrorWithSourcePos) Error() string {
+ if e.Pos == nil {
+ // Robustness: previously this would panic on a nil Pos.
+ return fmt.Sprintf("%v", e.Underlying)
+ }
+ if e.Pos.Line <= 0 || e.Pos.Col <= 0 {
+ return fmt.Sprintf("%s: %v", e.Pos.Filename, e.Underlying)
+ }
+ return fmt.Sprintf("%s:%d:%d: %v", e.Pos.Filename, e.Pos.Line, e.Pos.Col, e.Underlying)
+}
+
+// SourcePos identifies a location in a proto source file.
+type SourcePos struct {
+ Filename string
+ Line, Col int
+ Offset int
+}
+
+// unknownPos returns a SourcePos for the given file with no line/column info.
+func unknownPos(filename string) *SourcePos {
+ return &SourcePos{Filename: filename}
+}
+
+// node is the interface implemented by all AST nodes: each tracks its extent
+// in the source file plus attached leading and trailing comments.
+type node interface {
+ start() *SourcePos
+ end() *SourcePos
+ leadingComments() []*comment
+ trailingComments() []*comment
+}
+
+// terminalNode is a node from which leading comments can be popped and onto
+// which trailing comments can be pushed.
+type terminalNode interface {
+ node
+ popLeadingComment() *comment
+ pushTrailingComment(*comment)
+}
+
+// Compile-time interface checks.
+var _ terminalNode = (*basicNode)(nil)
+var _ terminalNode = (*stringLiteralNode)(nil)
+var _ terminalNode = (*intLiteralNode)(nil)
+var _ terminalNode = (*floatLiteralNode)(nil)
+var _ terminalNode = (*identNode)(nil)
+
+// fileDecl is a node that can represent a file; implemented by fileNode and
+// noSourceNode.
+type fileDecl interface {
+ node
+ getSyntax() node
+}
+
+var _ fileDecl = (*fileNode)(nil)
+var _ fileDecl = (*noSourceNode)(nil)
+
+// optionDecl is a node that can represent an option with a name and a value.
+type optionDecl interface {
+ node
+ getName() node
+ getValue() valueNode
+}
+
+var _ optionDecl = (*optionNode)(nil)
+var _ optionDecl = (*noSourceNode)(nil)
+
+// fieldDecl is a node that can represent a field declaration.
+type fieldDecl interface {
+ node
+ fieldLabel() node
+ fieldName() node
+ fieldType() node
+ fieldTag() node
+ fieldExtendee() node
+ getGroupKeyword() node
+}
+
+var _ fieldDecl = (*fieldNode)(nil)
+var _ fieldDecl = (*groupNode)(nil)
+var _ fieldDecl = (*mapFieldNode)(nil)
+var _ fieldDecl = (*syntheticMapField)(nil)
+var _ fieldDecl = (*noSourceNode)(nil)
+
+// rangeDecl is a node that can represent a numeric range with a start and end.
+type rangeDecl interface {
+ node
+ rangeStart() node
+ rangeEnd() node
+}
+
+var _ rangeDecl = (*rangeNode)(nil)
+var _ rangeDecl = (*noSourceNode)(nil)
+
+// enumValueDecl is a node that can represent an enum value declaration.
+type enumValueDecl interface {
+ node
+ getName() node
+ getNumber() node
+}
+
+var _ enumValueDecl = (*enumValueNode)(nil)
+var _ enumValueDecl = (*noSourceNode)(nil)
+
+// msgDecl is a node that can represent a message declaration.
+type msgDecl interface {
+ node
+ messageName() node
+ reservedNames() []*stringLiteralNode
+}
+
+var _ msgDecl = (*messageNode)(nil)
+var _ msgDecl = (*groupNode)(nil)
+var _ msgDecl = (*mapFieldNode)(nil)
+var _ msgDecl = (*noSourceNode)(nil)
+
+// methodDecl is a node that can represent a method declaration with input and
+// output types.
+type methodDecl interface {
+ node
+ getInputType() node
+ getOutputType() node
+}
+
+var _ methodDecl = (*methodNode)(nil)
+var _ methodDecl = (*noSourceNode)(nil)
+
+// posRange is a pair of positions bracketing a region of the source file.
+type posRange struct {
+ start, end *SourcePos
+}
+
+// basicNode is a simple terminal AST node: a position range plus any leading
+// and trailing comments.
+type basicNode struct {
+ posRange
+ leading []*comment
+ trailing []*comment
+}
+
+func (n *basicNode) start() *SourcePos {
+ return n.posRange.start
+}
+
+func (n *basicNode) end() *SourcePos {
+ return n.posRange.end
+}
+
+func (n *basicNode) leadingComments() []*comment {
+ return n.leading
+}
+
+func (n *basicNode) trailingComments() []*comment {
+ return n.trailing
+}
+
+// popLeadingComment removes and returns the first leading comment. It assumes
+// at least one leading comment is present (indexing panics otherwise).
+func (n *basicNode) popLeadingComment() *comment {
+ c := n.leading[0]
+ n.leading = n.leading[1:]
+ return c
+}
+
+func (n *basicNode) pushTrailingComment(c *comment) {
+ n.trailing = append(n.trailing, c)
+}
+
+// comment is a single comment, with its position range and raw text.
+type comment struct {
+ posRange
+ text string
+}
+
+// basicCompositeNode is a non-terminal AST node; its extent and comments are
+// those of its first and last child nodes.
+type basicCompositeNode struct {
+ first node
+ last node
+}
+
+func (n *basicCompositeNode) start() *SourcePos {
+ return n.first.start()
+}
+
+func (n *basicCompositeNode) end() *SourcePos {
+ return n.last.end()
+}
+
+func (n *basicCompositeNode) leadingComments() []*comment {
+ return n.first.leadingComments()
+}
+
+func (n *basicCompositeNode) trailingComments() []*comment {
+ return n.last.trailingComments()
+}
+
+// setRange records the first and last children, which define this node's
+// extent.
+func (n *basicCompositeNode) setRange(first, last node) {
+ n.first = first
+ n.last = last
+}
+
+// fileNode is the root AST node for a single proto source file.
+type fileNode struct {
+ basicCompositeNode
+ syntax *syntaxNode
+ decls []*fileElement
+
+ // These fields are populated after parsing, to make it easier to find them
+ // without searching decls. The parse result has a map of descriptors to
+ // nodes which makes the other declarations easily discoverable. But these
+ // elements do not map to descriptors -- they are just stored as strings in
+ // the file descriptor.
+ imports []*importNode
+ pkg *packageNode
+}
+
+func (n *fileNode) getSyntax() node {
+ return n.syntax
+}
+
+// fileElement is a single top-level declaration in a proto file.
+type fileElement struct {
+ // a discriminated union: only one field will be set
+ imp *importNode
+ pkg *packageNode
+ option *optionNode
+ message *messageNode
+ enum *enumNode
+ extend *extendNode
+ service *serviceNode
+ empty *basicNode
+}
+
+func (n *fileElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *fileElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *fileElement) leadingComments() []*comment {
+ return n.get().leadingComments()
+}
+
+func (n *fileElement) trailingComments() []*comment {
+ return n.get().trailingComments()
+}
+
+// get returns whichever field of the union is set, falling back to the empty
+// statement node.
+func (n *fileElement) get() node {
+ switch {
+ case n.imp != nil:
+ return n.imp
+ case n.pkg != nil:
+ return n.pkg
+ case n.option != nil:
+ return n.option
+ case n.message != nil:
+ return n.message
+ case n.enum != nil:
+ return n.enum
+ case n.extend != nil:
+ return n.extend
+ case n.service != nil:
+ return n.service
+ default:
+ return n.empty
+ }
+}
+
+// syntaxNode is the AST node for a file's syntax declaration.
+type syntaxNode struct {
+ basicCompositeNode
+ syntax *stringLiteralNode
+}
+
+// importNode is the AST node for an import declaration; public and weak
+// record the corresponding modifiers.
+type importNode struct {
+ basicCompositeNode
+ name *stringLiteralNode
+ public bool
+ weak bool
+}
+
+// packageNode is the AST node for a package declaration.
+type packageNode struct {
+ basicCompositeNode
+ name *identNode
+}
+
+// identifier is the value type produced by identNode.value().
+type identifier string
+
+// identKind distinguishes the ways an identifier is used.
+type identKind int
+
+const (
+ identSimpleName identKind = iota
+ identQualified
+ identTypeName
+)
+
+// identNode is the AST node for an identifier token.
+type identNode struct {
+ basicNode
+ val string
+ kind identKind
+}
+
+func (n *identNode) value() interface{} {
+ return identifier(n.val)
+}
+
+// optionNode is the AST node for an option declaration.
+type optionNode struct {
+ basicCompositeNode
+ name *optionNameNode
+ val valueNode
+}
+
+func (n *optionNode) getName() node {
+ return n.name
+}
+
+func (n *optionNode) getValue() valueNode {
+ return n.val
+}
+
+// optionNameNode is the AST node for an option's name, made up of one or more
+// parts.
+type optionNameNode struct {
+ basicCompositeNode
+ parts []*optionNamePartNode
+}
+
+// optionNamePartNode is one component of an option name. For non-extension
+// parts, offset and length identify the portion of the underlying text token
+// this part covers, and st/en (computed in setRange) hold the corresponding
+// positions.
+type optionNamePartNode struct {
+ basicCompositeNode
+ text *identNode
+ offset int
+ length int
+ isExtension bool
+ st, en *SourcePos
+}
+
+func (n *optionNamePartNode) start() *SourcePos {
+ if n.isExtension {
+ return n.basicCompositeNode.start()
+ }
+ return n.st
+}
+
+func (n *optionNamePartNode) end() *SourcePos {
+ if n.isExtension {
+ return n.basicCompositeNode.end()
+ }
+ return n.en
+}
+
+// setRange also derives st/en for non-extension parts by offsetting the first
+// node's start column by offset and then by length. Note that only the column
+// is adjusted, so this assumes the part lies on a single line.
+func (n *optionNamePartNode) setRange(first, last node) {
+ n.basicCompositeNode.setRange(first, last)
+ if !n.isExtension {
+ st := *first.start()
+ st.Col += n.offset
+ n.st = &st
+ en := st
+ en.Col += n.length
+ n.en = &en
+ }
+}
+
+// valueNode is an AST node representing a literal value; value() returns the
+// corresponding Go value.
+type valueNode interface {
+ node
+ value() interface{}
+}
+
+// Compile-time interface checks.
+var _ valueNode = (*stringLiteralNode)(nil)
+var _ valueNode = (*intLiteralNode)(nil)
+var _ valueNode = (*negativeIntLiteralNode)(nil)
+var _ valueNode = (*floatLiteralNode)(nil)
+var _ valueNode = (*boolLiteralNode)(nil)
+var _ valueNode = (*sliceLiteralNode)(nil)
+var _ valueNode = (*aggregateLiteralNode)(nil)
+var _ valueNode = (*noSourceNode)(nil)
+
+// stringLiteralNode is the AST node for a string literal value.
+type stringLiteralNode struct {
+ basicCompositeNode
+ val string
+}
+
+func (n *stringLiteralNode) value() interface{} {
+ return n.val
+}
+
+// popLeadingComment delegates to the first underlying terminal node.
+func (n *stringLiteralNode) popLeadingComment() *comment {
+ return n.first.(terminalNode).popLeadingComment()
+}
+
+// pushTrailingComment delegates to the last underlying terminal node.
+func (n *stringLiteralNode) pushTrailingComment(c *comment) {
+ n.last.(terminalNode).pushTrailingComment(c)
+}
+
+// intLiteralNode is the AST node for an unsigned integer literal.
+type intLiteralNode struct {
+ basicNode
+ val uint64
+}
+
+func (n *intLiteralNode) value() interface{} {
+ return n.val
+}
+
+// negativeIntLiteralNode is the AST node for a negated integer literal.
+type negativeIntLiteralNode struct {
+ basicCompositeNode
+ val int64
+}
+
+func (n *negativeIntLiteralNode) value() interface{} {
+ return n.val
+}
+
+// floatLiteralNode is the AST node for a floating point literal.
+type floatLiteralNode struct {
+ basicCompositeNode
+ val float64
+}
+
+func (n *floatLiteralNode) value() interface{} {
+ return n.val
+}
+
+// popLeadingComment delegates to the first underlying terminal node.
+func (n *floatLiteralNode) popLeadingComment() *comment {
+ return n.first.(terminalNode).popLeadingComment()
+}
+
+// pushTrailingComment delegates to the last underlying terminal node.
+func (n *floatLiteralNode) pushTrailingComment(c *comment) {
+ n.last.(terminalNode).pushTrailingComment(c)
+}
+
+// boolLiteralNode is the AST node for a boolean literal.
+type boolLiteralNode struct {
+ basicNode
+ val bool
+}
+
+func (n *boolLiteralNode) value() interface{} {
+ return n.val
+}
+
+// sliceLiteralNode is the AST node for a list of values; value() returns the
+// element nodes themselves.
+type sliceLiteralNode struct {
+ basicCompositeNode
+ elements []valueNode
+}
+
+func (n *sliceLiteralNode) value() interface{} {
+ return n.elements
+}
+
// aggregateLiteralNode is the AST node for a message literal value: a
// brace-enclosed list of name/value entries.
type aggregateLiteralNode struct {
	basicCompositeNode
	elements []*aggregateEntryNode
}

func (n *aggregateLiteralNode) value() interface{} {
	return n.elements
}

// aggregateEntryNode is one name/value entry inside an aggregate literal.
type aggregateEntryNode struct {
	basicCompositeNode
	name *aggregateNameNode
	val  valueNode
}

// aggregateNameNode is the name of an aggregate entry, which may be an
// extension name (written in brackets).
type aggregateNameNode struct {
	basicCompositeNode
	name        *identNode
	isExtension bool
}

// value returns the entry name as text, wrapping extension names in
// brackets.
func (a *aggregateNameNode) value() string {
	if a.isExtension {
		return "[" + a.name.val + "]"
	} else {
		return a.name.val
	}
}
+
// fieldNode is the AST node for a normal (non-group, non-map) field
// declaration.
type fieldNode struct {
	basicCompositeNode
	label   *labelNode
	fldType *identNode
	name    *identNode
	tag     *intLiteralNode
	options []*optionNode

	// This field is populated after parsing, to allow lookup of extendee source
	// locations when field extendees cannot be linked. (Otherwise, this is just
	// stored as a string in the field descriptors defined inside the extend
	// block).
	extendee *extendNode
}

func (n *fieldNode) fieldLabel() node {
	// proto3 fields and fields inside one-ofs will not have a label and we need
	// this check in order to return a nil node -- otherwise we'd return a
	// non-nil node that has a nil pointer value in it :/
	if n.label == nil {
		return nil
	}
	return n.label
}

func (n *fieldNode) fieldName() node {
	return n.name
}

func (n *fieldNode) fieldType() node {
	return n.fldType
}

func (n *fieldNode) fieldTag() node {
	return n.tag
}

// fieldExtendee returns the extended type's name node when this field was
// declared inside an extend block, or nil otherwise.
func (n *fieldNode) fieldExtendee() node {
	if n.extendee != nil {
		return n.extendee.extendee
	}
	return nil
}

// getGroupKeyword returns nil: a normal field has no "group" keyword.
func (n *fieldNode) getGroupKeyword() node {
	return nil
}

// labelNode is the AST node for a field label keyword ("repeated",
// "required", or -- when both flags are false -- "optional").
type labelNode struct {
	basicNode
	repeated bool
	required bool
}
+
// groupNode is the AST node for a group declaration, which acts as both a
// field and a nested message definition.
type groupNode struct {
	basicCompositeNode
	groupKeyword *identNode
	label        *labelNode
	name         *identNode
	tag          *intLiteralNode
	decls        []*messageElement

	// This field is populated after parsing, to make it easier to find them
	// without searching decls. The parse result has a map of descriptors to
	// nodes which makes the other declarations easily discoverable. But these
	// elements do not map to descriptors -- they are just stored as strings in
	// the message descriptor.
	reserved []*stringLiteralNode
	// This field is populated after parsing, to allow lookup of extendee source
	// locations when field extendees cannot be linked. (Otherwise, this is just
	// stored as a string in the field descriptors defined inside the extend
	// block).
	extendee *extendNode
}

func (n *groupNode) fieldLabel() node {
	return n.label
}

func (n *groupNode) fieldName() node {
	return n.name
}

// fieldType returns the group's name, which also serves as its type name.
func (n *groupNode) fieldType() node {
	return n.name
}

func (n *groupNode) fieldTag() node {
	return n.tag
}

// fieldExtendee returns the extended type's name node when this group was
// declared inside an extend block, or nil otherwise.
func (n *groupNode) fieldExtendee() node {
	if n.extendee != nil {
		return n.extendee.extendee
	}
	return nil
}

func (n *groupNode) getGroupKeyword() node {
	return n.groupKeyword
}

func (n *groupNode) messageName() node {
	return n.name
}

func (n *groupNode) reservedNames() []*stringLiteralNode {
	return n.reserved
}
+
// oneOfNode is the AST node for a "oneof" declaration.
type oneOfNode struct {
	basicCompositeNode
	name  *identNode
	decls []*oneOfElement
}

// oneOfElement is one declaration inside a oneof block.
type oneOfElement struct {
	// a discriminated union: only one field will be set
	option *optionNode
	field  *fieldNode
	empty  *basicNode
}

func (n *oneOfElement) start() *SourcePos {
	return n.get().start()
}

func (n *oneOfElement) end() *SourcePos {
	return n.get().end()
}

func (n *oneOfElement) leadingComments() []*comment {
	return n.get().leadingComments()
}

func (n *oneOfElement) trailingComments() []*comment {
	return n.get().trailingComments()
}

// get returns whichever member of the union is set.
func (n *oneOfElement) get() node {
	switch {
	case n.option != nil:
		return n.option
	case n.field != nil:
		return n.field
	default:
		return n.empty
	}
}
+
// mapFieldNode is the AST node for a map field declaration:
// "map<key, value> name = tag;".
type mapFieldNode struct {
	basicCompositeNode
	mapKeyword *identNode
	keyType    *identNode
	valueType  *identNode
	name       *identNode
	tag        *intLiteralNode
	options    []*optionNode
}

// fieldLabel returns the "map" keyword node; map fields have no explicit
// label.
func (n *mapFieldNode) fieldLabel() node {
	return n.mapKeyword
}

func (n *mapFieldNode) fieldName() node {
	return n.name
}

// fieldType returns the "map" keyword node, which stands in for the type.
func (n *mapFieldNode) fieldType() node {
	return n.mapKeyword
}

func (n *mapFieldNode) fieldTag() node {
	return n.tag
}

// fieldExtendee returns nil: map fields cannot appear in extend blocks.
func (n *mapFieldNode) fieldExtendee() node {
	return nil
}

func (n *mapFieldNode) getGroupKeyword() node {
	return nil
}

func (n *mapFieldNode) messageName() node {
	return n.name
}

func (n *mapFieldNode) reservedNames() []*stringLiteralNode {
	return nil
}

// keyField returns a synthetic field, tag 1, for the map's key type,
// positioned at the key type's location in source.
func (n *mapFieldNode) keyField() *syntheticMapField {
	tag := &intLiteralNode{
		basicNode: basicNode{
			posRange: posRange{start: n.keyType.start(), end: n.keyType.end()},
		},
		val: 1,
	}
	return &syntheticMapField{ident: n.keyType, tag: tag}
}

// valueField returns a synthetic field, tag 2, for the map's value type,
// positioned at the value type's location in source.
func (n *mapFieldNode) valueField() *syntheticMapField {
	tag := &intLiteralNode{
		basicNode: basicNode{
			posRange: posRange{start: n.valueType.start(), end: n.valueType.end()},
		},
		val: 2,
	}
	return &syntheticMapField{ident: n.valueType, tag: tag}
}

// syntheticMapField is a fabricated field node for the key or value of a
// map field; it has no source of its own and borrows the position of the
// key/value type identifier.
type syntheticMapField struct {
	ident *identNode
	tag   *intLiteralNode
}

func (n *syntheticMapField) start() *SourcePos {
	return n.ident.start()
}

func (n *syntheticMapField) end() *SourcePos {
	return n.ident.end()
}

func (n *syntheticMapField) leadingComments() []*comment {
	return nil
}

func (n *syntheticMapField) trailingComments() []*comment {
	return nil
}

func (n *syntheticMapField) fieldLabel() node {
	return n.ident
}

func (n *syntheticMapField) fieldName() node {
	return n.ident
}

func (n *syntheticMapField) fieldType() node {
	return n.ident
}

func (n *syntheticMapField) fieldTag() node {
	return n.tag
}

func (n *syntheticMapField) fieldExtendee() node {
	return nil
}

func (n *syntheticMapField) getGroupKeyword() node {
	return nil
}
+
// extensionRangeNode is the AST node for an "extensions" declaration.
type extensionRangeNode struct {
	basicCompositeNode
	ranges  []*rangeNode
	options []*optionNode
}

// rangeNode is a single tag range, used by extension and reserved
// declarations; st and en hold the parsed numeric bounds.
type rangeNode struct {
	basicCompositeNode
	stNode, enNode node
	st, en         int32
}

func (n *rangeNode) rangeStart() node {
	return n.stNode
}

func (n *rangeNode) rangeEnd() node {
	return n.enNode
}

// reservedNode is the AST node for a "reserved" declaration, which lists
// either tag ranges or field names (not both).
type reservedNode struct {
	basicCompositeNode
	ranges []*rangeNode
	names  []*stringLiteralNode
}
+
// enumNode is the AST node for an enum definition.
type enumNode struct {
	basicCompositeNode
	name  *identNode
	decls []*enumElement

	// This field is populated after parsing, to make it easier to find them
	// without searching decls. The parse result has a map of descriptors to
	// nodes which makes the other declarations easily discoverable. But these
	// elements do not map to descriptors -- they are just stored as strings in
	// the enum descriptor.
	reserved []*stringLiteralNode
}

// enumElement is one declaration inside an enum body.
type enumElement struct {
	// a discriminated union: only one field will be set
	option   *optionNode
	value    *enumValueNode
	reserved *reservedNode
	empty    *basicNode
}

func (n *enumElement) start() *SourcePos {
	return n.get().start()
}

func (n *enumElement) end() *SourcePos {
	return n.get().end()
}

func (n *enumElement) leadingComments() []*comment {
	return n.get().leadingComments()
}

func (n *enumElement) trailingComments() []*comment {
	return n.get().trailingComments()
}

// get returns whichever member of the union is set.
// NOTE(review): a set reserved member is not returned here; such elements
// fall through to empty -- confirm this matches how reserved declarations
// are positioned elsewhere.
func (n *enumElement) get() node {
	switch {
	case n.option != nil:
		return n.option
	case n.value != nil:
		return n.value
	default:
		return n.empty
	}
}

// enumValueNode is the AST node for a single enum value definition.
type enumValueNode struct {
	basicCompositeNode
	name    *identNode
	options []*optionNode

	// only one of these two will be set:

	numberP *intLiteralNode         // positive numeric value
	numberN *negativeIntLiteralNode // negative numeric value
}

func (n *enumValueNode) getName() node {
	return n.name
}

// getNumber returns whichever of the positive or negative number nodes is
// set.
func (n *enumValueNode) getNumber() node {
	if n.numberP != nil {
		return n.numberP
	}
	return n.numberN
}
+
// messageNode is the AST node for a message definition.
type messageNode struct {
	basicCompositeNode
	name  *identNode
	decls []*messageElement

	// This field is populated after parsing, to make it easier to find them
	// without searching decls. The parse result has a map of descriptors to
	// nodes which makes the other declarations easily discoverable. But these
	// elements do not map to descriptors -- they are just stored as strings in
	// the message descriptor.
	reserved []*stringLiteralNode
}

func (n *messageNode) messageName() node {
	return n.name
}

func (n *messageNode) reservedNames() []*stringLiteralNode {
	return n.reserved
}

// messageElement is one declaration inside a message body.
type messageElement struct {
	// a discriminated union: only one field will be set
	option         *optionNode
	field          *fieldNode
	mapField       *mapFieldNode
	oneOf          *oneOfNode
	group          *groupNode
	nested         *messageNode
	enum           *enumNode
	extend         *extendNode
	extensionRange *extensionRangeNode
	reserved       *reservedNode
	empty          *basicNode
}

func (n *messageElement) start() *SourcePos {
	return n.get().start()
}

func (n *messageElement) end() *SourcePos {
	return n.get().end()
}

func (n *messageElement) leadingComments() []*comment {
	return n.get().leadingComments()
}

func (n *messageElement) trailingComments() []*comment {
	return n.get().trailingComments()
}

// get returns whichever member of the union is set.
func (n *messageElement) get() node {
	switch {
	case n.option != nil:
		return n.option
	case n.field != nil:
		return n.field
	case n.mapField != nil:
		return n.mapField
	case n.oneOf != nil:
		return n.oneOf
	case n.group != nil:
		return n.group
	case n.nested != nil:
		return n.nested
	case n.enum != nil:
		return n.enum
	case n.extend != nil:
		return n.extend
	case n.extensionRange != nil:
		return n.extensionRange
	case n.reserved != nil:
		return n.reserved
	default:
		return n.empty
	}
}
+
// extendNode is the AST node for an "extend" block.
type extendNode struct {
	basicCompositeNode
	extendee *identNode // name of the message being extended
	decls    []*extendElement
}

// extendElement is one declaration inside an extend block.
type extendElement struct {
	// a discriminated union: only one field will be set
	field *fieldNode
	group *groupNode
	empty *basicNode
}

func (n *extendElement) start() *SourcePos {
	return n.get().start()
}

func (n *extendElement) end() *SourcePos {
	return n.get().end()
}

func (n *extendElement) leadingComments() []*comment {
	return n.get().leadingComments()
}

func (n *extendElement) trailingComments() []*comment {
	return n.get().trailingComments()
}

// get returns whichever member of the union is set.
func (n *extendElement) get() node {
	switch {
	case n.field != nil:
		return n.field
	case n.group != nil:
		return n.group
	default:
		return n.empty
	}
}
+
// serviceNode is the AST node for a service definition.
type serviceNode struct {
	basicCompositeNode
	name  *identNode
	decls []*serviceElement
}

// serviceElement is one declaration inside a service body.
type serviceElement struct {
	// a discriminated union: only one field will be set
	option *optionNode
	rpc    *methodNode
	empty  *basicNode
}

func (n *serviceElement) start() *SourcePos {
	return n.get().start()
}

func (n *serviceElement) end() *SourcePos {
	return n.get().end()
}

func (n *serviceElement) leadingComments() []*comment {
	return n.get().leadingComments()
}

func (n *serviceElement) trailingComments() []*comment {
	return n.get().trailingComments()
}

// get returns whichever member of the union is set.
func (n *serviceElement) get() node {
	switch {
	case n.option != nil:
		return n.option
	case n.rpc != nil:
		return n.rpc
	default:
		return n.empty
	}
}

// methodNode is the AST node for an "rpc" method declaration.
type methodNode struct {
	basicCompositeNode
	name    *identNode
	input   *rpcTypeNode
	output  *rpcTypeNode
	options []*optionNode
}

func (n *methodNode) getInputType() node {
	return n.input.msgType
}

func (n *methodNode) getOutputType() node {
	return n.output.msgType
}

// rpcTypeNode is the AST node for a method's request or response type,
// including the optional "stream" keyword.
type rpcTypeNode struct {
	basicCompositeNode
	msgType       *identNode
	streamKeyword node
}
+
// noSourceNode is a placeholder node used when no source location
// information is available. It implements the various accessor interfaces
// in this file by returning itself for child nodes and a single fixed
// position for start and end, so callers can treat it uniformly.
type noSourceNode struct {
	pos *SourcePos
}

func (n noSourceNode) start() *SourcePos {
	return n.pos
}

func (n noSourceNode) end() *SourcePos {
	return n.pos
}

func (n noSourceNode) leadingComments() []*comment {
	return nil
}

func (n noSourceNode) trailingComments() []*comment {
	return nil
}

func (n noSourceNode) getSyntax() node {
	return n
}

func (n noSourceNode) getName() node {
	return n
}

func (n noSourceNode) getValue() valueNode {
	return n
}

func (n noSourceNode) fieldLabel() node {
	return n
}

func (n noSourceNode) fieldName() node {
	return n
}

func (n noSourceNode) fieldType() node {
	return n
}

func (n noSourceNode) fieldTag() node {
	return n
}

func (n noSourceNode) fieldExtendee() node {
	return n
}

func (n noSourceNode) getGroupKeyword() node {
	return n
}

func (n noSourceNode) rangeStart() node {
	return n
}

func (n noSourceNode) rangeEnd() node {
	return n
}

func (n noSourceNode) getNumber() node {
	return n
}

func (n noSourceNode) messageName() node {
	return n
}

func (n noSourceNode) reservedNames() []*stringLiteralNode {
	return nil
}

func (n noSourceNode) getInputType() node {
	return n
}

func (n noSourceNode) getOutputType() node {
	return n
}

func (n noSourceNode) value() interface{} {
	return nil
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
new file mode 100644
index 0000000..c6446d3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
@@ -0,0 +1,10 @@
+// Package protoparse provides functionality for parsing *.proto source files
+// into descriptors that can be used with other protoreflect packages, like
+// dynamic messages and dynamic GRPC clients.
+//
+// This package links in other packages that include compiled descriptors for
+// the various "google/protobuf/*.proto" files that are included with protoc.
+// That way, like when invoking protoc, programs need not supply copies of these
+// "builtin" files. Though if copies of the files are provided, they will be
+// used instead of the builtin descriptors.
+package protoparse
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
new file mode 100644
index 0000000..c685e56
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
@@ -0,0 +1,766 @@
+package protoparse
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
// runeReader reads runes from an underlying bufio.Reader and supports
// pushing runes back onto the stream. Pushed-back runes are returned in
// LIFO order before any further reading from the underlying reader. Once
// an error is observed from the underlying reader, it is sticky: all
// subsequent reads return it.
type runeReader struct {
	rr     *bufio.Reader
	unread []rune
	err    error
}

// readRune returns the next rune in the stream along with its encoded
// size in bytes.
func (rd *runeReader) readRune() (rune, int, error) {
	if rd.err != nil {
		return 0, 0, rd.err
	}
	if n := len(rd.unread); n > 0 {
		c := rd.unread[n-1]
		rd.unread = rd.unread[:n-1]
		return c, utf8.RuneLen(c), nil
	}
	c, sz, err := rd.rr.ReadRune()
	if err != nil {
		rd.err = err
	}
	return c, sz, err
}

// unreadRune pushes r back onto the stream so that the next call to
// readRune returns it.
func (rd *runeReader) unreadRune(r rune) {
	rd.unread = append(rd.unread, r)
}
+
// lexError records err, at position pos, as the lexer's error -- but only
// if no error has been recorded yet; the first error wins.
func lexError(l protoLexer, pos *SourcePos, err string) {
	pl := l.(*protoLex)
	if pl.err == nil {
		pl.err = ErrorWithSourcePos{Underlying: errors.New(err), Pos: pos}
	}
}
+
// protoLex is the lexer for proto source files. It implements the lexer
// interface required by the generated parser, tracking position state and
// the previously emitted symbol (for comment attribution).
type protoLex struct {
	filename string
	input    *runeReader
	err      error     // first error encountered, if any
	res      *fileNode // the parsed result

	lineNo int // zero-based; exposed positions are one-based
	colNo  int // zero-based; exposed positions are one-based
	offset int // byte offset from the start of input

	prevSym terminalNode // most recently lexed symbol
}

// newLexer returns a lexer that reads proto source from in.
func newLexer(in io.Reader) *protoLex {
	return &protoLex{input: &runeReader{rr: bufio.NewReader(in)}}
}
+
// keywords maps proto keyword text to the corresponding token constant of
// the generated parser.
var keywords = map[string]int{
	"syntax":     _SYNTAX,
	"import":     _IMPORT,
	"weak":       _WEAK,
	"public":     _PUBLIC,
	"package":    _PACKAGE,
	"option":     _OPTION,
	"true":       _TRUE,
	"false":      _FALSE,
	"inf":        _INF,
	"nan":        _NAN,
	"repeated":   _REPEATED,
	"optional":   _OPTIONAL,
	"required":   _REQUIRED,
	"double":     _DOUBLE,
	"float":      _FLOAT,
	"int32":      _INT32,
	"int64":      _INT64,
	"uint32":     _UINT32,
	"uint64":     _UINT64,
	"sint32":     _SINT32,
	"sint64":     _SINT64,
	"fixed32":    _FIXED32,
	"fixed64":    _FIXED64,
	"sfixed32":   _SFIXED32,
	"sfixed64":   _SFIXED64,
	"bool":       _BOOL,
	"string":     _STRING,
	"bytes":      _BYTES,
	"group":      _GROUP,
	"oneof":      _ONEOF,
	"map":        _MAP,
	"extensions": _EXTENSIONS,
	"to":         _TO,
	"max":        _MAX,
	"reserved":   _RESERVED,
	"enum":       _ENUM,
	"message":    _MESSAGE,
	"extend":     _EXTEND,
	"service":    _SERVICE,
	"rpc":        _RPC,
	"stream":     _STREAM,
	"returns":    _RETURNS,
}
+
+func (l *protoLex) cur() *SourcePos {
+ return &SourcePos{
+ Filename: l.filename,
+ Offset: l.offset,
+ Line: l.lineNo + 1,
+ Col: l.colNo + 1,
+ }
+}
+
+func (l *protoLex) prev() *SourcePos {
+ if l.prevSym == nil {
+ return &SourcePos{
+ Filename: l.filename,
+ Offset: 0,
+ Line: 1,
+ Col: 1,
+ }
+ }
+ return l.prevSym.start()
+}
+
// Lex returns the next token for the generated parser, storing the token's
// node in lval. It skips whitespace, accumulates comments (attributing
// them as leading comments of the next symbol or, via setPrev, trailing
// comments of the previous one), and recognizes identifiers, keywords,
// numeric and string literals; any other rune is returned as itself.
// Returns 0 at EOF and _ERROR on a lexing error (with lval.err set).
func (l *protoLex) Lex(lval *protoSymType) int {
	if l.err != nil {
		// if we are already in a failed state, bail
		lval.err = l.err
		return _ERROR
	}

	// position of the start of the token currently being lexed
	prevLineNo := l.lineNo
	prevColNo := l.colNo
	prevOffset := l.offset
	var comments []*comment

	pos := func() posRange {
		return posRange{
			start: &SourcePos{
				Filename: l.filename,
				Offset:   prevOffset,
				Line:     prevLineNo + 1,
				Col:      prevColNo + 1,
			},
			end: l.cur(),
		}
	}
	basic := func() basicNode {
		return basicNode{
			posRange: pos(),
			leading:  comments,
		}
	}
	// setPrev records n as the most recent symbol and may re-attribute
	// some of n's leading comments as trailing comments of the prior
	// symbol, based on line adjacency and comment style.
	setPrev := func(n terminalNode) {
		nStart := n.start().Line
		if _, ok := n.(*basicNode); ok {
			// if the node is a simple rune, don't attribute comments to it
			// HACK: adjusting the start line makes leading comments appear
			// detached so logic below will naturally associated trailing
			// comment to previous symbol
			nStart += 2
		}
		if l.prevSym != nil && len(n.leadingComments()) > 0 && l.prevSym.end().Line < nStart {
			// we may need to re-attribute the first comment to
			// instead be previous node's trailing comment
			prevEnd := l.prevSym.end().Line
			comments := n.leadingComments()
			c := comments[0]
			commentStart := c.start.Line
			if commentStart == prevEnd {
				// comment is on same line as previous symbol
				n.popLeadingComment()
				l.prevSym.pushTrailingComment(c)
			} else if commentStart == prevEnd+1 {
				// comment is right after previous symbol; see if it is detached
				// and if so re-attribute
				singleLineStyle := strings.HasPrefix(c.text, "//")
				line := c.end.Line
				groupEnd := -1
				for i := 1; i < len(comments); i++ {
					c := comments[i]
					newGroup := false
					if !singleLineStyle || c.start.Line > line+1 {
						// we've found a gap between comments, which means the
						// previous comments were detached
						newGroup = true
					} else {
						line = c.end.Line
						singleLineStyle = strings.HasPrefix(comments[i].text, "//")
						if !singleLineStyle {
							// we've found a switch from // comments to /*
							// consider that a new group which means the
							// previous comments were detached
							newGroup = true
						}
					}
					if newGroup {
						groupEnd = i
						break
					}
				}

				if groupEnd == -1 {
					// just one group of comments; we'll mark it as a trailing
					// comment if it immediately follows previous symbol and is
					// detached from current symbol
					c1 := comments[0]
					c2 := comments[len(comments)-1]
					if c1.start.Line <= prevEnd+1 && c2.end.Line < nStart-1 {
						groupEnd = len(comments)
					}
				}

				for i := 0; i < groupEnd; i++ {
					l.prevSym.pushTrailingComment(n.popLeadingComment())
				}
			}
		}

		l.prevSym = n
	}
	setString := func(val string) {
		b := basic()
		lval.str = &stringLiteralNode{val: val}
		lval.str.setRange(&b, &b)
		setPrev(lval.str)
	}
	setIdent := func(val string, kind identKind) {
		lval.id = &identNode{basicNode: basic(), val: val, kind: kind}
		setPrev(lval.id)
	}
	setInt := func(val uint64) {
		lval.ui = &intLiteralNode{basicNode: basic(), val: val}
		setPrev(lval.ui)
	}
	setFloat := func(val float64) {
		b := basic()
		lval.f = &floatLiteralNode{val: val}
		lval.f.setRange(&b, &b)
		setPrev(lval.f)
	}
	setRune := func() {
		b := basic()
		lval.b = &b
		setPrev(lval.b)
	}
	setError := func(err error) {
		lval.err = err
		l.err = err
	}

	for {
		c, n, err := l.input.readRune()
		if err == io.EOF {
			// we're not actually returning a rune, but this will associate
			// accumulated comments as a trailing comment on last symbol
			// (if appropriate)
			setRune()
			return 0
		} else if err != nil {
			setError(err)
			return _ERROR
		}

		prevLineNo = l.lineNo
		prevColNo = l.colNo
		prevOffset = l.offset

		l.offset += n
		if c == '\n' {
			l.colNo = 0
			l.lineNo++
			continue
		} else if c == '\r' {
			continue
		}
		l.colNo++
		if c == ' ' || c == '\t' {
			continue
		}

		if c == '.' {
			// tokens that start with a dot include type names and decimal literals
			cn, _, err := l.input.readRune()
			if err != nil {
				setRune()
				return int(c)
			}
			if cn == '_' || (cn >= 'a' && cn <= 'z') || (cn >= 'A' && cn <= 'Z') {
				l.colNo++
				token := []rune{c, cn}
				token = l.readIdentifier(token)
				setIdent(string(token), identTypeName)
				return _TYPENAME
			}
			if cn >= '0' && cn <= '9' {
				l.colNo++
				token := []rune{c, cn}
				token = l.readNumber(token, false, true)
				f, err := strconv.ParseFloat(string(token), 64)
				if err != nil {
					setError(err)
					return _ERROR
				}
				setFloat(f)
				return _FLOAT_LIT
			}
			l.input.unreadRune(cn)
			setRune()
			return int(c)
		}

		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
			// identifier
			token := []rune{c}
			token = l.readIdentifier(token)
			str := string(token)
			if strings.Contains(str, ".") {
				setIdent(str, identQualified)
				return _FQNAME
			}
			if t, ok := keywords[str]; ok {
				setIdent(str, identSimpleName)
				return t
			}
			setIdent(str, identSimpleName)
			return _NAME
		}

		if c >= '0' && c <= '9' {
			// integer or float literal
			if c == '0' {
				cn, _, err := l.input.readRune()
				if err != nil {
					setInt(0)
					return _INT_LIT
				}
				if cn == 'x' || cn == 'X' {
					cnn, _, err := l.input.readRune()
					if err != nil {
						l.input.unreadRune(cn)
						setInt(0)
						return _INT_LIT
					}
					if (cnn >= '0' && cnn <= '9') || (cnn >= 'a' && cnn <= 'f') || (cnn >= 'A' && cnn <= 'F') {
						// hexadecimal!
						l.colNo += 2
						token := []rune{cnn}
						token = l.readHexNumber(token)
						ui, err := strconv.ParseUint(string(token), 16, 64)
						if err != nil {
							setError(err)
							return _ERROR
						}
						setInt(ui)
						return _INT_LIT
					}
					l.input.unreadRune(cnn)
					l.input.unreadRune(cn)
					setInt(0)
					return _INT_LIT
				} else {
					l.input.unreadRune(cn)
				}
			}
			token := []rune{c}
			token = l.readNumber(token, true, true)
			numstr := string(token)
			if strings.Contains(numstr, ".") || strings.Contains(numstr, "e") || strings.Contains(numstr, "E") {
				// floating point!
				f, err := strconv.ParseFloat(numstr, 64)
				if err != nil {
					setError(err)
					return _ERROR
				}
				setFloat(f)
				return _FLOAT_LIT
			}
			// integer! (decimal or octal)
			ui, err := strconv.ParseUint(numstr, 0, 64)
			if err != nil {
				setError(err)
				return _ERROR
			}
			setInt(ui)
			return _INT_LIT
		}

		if c == '\'' || c == '"' {
			// string literal
			str, err := l.readStringLiteral(c)
			if err != nil {
				setError(err)
				return _ERROR
			}
			setString(str)
			return _STRING_LIT
		}

		if c == '/' {
			// comment
			cn, _, err := l.input.readRune()
			if err != nil {
				setRune()
				return int(c)
			}
			if cn == '/' {
				l.colNo++
				hitNewline, txt := l.skipToEndOfLineComment()
				commentPos := pos()
				commentPos.end.Col++
				if hitNewline {
					l.colNo = 0
					l.lineNo++
				}
				comments = append(comments, &comment{posRange: commentPos, text: txt})
				continue
			}
			if cn == '*' {
				l.colNo++
				if txt, ok := l.skipToEndOfBlockComment(); !ok {
					setError(errors.New("block comment never terminates, unexpected EOF"))
					return _ERROR
				} else {
					comments = append(comments, &comment{posRange: pos(), text: txt})
				}
				continue
			}
			l.input.unreadRune(cn)
		}

		setRune()
		return int(c)
	}
}
+
// readNumber consumes the remainder of a numeric literal, appending the
// consumed runes to sofar. allowDot and allowExp control whether a decimal
// point and an exponent (e/E with optional sign) may still be consumed;
// each is honored at most once. Lookahead runes that turn out not to be
// part of the number are pushed back in reverse order, so the input stream
// is left exactly as if they were never read.
func (l *protoLex) readNumber(sofar []rune, allowDot bool, allowExp bool) []rune {
	token := sofar
	for {
		c, _, err := l.input.readRune()
		if err != nil {
			break
		}
		if c == '.' {
			if !allowDot {
				l.input.unreadRune(c)
				break
			}
			allowDot = false
			cn, _, err := l.input.readRune()
			if err != nil {
				l.input.unreadRune(c)
				break
			}
			if cn < '0' || cn > '9' {
				l.input.unreadRune(cn)
				l.input.unreadRune(c)
				break
			}
			l.colNo++
			token = append(token, c)
			c = cn
		} else if c == 'e' || c == 'E' {
			if !allowExp {
				l.input.unreadRune(c)
				break
			}
			allowExp = false
			cn, _, err := l.input.readRune()
			if err != nil {
				l.input.unreadRune(c)
				break
			}
			if cn == '-' || cn == '+' {
				cnn, _, err := l.input.readRune()
				if err != nil {
					l.input.unreadRune(cn)
					l.input.unreadRune(c)
					break
				}
				if cnn < '0' || cnn > '9' {
					l.input.unreadRune(cnn)
					l.input.unreadRune(cn)
					l.input.unreadRune(c)
					break
				}
				l.colNo++
				token = append(token, c)
				c = cn
				cn = cnn
			} else if cn < '0' || cn > '9' {
				l.input.unreadRune(cn)
				l.input.unreadRune(c)
				break
			}
			l.colNo++
			token = append(token, c)
			c = cn
		} else if c < '0' || c > '9' {
			l.input.unreadRune(c)
			break
		}
		l.colNo++
		token = append(token, c)
	}
	return token
}
+
+func (l *protoLex) readHexNumber(sofar []rune) []rune {
+ token := sofar
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ break
+ }
+ if (c < 'a' || c > 'f') && (c < 'A' || c > 'F') && (c < '0' || c > '9') {
+ l.input.unreadRune(c)
+ break
+ }
+ l.colNo++
+ token = append(token, c)
+ }
+ return token
+}
+
// readIdentifier consumes the remainder of a (possibly qualified)
// identifier, appending the consumed runes to sofar. A dot is consumed
// only when the rune after it could start a new name component (letter or
// underscore); otherwise the dot and its lookahead are pushed back and the
// identifier ends there.
func (l *protoLex) readIdentifier(sofar []rune) []rune {
	token := sofar
	for {
		c, _, err := l.input.readRune()
		if err != nil {
			break
		}
		if c == '.' {
			cn, _, err := l.input.readRune()
			if err != nil {
				l.input.unreadRune(c)
				break
			}
			if cn != '_' && (cn < 'a' || cn > 'z') && (cn < 'A' || cn > 'Z') {
				l.input.unreadRune(cn)
				l.input.unreadRune(c)
				break
			}
			l.colNo++
			token = append(token, c)
			c = cn
		} else if c != '_' && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') {
			l.input.unreadRune(c)
			break
		}
		l.colNo++
		token = append(token, c)
	}
	return token
}
+
+func (l *protoLex) readStringLiteral(quote rune) (string, error) {
+ var buf bytes.Buffer
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return "", err
+ }
+ if c == '\n' {
+ l.colNo = 0
+ l.lineNo++
+ return "", errors.New("encountered end-of-line before end of string literal")
+ }
+ l.colNo++
+ if c == quote {
+ break
+ }
+ if c == 0 {
+ return "", errors.New("null character ('\\0') not allowed in string literal")
+ }
+ if c == '\\' {
+ // escape sequence
+ c, _, err = l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.colNo++
+ if c == 'x' || c == 'X' {
+ // hex escape
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.colNo++
+ c2, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var hex string
+ if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') {
+ l.input.unreadRune(c2)
+ hex = string(c)
+ } else {
+ l.colNo++
+ hex = string([]rune{c, c2})
+ }
+ i, err := strconv.ParseInt(hex, 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid hex escape: \\x%q", hex)
+ }
+ buf.WriteByte(byte(i))
+
+ } else if c >= '0' && c <= '7' {
+ // octal escape
+ c2, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var octal string
+ if c2 < '0' || c2 > '7' {
+ l.input.unreadRune(c2)
+ octal = string(c)
+ } else {
+ l.colNo++
+ c3, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c3 < '0' || c3 > '7' {
+ l.input.unreadRune(c3)
+ octal = string([]rune{c, c2})
+ } else {
+ l.colNo++
+ octal = string([]rune{c, c2, c3})
+ }
+ }
+ i, err := strconv.ParseInt(octal, 8, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid octal escape: \\%q", octal)
+ }
+ if i > 0xff {
+ return "", fmt.Errorf("octal escape is out range, must be between 0 and 377: \\%q", octal)
+ }
+ buf.WriteByte(byte(i))
+
+ } else if c == 'u' {
+ // short unicode escape
+ u := make([]rune, 4)
+ for i := range u {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.colNo++
+ u[i] = c
+ }
+ i, err := strconv.ParseInt(string(u), 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid unicode escape: \\u%q", string(u))
+ }
+ buf.WriteRune(rune(i))
+
+ } else if c == 'U' {
+ // long unicode escape
+ u := make([]rune, 8)
+ for i := range u {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.colNo++
+ u[i] = c
+ }
+ i, err := strconv.ParseInt(string(u), 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid unicode escape: \\U%q", string(u))
+ }
+ if i > 0x10ffff || i < 0 {
+ return "", fmt.Errorf("unicode escape is out of range, must be between 0 and 0x10ffff: \\U%q", string(u))
+ }
+ buf.WriteRune(rune(i))
+
+ } else if c == 'a' {
+ buf.WriteByte('\a')
+ } else if c == 'b' {
+ buf.WriteByte('\b')
+ } else if c == 'f' {
+ buf.WriteByte('\f')
+ } else if c == 'n' {
+ buf.WriteByte('\n')
+ } else if c == 'r' {
+ buf.WriteByte('\r')
+ } else if c == 't' {
+ buf.WriteByte('\t')
+ } else if c == 'v' {
+ buf.WriteByte('\v')
+ } else if c == '\\' {
+ buf.WriteByte('\\')
+ } else if c == '\'' {
+ buf.WriteByte('\'')
+ } else if c == '"' {
+ buf.WriteByte('"')
+ } else if c == '?' {
+ buf.WriteByte('?')
+ } else {
+ return "", fmt.Errorf("invalid escape sequence: %q", "\\"+string(c))
+ }
+ } else {
+ buf.WriteRune(c)
+ }
+ }
+ return buf.String(), nil
+}
+
+func (l *protoLex) skipToEndOfLineComment() (bool, string) {
+ txt := []rune{'/', '/'}
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return false, string(txt)
+ }
+ if c == '\n' {
+ return true, string(txt)
+ }
+ l.colNo++
+ txt = append(txt, c)
+ }
+}
+
// skipToEndOfBlockComment consumes input through the closing "*/" of a
// block comment, updating line/column tracking as it goes. It returns the
// full comment text (including both delimiters) and true, or "" and false
// if EOF is reached before the comment is terminated.
func (l *protoLex) skipToEndOfBlockComment() (string, bool) {
	txt := []rune{'/', '*'}
	for {
		c, _, err := l.input.readRune()
		if err != nil {
			return "", false
		}
		if c == '\n' {
			l.colNo = 0
			l.lineNo++
		} else {
			l.colNo++
		}
		txt = append(txt, c)
		if c == '*' {
			c, _, err := l.input.readRune()
			if err != nil {
				return "", false
			}
			if c == '/' {
				l.colNo++
				txt = append(txt, c)
				return string(txt), true
			}
			l.input.unreadRune(c)
		}
	}
}
+
+func (l *protoLex) Error(s string) {
+ if l.err == nil {
+ l.err = ErrorWithSourcePos{Underlying: errors.New(s), Pos: l.prevSym.start()}
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
new file mode 100644
index 0000000..c150936
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
@@ -0,0 +1,652 @@
+package protoparse
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// linker performs the link step of parsing: it pools symbols across all
+// parsed files, resolves and rewrites type references, and produces rich
+// (linked) descriptors.
+type linker struct {
+	// files maps file name to its parse result.
+	files map[string]*parseResult
+	// descriptorPool maps each file to its own index of fully-qualified
+	// name -> descriptor; populated by createDescriptorPool.
+	descriptorPool map[*dpb.FileDescriptorProto]map[string]proto.Message
+	// extensions records, per extended message name, which extension tags
+	// are in use and by which extension, to detect duplicate tags.
+	extensions map[string]map[int32]string
+}
+
+// newLinker returns a linker over the given parse results. The remaining
+// fields are populated during linkFiles.
+func newLinker(files map[string]*parseResult) *linker {
+	return &linker{files: files}
+}
+
+// linkFiles runs the full link pipeline over all files known to the linker
+// and returns the resulting rich descriptors, keyed by file name. It stops
+// at the first error (duplicate symbol, unresolved reference,
+// uninterpretable option, etc.).
+func (l *linker) linkFiles() (map[string]*desc.FileDescriptor, error) {
+	// First, we put all symbols into a single pool, which lets us ensure there
+	// are no duplicate symbols and will also let us resolve and revise all type
+	// references in next step.
+	if err := l.createDescriptorPool(); err != nil {
+		return nil, err
+	}
+
+	// After we've populated the pool, we can now try to resolve all type
+	// references. All references must be checked for correct type, any fields
+	// with enum types must be corrected (since we parse them as if they are
+	// message references since we don't actually know message or enum until
+	// link time), and references will be re-written to be fully-qualified
+	// references (e.g. start with a dot ".").
+	if err := l.resolveReferences(); err != nil {
+		return nil, err
+	}
+
+	// Now we've validated the descriptors, so we can link them into rich
+	// descriptors. This is a little redundant since that step does similar
+	// checking of symbols. But, without breaking encapsulation (e.g. exporting
+	// a lot of fields from desc package that are currently unexported) or
+	// merging this into the same package, we can't really prevent it.
+	linked, err := l.createdLinkedDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	// Now that we have linked descriptors, we can interpret any uninterpreted
+	// options that remain.
+	for _, r := range l.files {
+		fd := linked[r.fd.GetName()]
+		if err := interpretFileOptions(r, richFileDescriptorish{FileDescriptor: fd}); err != nil {
+			return nil, err
+		}
+	}
+
+	return linked, nil
+}
+
+// createDescriptorPool indexes every symbol (messages, fields, extensions,
+// enums, enum values, services, methods) of every file by fully-qualified
+// name, then cross-checks all per-file pools against each other so a symbol
+// defined in two different files is reported as a duplicate.
+func (l *linker) createDescriptorPool() error {
+	l.descriptorPool = map[*dpb.FileDescriptorProto]map[string]proto.Message{}
+	for _, r := range l.files {
+		fd := r.fd
+		pool := map[string]proto.Message{}
+		l.descriptorPool[fd] = pool
+		prefix := fd.GetPackage()
+		if prefix != "" {
+			prefix += "."
+		}
+		for _, md := range fd.MessageType {
+			if err := addMessageToPool(r, pool, prefix, md); err != nil {
+				return err
+			}
+		}
+		for _, fld := range fd.Extension {
+			if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+				return err
+			}
+		}
+		for _, ed := range fd.EnumType {
+			if err := addEnumToPool(r, pool, prefix, ed); err != nil {
+				return err
+			}
+		}
+		for _, sd := range fd.Service {
+			if err := addServiceToPool(r, pool, prefix, sd); err != nil {
+				return err
+			}
+		}
+	}
+	// try putting everything into a single pool, to ensure there are no duplicates
+	// across files (e.g. same symbol, but declared in two different files)
+	type entry struct {
+		file string
+		msg  proto.Message
+	}
+	pool := map[string]entry{}
+	for f, p := range l.descriptorPool {
+		for k, v := range p {
+			if e, ok := pool[k]; ok {
+				desc1 := e.msg
+				file1 := e.file
+				desc2 := v
+				file2 := f.GetName()
+				// normalize so the error names the files in a deterministic
+				// (sorted) order, pointing at the second file's node
+				if file2 < file1 {
+					file1, file2 = file2, file1
+					desc1, desc2 = desc2, desc1
+				}
+				node := l.files[file2].nodes[desc2]
+				return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s in %q", k, descriptorType(desc1), file1)}
+			}
+			pool[k] = entry{file: f.GetName(), msg: v}
+		}
+	}
+
+	return nil
+}
+
+// addMessageToPool registers the message under the given package/message
+// prefix and then, recursively, all of its fields, extensions, nested
+// messages, and nested enums.
+func addMessageToPool(r *parseResult, pool map[string]proto.Message, prefix string, md *dpb.DescriptorProto) error {
+	fqn := prefix + md.GetName()
+	if err := addToPool(r, pool, fqn, md); err != nil {
+		return err
+	}
+	// nested elements are qualified by this message's name
+	prefix = fqn + "."
+	for _, fld := range md.Field {
+		if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Extension {
+		if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, nmd := range md.NestedType {
+		if err := addMessageToPool(r, pool, prefix, nmd); err != nil {
+			return err
+		}
+	}
+	for _, ed := range md.EnumType {
+		if err := addEnumToPool(r, pool, prefix, ed); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addFieldToPool registers a single field (or extension) descriptor under
+// the given prefix.
+func addFieldToPool(r *parseResult, pool map[string]proto.Message, prefix string, fld *dpb.FieldDescriptorProto) error {
+	fqn := prefix + fld.GetName()
+	return addToPool(r, pool, fqn, fld)
+}
+
+// addEnumToPool registers the enum and each of its values. Values are
+// registered as "<enum fqn>.<value name>".
+func addEnumToPool(r *parseResult, pool map[string]proto.Message, prefix string, ed *dpb.EnumDescriptorProto) error {
+	fqn := prefix + ed.GetName()
+	if err := addToPool(r, pool, fqn, ed); err != nil {
+		return err
+	}
+	for _, evd := range ed.Value {
+		vfqn := fqn + "." + evd.GetName()
+		if err := addToPool(r, pool, vfqn, evd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addServiceToPool registers the service and each of its methods. Methods
+// are registered as "<service fqn>.<method name>".
+func addServiceToPool(r *parseResult, pool map[string]proto.Message, prefix string, sd *dpb.ServiceDescriptorProto) error {
+	fqn := prefix + sd.GetName()
+	if err := addToPool(r, pool, fqn, sd); err != nil {
+		return err
+	}
+	for _, mtd := range sd.Method {
+		mfqn := fqn + "." + mtd.GetName()
+		if err := addToPool(r, pool, mfqn, mtd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addToPool records fqn -> dsc in pool, or reports a duplicate-symbol
+// error positioned at dsc's source node if fqn is already present.
+func addToPool(r *parseResult, pool map[string]proto.Message, fqn string, dsc proto.Message) error {
+	if d, ok := pool[fqn]; ok {
+		node := r.nodes[dsc]
+		return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s", fqn, descriptorType(d))}
+	}
+	pool[fqn] = dsc
+	return nil
+}
+
+// descriptorType returns a human-readable noun for the given descriptor
+// proto, for use in error messages (e.g. "already defined as message").
+func descriptorType(m proto.Message) string {
+	switch d := m.(type) {
+	case *dpb.FileDescriptorProto:
+		return "file"
+	case *dpb.DescriptorProto:
+		return "message"
+	case *dpb.DescriptorProto_ExtensionRange:
+		return "extension range"
+	case *dpb.FieldDescriptorProto:
+		// a field descriptor proto with an extendee is an extension
+		if d.GetExtendee() != "" {
+			return "extension"
+		}
+		return "field"
+	case *dpb.EnumDescriptorProto:
+		return "enum"
+	case *dpb.EnumValueDescriptorProto:
+		return "enum value"
+	case *dpb.ServiceDescriptorProto:
+		return "service"
+	case *dpb.MethodDescriptorProto:
+		return "method"
+	default:
+		// shouldn't be possible
+		return fmt.Sprintf("%T", d)
+	}
+}
+
+// resolveReferences walks every file and resolves all type-name references
+// (file options, messages, fields/extensions, enums, services), rewriting
+// each to fully-qualified form. It also initializes the extension-tag
+// tracking map used to detect duplicate extension tags.
+func (l *linker) resolveReferences() error {
+	l.extensions = map[string]map[int32]string{}
+	for _, r := range l.files {
+		fd := r.fd
+		prefix := fd.GetPackage()
+		// the file itself is the outermost scope
+		scopes := []scope{fileScope(fd, l)}
+		if prefix != "" {
+			prefix += "."
+		}
+		if fd.Options != nil {
+			if err := l.resolveOptions(r, fd, "file", fd.GetName(), proto.MessageName(fd.Options), fd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+		for _, md := range fd.MessageType {
+			if err := l.resolveMessageTypes(r, fd, prefix, md, scopes); err != nil {
+				return err
+			}
+		}
+		for _, fld := range fd.Extension {
+			if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+				return err
+			}
+		}
+		for _, ed := range fd.EnumType {
+			if err := l.resolveEnumTypes(r, fd, prefix, ed, scopes); err != nil {
+				return err
+			}
+		}
+		for _, sd := range fd.Service {
+			if err := l.resolveServiceTypes(r, fd, prefix, sd, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// resolveEnumTypes resolves option references for an enum and for each of
+// its values. Enums introduce no new type references beyond their options.
+func (l *linker) resolveEnumTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, ed *dpb.EnumDescriptorProto, scopes []scope) error {
+	enumFqn := prefix + ed.GetName()
+	if ed.Options != nil {
+		if err := l.resolveOptions(r, fd, "enum", enumFqn, proto.MessageName(ed.Options), ed.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+	for _, evd := range ed.Value {
+		if evd.Options != nil {
+			evFqn := enumFqn + "." + evd.GetName()
+			if err := l.resolveOptions(r, fd, "enum value", evFqn, proto.MessageName(evd.Options), evd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// resolveMessageTypes resolves all references inside a message: its own
+// options, then (recursively) nested messages and enums, fields,
+// extensions, and extension-range options. The message pushes a new lexical
+// scope so that names declared inside it resolve innermost-first.
+func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, md *dpb.DescriptorProto, scopes []scope) error {
+	fqn := prefix + md.GetName()
+	scope := messageScope(fqn, isProto3(fd), l.descriptorPool[fd])
+	scopes = append(scopes, scope)
+	prefix = fqn + "."
+
+	if md.Options != nil {
+		if err := l.resolveOptions(r, fd, "message", fqn, proto.MessageName(md.Options), md.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	for _, nmd := range md.NestedType {
+		if err := l.resolveMessageTypes(r, fd, prefix, nmd, scopes); err != nil {
+			return err
+		}
+	}
+	for _, ned := range md.EnumType {
+		if err := l.resolveEnumTypes(r, fd, prefix, ned, scopes); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Field {
+		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Extension {
+		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+			return err
+		}
+	}
+	for _, er := range md.ExtensionRange {
+		if er.Options != nil {
+			// end is exclusive in the descriptor, so display end-1
+			erName := fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)
+			if err := l.resolveOptions(r, fd, "extension range", erName, proto.MessageName(er.Options), er.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// resolveFieldTypes resolves a field's (or extension's) references and
+// rewrites them to fully-qualified (leading-dot) form. For extensions it
+// additionally validates that the tag lies in one of the extendee's
+// extension ranges and that the tag is not already used by another
+// extension of the same message. If the field's type name turns out to
+// refer to an enum, the tentatively-assigned message type is corrected.
+func (l *linker) resolveFieldTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto, scopes []scope) error {
+	thisName := prefix + fld.GetName()
+	scope := fmt.Sprintf("field %s", thisName)
+	node := r.getFieldNode(fld)
+	elemType := "field"
+	if fld.GetExtendee() != "" {
+		fqn, dsc, _ := l.resolve(fd, fld.GetExtendee(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("unknown extendee type %s", fld.GetExtendee())}
+		}
+		extd, ok := dsc.(*dpb.DescriptorProto)
+		if !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("extendee is invalid: %s is a %s, not a message", fqn, otherType)}
+		}
+		fld.Extendee = proto.String("." + fqn)
+		// make sure the tag number is in range
+		found := false
+		tag := fld.GetNumber()
+		for _, rng := range extd.ExtensionRange {
+			if tag >= rng.GetStart() && tag < rng.GetEnd() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: tag %d is not in valid range for extended type %s", scope, tag, fqn)}
+		}
+		// make sure tag is not a duplicate
+		usedExtTags := l.extensions[fqn]
+		if usedExtTags == nil {
+			usedExtTags = map[int32]string{}
+			l.extensions[fqn] = usedExtTags
+		}
+		if other := usedExtTags[fld.GetNumber()]; other != "" {
+			return ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: duplicate extension: %s and %s are both using tag %d", scope, other, thisName, fld.GetNumber())}
+		}
+		usedExtTags[fld.GetNumber()] = thisName
+		elemType = "extension"
+	}
+
+	if fld.Options != nil {
+		if err := l.resolveOptions(r, fd, elemType, thisName, proto.MessageName(fld.Options), fld.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	if fld.GetTypeName() == "" {
+		// scalar type; no further resolution required
+		return nil
+	}
+
+	fqn, dsc, proto3 := l.resolve(fd, fld.GetTypeName(), isType, scopes)
+	if dsc == nil {
+		return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: unknown type %s", scope, fld.GetTypeName())}
+	}
+	switch dsc := dsc.(type) {
+	case *dpb.DescriptorProto:
+		fld.TypeName = proto.String("." + fqn)
+	case *dpb.EnumDescriptorProto:
+		if fld.GetExtendee() == "" && isProto3(fd) && !proto3 {
+			// fields in a proto3 message cannot refer to proto2 enums
+			return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: cannot use proto2 enum %s in a proto3 message", scope, fld.GetTypeName())}
+		}
+		fld.TypeName = proto.String("." + fqn)
+		// the type was tentatively set to message, but now we know it's actually an enum
+		fld.Type = dpb.FieldDescriptorProto_TYPE_ENUM.Enum()
+	default:
+		otherType := descriptorType(dsc)
+		return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: invalid type: %s is a %s, not a message or enum", scope, fqn, otherType)}
+	}
+	return nil
+}
+
+// resolveServiceTypes resolves a service's option references and, for each
+// method, its options plus its request and response types, which must both
+// resolve to messages; resolved names are rewritten to leading-dot form.
+func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, sd *dpb.ServiceDescriptorProto, scopes []scope) error {
+	thisName := prefix + sd.GetName()
+	if sd.Options != nil {
+		if err := l.resolveOptions(r, fd, "service", thisName, proto.MessageName(sd.Options), sd.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	for _, mtd := range sd.Method {
+		if mtd.Options != nil {
+			if err := l.resolveOptions(r, fd, "method", thisName+"."+mtd.GetName(), proto.MessageName(mtd.Options), mtd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+		scope := fmt.Sprintf("method %s.%s", thisName, mtd.GetName())
+		node := r.getMethodNode(mtd)
+		fqn, dsc, _ := l.resolve(fd, mtd.GetInputType(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: unknown request type %s", scope, mtd.GetInputType())}
+		}
+		if _, ok := dsc.(*dpb.DescriptorProto); !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: invalid request type: %s is a %s, not a message", scope, fqn, otherType)}
+		}
+		mtd.InputType = proto.String("." + fqn)
+
+		fqn, dsc, _ = l.resolve(fd, mtd.GetOutputType(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: unknown response type %s", scope, mtd.GetOutputType())}
+		}
+		if _, ok := dsc.(*dpb.DescriptorProto); !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: invalid response type: %s is a %s, not a message", scope, fqn, otherType)}
+		}
+		mtd.OutputType = proto.String("." + fqn)
+	}
+	return nil
+}
+
+// resolveOptions resolves extension names appearing in uninterpreted option
+// names (the parenthesized parts), verifying each resolves to an actual
+// extension field and rewriting it to leading-dot fully-qualified form.
+// Non-extension name parts need no resolution. elemType/elemName are used
+// only to prefix error messages (omitted for file-level options).
+func (l *linker) resolveOptions(r *parseResult, fd *dpb.FileDescriptorProto, elemType, elemName, optType string, opts []*dpb.UninterpretedOption, scopes []scope) error {
+	var scope string
+	if elemType != "file" {
+		scope = fmt.Sprintf("%s %s: ", elemType, elemName)
+	}
+	for _, opt := range opts {
+		for _, nm := range opt.Name {
+			if nm.GetIsExtension() {
+				node := r.getOptionNamePartNode(nm)
+				fqn, dsc, _ := l.resolve(fd, nm.GetNamePart(), isField, scopes)
+				if dsc == nil {
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sunknown extension %s", scope, nm.GetNamePart())}
+				}
+				if ext, ok := dsc.(*dpb.FieldDescriptorProto); !ok {
+					otherType := descriptorType(dsc)
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a %s, not an extension", scope, nm.GetNamePart(), otherType)}
+				} else if ext.GetExtendee() == "" {
+					// a plain field cannot be used as an option extension
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a field but not an extension", scope, nm.GetNamePart())}
+				}
+				nm.NamePart = proto.String("." + fqn)
+			}
+		}
+	}
+	return nil
+}
+
+// resolve resolves name relative to file fd. A leading dot marks the name
+// as already fully-qualified, so it is looked up directly; otherwise the
+// enclosing scopes are searched innermost-first. The allowed predicate
+// filters acceptable descriptor kinds; when only disallowed matches are
+// found, the first such match is returned as a "best guess" so the caller
+// can produce a precise wrong-kind error. A nil element means not found.
+func (l *linker) resolve(fd *dpb.FileDescriptorProto, name string, allowed func(proto.Message) bool, scopes []scope) (fqn string, element proto.Message, proto3 bool) {
+	if strings.HasPrefix(name, ".") {
+		// already fully-qualified
+		d, proto3 := l.findSymbol(fd, name[1:], false, map[*dpb.FileDescriptorProto]struct{}{})
+		if d != nil {
+			return name[1:], d, proto3
+		}
+	} else {
+		// unqualified, so we look in the enclosing (last) scope first and move
+		// towards outermost (first) scope, trying to resolve the symbol
+		var bestGuess proto.Message
+		var bestGuessFqn string
+		var bestGuessProto3 bool
+		for i := len(scopes) - 1; i >= 0; i-- {
+			fqn, d, proto3 := scopes[i](name)
+			if d != nil {
+				if allowed(d) {
+					return fqn, d, proto3
+				} else if bestGuess == nil {
+					bestGuess = d
+					bestGuessFqn = fqn
+					bestGuessProto3 = proto3
+				}
+			}
+		}
+		// we return best guess, even though it was not an allowed kind of
+		// descriptor, so caller can print a better error message (e.g.
+		// indicating that the name was found but that it's the wrong type)
+		return bestGuessFqn, bestGuess, bestGuessProto3
+	}
+	return "", nil, false
+}
+
+func isField(m proto.Message) bool {
+ _, ok := m.(*dpb.FieldDescriptorProto)
+ return ok
+}
+
+func isMessage(m proto.Message) bool {
+ _, ok := m.(*dpb.DescriptorProto)
+ return ok
+}
+
+func isType(m proto.Message) bool {
+ switch m.(type) {
+ case *dpb.DescriptorProto, *dpb.EnumDescriptorProto:
+ return true
+ }
+ return false
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared. It maps a (possibly relative) symbol name to its
+// fully-qualified name, its descriptor, and whether its defining file uses
+// proto3 syntax; a nil element means no match in this scope.
+type scope func(symbol string) (fqn string, element proto.Message, proto3 bool)
+
+// fileScope returns the outermost scope for resolving names in file fd.
+// we search symbols in this file, but also symbols in other files that have
+// the same package as this file or a "parent" package (in protobuf,
+// packages are a hierarchy like C++ namespaces)
+func fileScope(fd *dpb.FileDescriptorProto, l *linker) scope {
+	prefixes := internal.CreatePrefixList(fd.GetPackage())
+	return func(name string) (string, proto.Message, bool) {
+		for _, prefix := range prefixes {
+			var n string
+			if prefix == "" {
+				n = name
+			} else {
+				n = prefix + "." + name
+			}
+			// a fresh checked-set per lookup, since each lookup may traverse
+			// a different chain of dependencies
+			d, proto3 := l.findSymbol(fd, n, false, map[*dpb.FileDescriptorProto]struct{}{})
+			if d != nil {
+				return n, d, proto3
+			}
+		}
+		return "", nil, false
+	}
+}
+
+// messageScope returns a scope that resolves names declared directly inside
+// the named message, by consulting the file's symbol pool with the message
+// name as qualifier. proto3 reports the syntax of the enclosing file.
+func messageScope(messageName string, proto3 bool, filePool map[string]proto.Message) scope {
+	return func(name string) (string, proto.Message, bool) {
+		fqn := messageName + "." + name
+		d, ok := filePool[fqn]
+		if !ok {
+			return "", nil, false
+		}
+		return fqn, d, proto3
+	}
+}
+
+// findSymbol looks name up in fd's own symbol pool and then in its imports.
+// For a direct lookup (public == false) all dependencies are searched; for
+// transitive lookups (public == true) only public dependencies are, per
+// protobuf public-import semantics. checked prevents revisiting files.
+// Returns the descriptor (nil if absent) and whether its defining file is
+// proto3.
+func (l *linker) findSymbol(fd *dpb.FileDescriptorProto, name string, public bool, checked map[*dpb.FileDescriptorProto]struct{}) (element proto.Message, proto3 bool) {
+	if _, ok := checked[fd]; ok {
+		// already checked this one
+		return nil, false
+	}
+	checked[fd] = struct{}{}
+	d := l.descriptorPool[fd][name]
+	if d != nil {
+		return d, isProto3(fd)
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
+	if public {
+		for _, depIndex := range fd.PublicDependency {
+			dep := fd.Dependency[depIndex]
+			depres := l.files[dep]
+			if depres == nil {
+				// we'll catch this error later
+				continue
+			}
+			if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+				return d, proto3
+			}
+		}
+	} else {
+		for _, dep := range fd.Dependency {
+			depres := l.files[dep]
+			if depres == nil {
+				// we'll catch this error later
+				continue
+			}
+			if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+				return d, proto3
+			}
+		}
+	}
+
+	return nil, false
+}
+
+// isProto3 reports whether the file declares syntax "proto3".
+func isProto3(fd *dpb.FileDescriptorProto) bool {
+	syntax := fd.GetSyntax()
+	return syntax == "proto3"
+}
+
+// createdLinkedDescriptors converts every parse result into a rich
+// desc.FileDescriptor, linking each file's dependencies first. Files are
+// processed in sorted name order so results (and any errors) are
+// deterministic.
+func (l *linker) createdLinkedDescriptors() (map[string]*desc.FileDescriptor, error) {
+	names := make([]string, 0, len(l.files))
+	for name := range l.files {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	linked := map[string]*desc.FileDescriptor{}
+	for _, name := range names {
+		if _, err := l.linkFile(name, nil, linked); err != nil {
+			return nil, err
+		}
+	}
+	return linked, nil
+}
+
+// linkFile links the named file, recursively linking its dependencies
+// first. seen carries the current import chain for cycle detection (and
+// error reporting); linked memoizes already-linked files so each file is
+// linked at most once.
+func (l *linker) linkFile(name string, seen []string, linked map[string]*desc.FileDescriptor) (*desc.FileDescriptor, error) {
+	// check for import cycle
+	for _, s := range seen {
+		if name == s {
+			// build a readable chain like "a" -> "b" -> "a" for the error
+			var msg bytes.Buffer
+			first := true
+			for _, s := range seen {
+				if first {
+					first = false
+				} else {
+					msg.WriteString(" -> ")
+				}
+				fmt.Fprintf(&msg, "%q", s)
+			}
+			fmt.Fprintf(&msg, " -> %q", name)
+			return nil, fmt.Errorf("cycle found in imports: %s", msg.String())
+		}
+	}
+	seen = append(seen, name)
+
+	if lfd, ok := linked[name]; ok {
+		// already linked
+		return lfd, nil
+	}
+	r := l.files[name]
+	if r == nil {
+		importer := seen[len(seen)-2] // len-1 is *this* file, before that is the one that imported it
+		return nil, fmt.Errorf("no descriptor found for %q, imported by %q", name, importer)
+	}
+	var deps []*desc.FileDescriptor
+	for _, dep := range r.fd.Dependency {
+		ldep, err := l.linkFile(dep, seen, linked)
+		if err != nil {
+			return nil, err
+		}
+		deps = append(deps, ldep)
+	}
+	lfd, err := desc.CreateFileDescriptor(r.fd, deps...)
+	if err != nil {
+		return nil, fmt.Errorf("error linking %q: %s", name, err)
+	}
+	linked[name] = lfd
+	return lfd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
new file mode 100644
index 0000000..be287f6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
@@ -0,0 +1,1405 @@
+package protoparse
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
+// NB: To process options, we need descriptors, but we may not have rich
+// descriptors when trying to interpret options for unlinked parsed files.
+// So we define minimal interfaces that can be backed by both rich descriptors
+// as well as their poorer cousins, plain ol' descriptor protos.
+
+// descriptorish is the minimal interface shared by all descriptor wrappers,
+// whether backed by rich descriptors or by plain descriptor protos.
+type descriptorish interface {
+	GetFile() fileDescriptorish
+	GetFullyQualifiedName() string
+	AsProto() proto.Message
+}
+
+// fileDescriptorish abstracts a file descriptor and access to its contents.
+type fileDescriptorish interface {
+	descriptorish
+	GetFileOptions() *dpb.FileOptions
+	GetPackage() string
+	FindSymbol(name string) desc.Descriptor
+	GetPublicDependencies() []fileDescriptorish
+	GetDependencies() []fileDescriptorish
+	GetMessageTypes() []msgDescriptorish
+	GetExtensions() []fldDescriptorish
+	GetEnumTypes() []enumDescriptorish
+	GetServices() []svcDescriptorish
+}
+
+// msgDescriptorish abstracts a message descriptor and its nested elements.
+type msgDescriptorish interface {
+	descriptorish
+	GetMessageOptions() *dpb.MessageOptions
+	GetFields() []fldDescriptorish
+	GetOneOfs() []oneofDescriptorish
+	GetExtensionRanges() []extRangeDescriptorish
+	GetNestedMessageTypes() []msgDescriptorish
+	GetNestedExtensions() []fldDescriptorish
+	GetNestedEnumTypes() []enumDescriptorish
+}
+
+// fldDescriptorish abstracts a field (or extension) descriptor.
+type fldDescriptorish interface {
+	descriptorish
+	GetFieldOptions() *dpb.FieldOptions
+	GetMessageType() *desc.MessageDescriptor
+	GetEnumType() *desc.EnumDescriptor
+	AsFieldDescriptorProto() *dpb.FieldDescriptorProto
+}
+
+// oneofDescriptorish abstracts a oneof descriptor.
+type oneofDescriptorish interface {
+	descriptorish
+	GetOneOfOptions() *dpb.OneofOptions
+}
+
+// enumDescriptorish abstracts an enum descriptor and its values.
+type enumDescriptorish interface {
+	descriptorish
+	GetEnumOptions() *dpb.EnumOptions
+	GetValues() []enumValDescriptorish
+}
+
+// enumValDescriptorish abstracts an enum value descriptor.
+type enumValDescriptorish interface {
+	descriptorish
+	GetEnumValueOptions() *dpb.EnumValueOptions
+}
+
+// svcDescriptorish abstracts a service descriptor and its methods.
+type svcDescriptorish interface {
+	descriptorish
+	GetServiceOptions() *dpb.ServiceOptions
+	GetMethods() []methodDescriptorish
+}
+
+// methodDescriptorish abstracts a method descriptor.
+type methodDescriptorish interface {
+	descriptorish
+	GetMethodOptions() *dpb.MethodOptions
+}
+
+// The hierarchy of descriptorish implementations backed by
+// rich descriptors:
+
+// richFileDescriptorish adapts a rich *desc.FileDescriptor to the
+// fileDescriptorish interface; the accessor methods wrap each contained
+// element in its corresponding rich descriptorish type.
+type richFileDescriptorish struct {
+	*desc.FileDescriptor
+}
+
+// GetFile returns the receiver: a file is its own enclosing file.
+func (d richFileDescriptorish) GetFile() fileDescriptorish {
+	return d
+}
+
+// GetPublicDependencies wraps the file's public imports.
+func (d richFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
+	deps := d.FileDescriptor.GetPublicDependencies()
+	ret := make([]fileDescriptorish, len(deps))
+	for i, d := range deps {
+		ret[i] = richFileDescriptorish{FileDescriptor: d}
+	}
+	return ret
+}
+
+// GetDependencies wraps all of the file's imports.
+func (d richFileDescriptorish) GetDependencies() []fileDescriptorish {
+	deps := d.FileDescriptor.GetDependencies()
+	ret := make([]fileDescriptorish, len(deps))
+	for i, d := range deps {
+		ret[i] = richFileDescriptorish{FileDescriptor: d}
+	}
+	return ret
+}
+
+// GetMessageTypes wraps the file's top-level messages.
+func (d richFileDescriptorish) GetMessageTypes() []msgDescriptorish {
+	msgs := d.FileDescriptor.GetMessageTypes()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
+	}
+	return ret
+}
+
+// GetExtensions wraps the file's top-level extensions.
+func (d richFileDescriptorish) GetExtensions() []fldDescriptorish {
+	flds := d.FileDescriptor.GetExtensions()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+// GetEnumTypes wraps the file's top-level enums.
+func (d richFileDescriptorish) GetEnumTypes() []enumDescriptorish {
+	ens := d.FileDescriptor.GetEnumTypes()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
+	}
+	return ret
+}
+
+// GetServices wraps the file's services.
+func (d richFileDescriptorish) GetServices() []svcDescriptorish {
+	svcs := d.FileDescriptor.GetServices()
+	ret := make([]svcDescriptorish, len(svcs))
+	for i, s := range svcs {
+		ret[i] = richSvcDescriptorish{ServiceDescriptor: s}
+	}
+	return ret
+}
+
+// richMsgDescriptorish adapts a rich *desc.MessageDescriptor to the
+// msgDescriptorish interface.
+type richMsgDescriptorish struct {
+	*desc.MessageDescriptor
+}
+
+// GetFile wraps the message's enclosing file.
+func (d richMsgDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.MessageDescriptor.GetFile()}
+}
+
+// GetFields wraps the message's fields.
+func (d richMsgDescriptorish) GetFields() []fldDescriptorish {
+	flds := d.MessageDescriptor.GetFields()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+// GetOneOfs wraps the message's oneofs.
+func (d richMsgDescriptorish) GetOneOfs() []oneofDescriptorish {
+	oos := d.MessageDescriptor.GetOneOfs()
+	ret := make([]oneofDescriptorish, len(oos))
+	for i, oo := range oos {
+		ret[i] = richOneOfDescriptorish{OneOfDescriptor: oo}
+	}
+	return ret
+}
+
+// GetExtensionRanges wraps the message's extension ranges. Ranges come from
+// the underlying descriptor proto, qualified by this message's name.
+func (d richMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish {
+	md := d.MessageDescriptor
+	mdFqn := md.GetFullyQualifiedName()
+	extrs := md.AsDescriptorProto().GetExtensionRange()
+	ret := make([]extRangeDescriptorish, len(extrs))
+	for i, extr := range extrs {
+		ret[i] = extRangeDescriptorish{
+			er:   extr,
+			qual: mdFqn,
+			file: richFileDescriptorish{FileDescriptor: md.GetFile()},
+		}
+	}
+	return ret
+}
+
+// GetNestedMessageTypes wraps the message's nested messages.
+func (d richMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish {
+	msgs := d.MessageDescriptor.GetNestedMessageTypes()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
+	}
+	return ret
+}
+
+// GetNestedExtensions wraps the message's nested extensions.
+func (d richMsgDescriptorish) GetNestedExtensions() []fldDescriptorish {
+	flds := d.MessageDescriptor.GetNestedExtensions()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+// GetNestedEnumTypes wraps the message's nested enums.
+func (d richMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish {
+	ens := d.MessageDescriptor.GetNestedEnumTypes()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
+	}
+	return ret
+}
+
+// richFldDescriptorish adapts a rich *desc.FieldDescriptor to the
+// fldDescriptorish interface.
+type richFldDescriptorish struct {
+	*desc.FieldDescriptor
+}
+
+// GetFile wraps the field's enclosing file.
+func (d richFldDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.FieldDescriptor.GetFile()}
+}
+
+// AsFieldDescriptorProto returns the underlying field descriptor proto.
+func (d richFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return d.FieldDescriptor.AsFieldDescriptorProto()
+}
+
+// richOneOfDescriptorish adapts a rich *desc.OneOfDescriptor to the
+// oneofDescriptorish interface.
+type richOneOfDescriptorish struct {
+	*desc.OneOfDescriptor
+}
+
+// GetFile wraps the oneof's enclosing file.
+func (d richOneOfDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.OneOfDescriptor.GetFile()}
+}
+
+// richEnumDescriptorish adapts a rich *desc.EnumDescriptor to the
+// enumDescriptorish interface.
+type richEnumDescriptorish struct {
+	*desc.EnumDescriptor
+}
+
+// GetFile wraps the enum's enclosing file.
+func (d richEnumDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.EnumDescriptor.GetFile()}
+}
+
+// GetValues wraps the enum's values.
+func (d richEnumDescriptorish) GetValues() []enumValDescriptorish {
+	vals := d.EnumDescriptor.GetValues()
+	ret := make([]enumValDescriptorish, len(vals))
+	for i, val := range vals {
+		ret[i] = richEnumValDescriptorish{EnumValueDescriptor: val}
+	}
+	return ret
+}
+
+// richEnumValDescriptorish adapts a rich *desc.EnumValueDescriptor to the
+// enumValDescriptorish interface.
+type richEnumValDescriptorish struct {
+	*desc.EnumValueDescriptor
+}
+
+// GetFile wraps the enum value's enclosing file.
+func (d richEnumValDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.EnumValueDescriptor.GetFile()}
+}
+
+// richSvcDescriptorish adapts a rich *desc.ServiceDescriptor to the
+// svcDescriptorish interface.
+type richSvcDescriptorish struct {
+	*desc.ServiceDescriptor
+}
+
+// GetFile wraps the service's enclosing file.
+func (d richSvcDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.ServiceDescriptor.GetFile()}
+}
+
+// GetMethods wraps the service's methods.
+func (d richSvcDescriptorish) GetMethods() []methodDescriptorish {
+	mtds := d.ServiceDescriptor.GetMethods()
+	ret := make([]methodDescriptorish, len(mtds))
+	for i, mtd := range mtds {
+		ret[i] = richMethodDescriptorish{MethodDescriptor: mtd}
+	}
+	return ret
+}
+
+// richMethodDescriptorish adapts a rich *desc.MethodDescriptor to the
+// methodDescriptorish interface.
+type richMethodDescriptorish struct {
+	*desc.MethodDescriptor
+}
+
+// GetFile wraps the method's enclosing file.
+func (d richMethodDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.MethodDescriptor.GetFile()}
+}
+
+// The hierarchy of descriptorish implementations backed by
+// plain descriptor protos:
+
+// poorFileDescriptorish adapts a plain *dpb.FileDescriptorProto to the
+// fileDescriptorish interface, for use before linking. Symbol lookup and
+// dependency access are unavailable in this form and return nil.
+type poorFileDescriptorish struct {
+	*dpb.FileDescriptorProto
+}
+
+// GetFile returns the receiver: a file is its own enclosing file.
+func (d poorFileDescriptorish) GetFile() fileDescriptorish {
+	return d
+}
+
+// GetFullyQualifiedName returns the file's name (files have no package
+// qualification of their own name).
+func (d poorFileDescriptorish) GetFullyQualifiedName() string {
+	return d.FileDescriptorProto.GetName()
+}
+
+// AsProto returns the underlying descriptor proto.
+func (d poorFileDescriptorish) AsProto() proto.Message {
+	return d.FileDescriptorProto
+}
+
+// GetFileOptions returns the file's options (possibly nil).
+func (d poorFileDescriptorish) GetFileOptions() *dpb.FileOptions {
+	return d.FileDescriptorProto.GetOptions()
+}
+
+// FindSymbol always returns nil: an unlinked file has no symbol index.
+func (d poorFileDescriptorish) FindSymbol(name string) desc.Descriptor {
+	return nil
+}
+
+// GetPublicDependencies always returns nil: dependencies are unresolved
+// before linking.
+func (d poorFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
+	return nil
+}
+
+// GetDependencies always returns nil: dependencies are unresolved before
+// linking.
+func (d poorFileDescriptorish) GetDependencies() []fileDescriptorish {
+	return nil
+}
+
+// GetMessageTypes wraps the file's top-level messages, qualified by the
+// file's package.
+func (d poorFileDescriptorish) GetMessageTypes() []msgDescriptorish {
+	msgs := d.FileDescriptorProto.GetMessageType()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = poorMsgDescriptorish{
+			DescriptorProto: m,
+			qual:            pkg,
+			file:            d,
+		}
+	}
+	return ret
+}
+
+// GetExtensions wraps the file's top-level extensions, qualified by the
+// file's package.
+func (d poorFileDescriptorish) GetExtensions() []fldDescriptorish {
+	exts := d.FileDescriptorProto.GetExtension()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]fldDescriptorish, len(exts))
+	for i, e := range exts {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: e,
+			qual:                 pkg,
+			file:                 d,
+		}
+	}
+	return ret
+}
+
+// GetEnumTypes wraps the file's top-level enums, qualified by the file's
+// package.
+func (d poorFileDescriptorish) GetEnumTypes() []enumDescriptorish {
+	ens := d.FileDescriptorProto.GetEnumType()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, e := range ens {
+		ret[i] = poorEnumDescriptorish{
+			EnumDescriptorProto: e,
+			qual:                pkg,
+			file:                d,
+		}
+	}
+	return ret
+}
+
+// GetServices wraps the file's services, qualified by the file's package.
+func (d poorFileDescriptorish) GetServices() []svcDescriptorish {
+	svcs := d.FileDescriptorProto.GetService()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]svcDescriptorish, len(svcs))
+	for i, s := range svcs {
+		ret[i] = poorSvcDescriptorish{
+			ServiceDescriptorProto: s,
+			qual:                   pkg,
+			file:                   d,
+		}
+	}
+	return ret
+}
+
+// poorMsgDescriptorish adapts a plain *dpb.DescriptorProto to the
+// msgDescriptorish interface.
+type poorMsgDescriptorish struct {
+	*dpb.DescriptorProto
+	// qual is the qualifier (package or enclosing message name) of this
+	// message's fully-qualified name.
+	qual string
+	// file is the enclosing file wrapper.
+	file fileDescriptorish
+}
+
+// GetFile returns the enclosing file wrapper.
+func (d poorMsgDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+// GetFullyQualifiedName returns the message's name qualified by its
+// package or enclosing message.
+func (d poorMsgDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.DescriptorProto.GetName())
+}
+
+// qualify joins a qualifier (package or enclosing message name) and a
+// simple name with a dot; an empty qualifier yields the name unchanged.
+func qualify(qual, name string) string {
+	if qual == "" {
+		return name
+	}
+	return qual + "." + name
+}
+
+// AsProto returns the underlying descriptor proto.
+func (d poorMsgDescriptorish) AsProto() proto.Message {
+	return d.DescriptorProto
+}
+
+// GetMessageOptions returns the message's options (possibly nil).
+func (d poorMsgDescriptorish) GetMessageOptions() *dpb.MessageOptions {
+	return d.DescriptorProto.GetOptions()
+}
+
+// GetFields wraps the message's fields, qualified by this message's
+// fully-qualified name.
+func (d poorMsgDescriptorish) GetFields() []fldDescriptorish {
+	flds := d.DescriptorProto.GetField()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: f,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+// GetOneOfs wraps the message's oneofs.
+func (d poorMsgDescriptorish) GetOneOfs() []oneofDescriptorish {
+	oos := d.DescriptorProto.GetOneofDecl()
+	ret := make([]oneofDescriptorish, len(oos))
+	for i, oo := range oos {
+		ret[i] = poorOneOfDescriptorish{
+			OneofDescriptorProto: oo,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+// GetExtensionRanges wraps the message's extension ranges.
+func (d poorMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish {
+	mdFqn := d.GetFullyQualifiedName()
+	extrs := d.DescriptorProto.GetExtensionRange()
+	ret := make([]extRangeDescriptorish, len(extrs))
+	for i, extr := range extrs {
+		ret[i] = extRangeDescriptorish{
+			er:   extr,
+			qual: mdFqn,
+			file: d.file,
+		}
+	}
+	return ret
+}
+
+// GetNestedMessageTypes wraps the message's nested messages.
+func (d poorMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish {
+	msgs := d.DescriptorProto.GetNestedType()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = poorMsgDescriptorish{
+			DescriptorProto: m,
+			qual:            d.GetFullyQualifiedName(),
+			file:            d.file,
+		}
+	}
+	return ret
+}
+
+// GetNestedExtensions wraps the message's nested extensions.
+func (d poorMsgDescriptorish) GetNestedExtensions() []fldDescriptorish {
+	flds := d.DescriptorProto.GetExtension()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: f,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+// GetNestedEnumTypes wraps the message's nested enums.
+func (d poorMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish {
+	ens := d.DescriptorProto.GetEnumType()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = poorEnumDescriptorish{
+			EnumDescriptorProto: en,
+			qual:                d.GetFullyQualifiedName(),
+			file:                d.file,
+		}
+	}
+	return ret
+}
+
// poorFldDescriptorish wraps a raw FieldDescriptorProto and its enclosing
// scope so it can be treated like a descriptor without full linking.
type poorFldDescriptorish struct {
	*dpb.FieldDescriptorProto
	// qual is the fully-qualified name of the enclosing scope (package or
	// declaring message).
	qual string
	// file is the file that declares this field.
	file fileDescriptorish
}

// GetFile returns the file that declares this field.
func (d poorFldDescriptorish) GetFile() fileDescriptorish {
	return d.file
}

// GetFullyQualifiedName returns the field's dotted fully-qualified name.
func (d poorFldDescriptorish) GetFullyQualifiedName() string {
	return qualify(d.qual, d.FieldDescriptorProto.GetName())
}

// AsProto exposes the underlying field descriptor proto.
func (d poorFldDescriptorish) AsProto() proto.Message {
	return d.FieldDescriptorProto
}

// GetFieldOptions returns the field's options, if any.
func (d poorFldDescriptorish) GetFieldOptions() *dpb.FieldOptions {
	return d.FieldDescriptorProto.GetOptions()
}

// GetMessageType always returns nil: a "poor" wrapper has no linked type
// information for message-typed fields.
func (d poorFldDescriptorish) GetMessageType() *desc.MessageDescriptor {
	return nil
}

// GetEnumType always returns nil: a "poor" wrapper has no linked type
// information for enum-typed fields.
func (d poorFldDescriptorish) GetEnumType() *desc.EnumDescriptor {
	return nil
}
+
// poorOneOfDescriptorish wraps a raw OneofDescriptorProto and its
// enclosing scope so it can be treated like a descriptor without full
// linking.
type poorOneOfDescriptorish struct {
	*dpb.OneofDescriptorProto
	// qual is the fully-qualified name of the declaring message.
	qual string
	// file is the file that declares this oneof.
	file fileDescriptorish
}

// GetFile returns the file that declares this oneof.
func (d poorOneOfDescriptorish) GetFile() fileDescriptorish {
	return d.file
}

// GetFullyQualifiedName returns the oneof's dotted fully-qualified name.
func (d poorOneOfDescriptorish) GetFullyQualifiedName() string {
	return qualify(d.qual, d.OneofDescriptorProto.GetName())
}

// AsProto exposes the underlying oneof descriptor proto.
func (d poorOneOfDescriptorish) AsProto() proto.Message {
	return d.OneofDescriptorProto
}

// GetOneOfOptions returns the oneof's options, if any.
func (d poorOneOfDescriptorish) GetOneOfOptions() *dpb.OneofOptions {
	return d.OneofDescriptorProto.GetOptions()
}
+
// AsFieldDescriptorProto exposes the underlying field descriptor proto
// with its concrete type (unlike AsProto, which returns proto.Message).
func (d poorFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
	return d.FieldDescriptorProto
}
+
+type poorEnumDescriptorish struct {
+ *dpb.EnumDescriptorProto
+ qual string
+ file fileDescriptorish
+}
+
+func (d poorEnumDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorEnumDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.EnumDescriptorProto.GetName())
+}
+
+func (d poorEnumDescriptorish) AsProto() proto.Message {
+ return d.EnumDescriptorProto
+}
+
+func (d poorEnumDescriptorish) GetEnumOptions() *dpb.EnumOptions {
+ return d.EnumDescriptorProto.GetOptions()
+}
+
+func (d poorEnumDescriptorish) GetValues() []enumValDescriptorish {
+ vals := d.EnumDescriptorProto.GetValue()
+ ret := make([]enumValDescriptorish, len(vals))
+ for i, v := range vals {
+ ret[i] = poorEnumValDescriptorish{
+ EnumValueDescriptorProto: v,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
// poorEnumValDescriptorish wraps a raw EnumValueDescriptorProto and its
// enclosing scope so it can be treated like a descriptor without full
// linking.
type poorEnumValDescriptorish struct {
	*dpb.EnumValueDescriptorProto
	// qual is the fully-qualified name of the declaring enum.
	qual string
	// file is the file that declares this value.
	file fileDescriptorish
}

// GetFile returns the file that declares this enum value.
func (d poorEnumValDescriptorish) GetFile() fileDescriptorish {
	return d.file
}

// GetFullyQualifiedName returns the value's dotted fully-qualified name.
func (d poorEnumValDescriptorish) GetFullyQualifiedName() string {
	return qualify(d.qual, d.EnumValueDescriptorProto.GetName())
}

// AsProto exposes the underlying enum value descriptor proto.
func (d poorEnumValDescriptorish) AsProto() proto.Message {
	return d.EnumValueDescriptorProto
}

// GetEnumValueOptions returns the value's options, if any.
func (d poorEnumValDescriptorish) GetEnumValueOptions() *dpb.EnumValueOptions {
	return d.EnumValueDescriptorProto.GetOptions()
}
+
+type poorSvcDescriptorish struct {
+ *dpb.ServiceDescriptorProto
+ qual string
+ file fileDescriptorish
+}
+
+func (d poorSvcDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorSvcDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.ServiceDescriptorProto.GetName())
+}
+
+func (d poorSvcDescriptorish) AsProto() proto.Message {
+ return d.ServiceDescriptorProto
+}
+
+func (d poorSvcDescriptorish) GetServiceOptions() *dpb.ServiceOptions {
+ return d.ServiceDescriptorProto.GetOptions()
+}
+
+func (d poorSvcDescriptorish) GetMethods() []methodDescriptorish {
+ mtds := d.ServiceDescriptorProto.GetMethod()
+ ret := make([]methodDescriptorish, len(mtds))
+ for i, m := range mtds {
+ ret[i] = poorMethodDescriptorish{
+ MethodDescriptorProto: m,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
// poorMethodDescriptorish wraps a raw MethodDescriptorProto and its
// enclosing scope so it can be treated like a descriptor without full
// linking.
type poorMethodDescriptorish struct {
	*dpb.MethodDescriptorProto
	// qual is the fully-qualified name of the declaring service.
	qual string
	// file is the file that declares this method.
	file fileDescriptorish
}

// GetFile returns the file that declares this method.
func (d poorMethodDescriptorish) GetFile() fileDescriptorish {
	return d.file
}

// GetFullyQualifiedName returns the method's dotted fully-qualified name.
func (d poorMethodDescriptorish) GetFullyQualifiedName() string {
	return qualify(d.qual, d.MethodDescriptorProto.GetName())
}

// AsProto exposes the underlying method descriptor proto.
func (d poorMethodDescriptorish) AsProto() proto.Message {
	return d.MethodDescriptorProto
}

// GetMethodOptions returns the method's options, if any.
func (d poorMethodDescriptorish) GetMethodOptions() *dpb.MethodOptions {
	return d.MethodDescriptorProto.GetOptions()
}
+
// extRangeDescriptorish wraps a message's extension range so its options
// can be interpreted the same way as other elements'.
type extRangeDescriptorish struct {
	er *dpb.DescriptorProto_ExtensionRange
	// qual is the fully-qualified name of the message declaring the range.
	qual string
	// file is the file that declares the range.
	file fileDescriptorish
}

// GetFile returns the file that declares this extension range.
func (er extRangeDescriptorish) GetFile() fileDescriptorish {
	return er.file
}

// GetFullyQualifiedName returns a synthetic name for the range: the
// declaring message's name plus the range's inclusive bounds. The proto's
// end is exclusive, hence the -1.
func (er extRangeDescriptorish) GetFullyQualifiedName() string {
	return qualify(er.qual, fmt.Sprintf("%d-%d", er.er.GetStart(), er.er.GetEnd()-1))
}

// AsProto exposes the underlying extension range proto.
func (er extRangeDescriptorish) AsProto() proto.Message {
	return er.er
}

// GetExtensionRangeOptions returns the range's options, if any.
func (er extRangeDescriptorish) GetExtensionRangeOptions() *dpb.ExtensionRangeOptions {
	return er.er.GetOptions()
}
+
+func interpretFileOptions(r *parseResult, fd fileDescriptorish) error {
+ opts := fd.GetFileOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, fd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ for _, md := range fd.GetMessageTypes() {
+ if err := interpretMessageOptions(r, md); err != nil {
+ return err
+ }
+ }
+ for _, fld := range fd.GetExtensions() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, ed := range fd.GetEnumTypes() {
+ if err := interpretEnumOptions(r, ed); err != nil {
+ return err
+ }
+ }
+ for _, sd := range fd.GetServices() {
+ opts := sd.GetServiceOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, sd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ for _, mtd := range sd.GetMethods() {
+ opts := mtd.GetMethodOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, mtd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func interpretMessageOptions(r *parseResult, md msgDescriptorish) error {
+ opts := md.GetMessageOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, md, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ for _, fld := range md.GetFields() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, ood := range md.GetOneOfs() {
+ opts := ood.GetOneOfOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, ood, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ for _, fld := range md.GetNestedExtensions() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, er := range md.GetExtensionRanges() {
+ opts := er.GetExtensionRangeOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, er, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ for _, nmd := range md.GetNestedMessageTypes() {
+ if err := interpretMessageOptions(r, nmd); err != nil {
+ return err
+ }
+ }
+ for _, ed := range md.GetNestedEnumTypes() {
+ if err := interpretEnumOptions(r, ed); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// interpretFieldOptions interprets a field's uninterpreted options. The
// pseudo-options "json_name" and "default" are not real options: they are
// stored in dedicated fields of the FieldDescriptorProto and removed from
// the option list here. Whatever remains is interpreted normally via
// interpretOptions. In lenient mode, pseudo-option errors are ignored.
func interpretFieldOptions(r *parseResult, fld fldDescriptorish) error {
	opts := fld.GetFieldOptions()
	if opts != nil {
		if len(opts.UninterpretedOption) > 0 {
			uo := opts.UninterpretedOption
			scope := fmt.Sprintf("field %s", fld.GetFullyQualifiedName())

			// process json_name pseudo-option
			if index, err := findOption(r, scope, uo, "json_name"); err != nil && !r.lenient {
				return err
			} else if err == nil && index >= 0 {
				opt := uo[index]
				optNode := r.getOptionNode(opt)

				// attribute source code info
				if on, ok := optNode.(*optionNode); ok {
					r.interpretedOptions[on] = []int32{-1, internal.Field_jsonNameTag}
				}
				uo = removeOption(uo, index)
				if opt.StringValue == nil {
					return ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting string value for json_name option", scope)}
				}
				fld.AsFieldDescriptorProto().JsonName = proto.String(string(opt.StringValue))
			}

			// and process default pseudo-option
			if index, err := processDefaultOption(r, scope, fld, uo); err != nil && !r.lenient {
				return err
			} else if err == nil && index >= 0 {
				// attribute source code info
				optNode := r.getOptionNode(uo[index])
				if on, ok := optNode.(*optionNode); ok {
					r.interpretedOptions[on] = []int32{-1, internal.Field_defaultTag}
				}
				uo = removeOption(uo, index)
			}

			if len(uo) == 0 {
				// no real options, only pseudo-options above? clear out options
				fld.AsFieldDescriptorProto().Options = nil
			} else if remain, err := interpretOptions(r, fld, opts, uo); err != nil {
				return err
			} else {
				opts.UninterpretedOption = remain
			}
		}
	}
	return nil
}
+
+func processDefaultOption(res *parseResult, scope string, fld fldDescriptorish, uos []*dpb.UninterpretedOption) (defaultIndex int, err error) {
+ found, err := findOption(res, scope, uos, "default")
+ if err != nil {
+ return -1, err
+ } else if found == -1 {
+ return -1, nil
+ }
+ opt := uos[found]
+ optNode := res.getOptionNode(opt)
+ fdp := fld.AsFieldDescriptorProto()
+ if fdp.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED {
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is repeated", scope)}
+ }
+ if fdp.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP || fdp.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE {
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is a message", scope)}
+ }
+ val := optNode.getValue()
+ if _, ok := val.(*aggregateLiteralNode); ok {
+ return -1, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%s: default value cannot be an aggregate", scope)}
+ }
+ mc := &messageContext{
+ res: res,
+ file: fld.GetFile(),
+ elementName: fld.GetFullyQualifiedName(),
+ elementType: descriptorType(fld.AsProto()),
+ option: opt,
+ }
+ v, err := fieldValue(res, mc, fld, val, true)
+ if err != nil {
+ return -1, err
+ }
+ if str, ok := v.(string); ok {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(str)
+ } else if b, ok := v.([]byte); ok {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(encodeDefaultBytes(b))
+ } else {
+ var flt float64
+ var ok bool
+ if flt, ok = v.(float64); !ok {
+ var flt32 float32
+ if flt32, ok = v.(float32); ok {
+ flt = float64(flt32)
+ }
+ }
+ if ok {
+ if math.IsInf(flt, 1) {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("inf")
+ } else if ok && math.IsInf(flt, -1) {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("-inf")
+ } else if ok && math.IsNaN(flt) {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("nan")
+ } else {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+ }
+ } else {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+ }
+ }
+ return found, nil
+}
+
+func encodeDefaultBytes(b []byte) string {
+ var buf bytes.Buffer
+ writeEscapedBytes(&buf, b)
+ return buf.String()
+}
+
+func interpretEnumOptions(r *parseResult, ed enumDescriptorish) error {
+ opts := ed.GetEnumOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, ed, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ for _, evd := range ed.GetValues() {
+ opts := evd.GetEnumValueOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, evd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ return nil
+}
+
// interpretOptions interprets the given uninterpreted options against the
// options message opts (which belongs to element). Interpretation happens
// on a dynamic copy of opts; on success the result is converted back into
// opts. The return value is the set of options that remain uninterpreted —
// always empty unless res.lenient is set, in which case options that fail
// to resolve are returned (and left in place) instead of causing an error.
func interpretOptions(res *parseResult, element descriptorish, opts proto.Message, uninterpreted []*dpb.UninterpretedOption) ([]*dpb.UninterpretedOption, error) {
	optsd, err := desc.LoadMessageDescriptorForMessage(opts)
	if err != nil {
		if res.lenient {
			return uninterpreted, nil
		}
		return nil, err
	}
	dm := dynamic.NewMessage(optsd)
	err = dm.ConvertFrom(opts)
	if err != nil {
		if res.lenient {
			return uninterpreted, nil
		}
		node := res.nodes[element.AsProto()]
		return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
	}

	mc := &messageContext{res: res, file: element.GetFile(), elementName: element.GetFullyQualifiedName(), elementType: descriptorType(element.AsProto())}
	var remain []*dpb.UninterpretedOption
	for _, uo := range uninterpreted {
		node := res.getOptionNode(uo)
		if !uo.Name[0].GetIsExtension() && uo.Name[0].GetNamePart() == "uninterpreted_option" {
			if res.lenient {
				remain = append(remain, uo)
				continue
			}
			// uninterpreted_option might be found reflectively, but is not actually valid for use
			return nil, ErrorWithSourcePos{Pos: node.getName().start(), Underlying: fmt.Errorf("%vinvalid option 'uninterpreted_option'", mc)}
		}
		mc.option = uo
		path, err := interpretField(res, mc, element, dm, uo, 0, nil)
		if err != nil {
			if res.lenient {
				remain = append(remain, uo)
				continue
			}
			return nil, err
		}
		// remember which descriptor path this option was stored at, for
		// source code info attribution
		if optn, ok := node.(*optionNode); ok {
			res.interpretedOptions[optn] = path
		}
	}

	if err := dm.ValidateRecursive(); err != nil {
		// if lenient, we'll let this pass, but it means that some required field was not set!
		// TODO: do this in a more granular way, so we can validate individual fields
		// and leave them uninterpreted, instead of just having to live with the
		// thing having invalid data in extensions.
		if !res.lenient {
			node := res.nodes[element.AsProto()]
			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("error in %s options: %v", descriptorType(element.AsProto()), err)}
		}
	}

	if res.lenient {
		// If we're lenient, then we don't want to clobber the passed in message
		// and leave it partially populated. So we convert into a copy first
		optsClone := proto.Clone(opts)
		if err := dm.ConvertTo(optsClone); err != nil {
			// TODO: do this in a more granular way, so we can convert individual
			// fields and leave bad ones uninterpreted instead of skipping all of
			// the work we've done so far.
			return uninterpreted, nil
		}
		// conversion from dynamic message above worked, so now
		// it is safe to overwrite the passed in message
		opts.Reset()
		proto.Merge(opts, optsClone)

	} else {
		// not lenient: try to convert into the passed in message
		// and fail is not successful
		if err := dm.ConvertTo(opts); err != nil {
			node := res.nodes[element.AsProto()]
			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
		}
	}

	return remain, nil
}
+
// interpretField resolves one component of an option name — the one at
// opt.Name[nameIndex] — against the dynamic message dm. Extension
// components are looked up in the file's (transitively public) imports;
// plain components are looked up by field name. If more name components
// follow, the field must be a singular message and the function recurses
// into it; otherwise the option's value is assigned here. The returned
// path lists the field numbers traversed (plus a trailing element index
// for repeated fields), used to attribute source code info.
func interpretField(res *parseResult, mc *messageContext, element descriptorish, dm *dynamic.Message, opt *dpb.UninterpretedOption, nameIndex int, pathPrefix []int32) (path []int32, err error) {
	var fld *desc.FieldDescriptor
	nm := opt.GetName()[nameIndex]
	node := res.getOptionNamePartNode(nm)
	if nm.GetIsExtension() {
		extName := nm.GetNamePart()
		if extName[0] == '.' {
			extName = extName[1:] /* skip leading dot */
		}
		fld = findExtension(element.GetFile(), extName, false, map[fileDescriptorish]struct{}{})
		if fld == nil {
			return nil, ErrorWithSourcePos{
				Pos: node.start(),
				Underlying: fmt.Errorf("%vunrecognized extension %s of %s",
					mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName()),
			}
		}
		// the extension must extend the options message we are populating
		if fld.GetOwner().GetFullyQualifiedName() != dm.GetMessageDescriptor().GetFullyQualifiedName() {
			return nil, ErrorWithSourcePos{
				Pos: node.start(),
				Underlying: fmt.Errorf("%vextension %s should extend %s but instead extends %s",
					mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName(), fld.GetOwner().GetFullyQualifiedName()),
			}
		}
	} else {
		fld = dm.GetMessageDescriptor().FindFieldByName(nm.GetNamePart())
		if fld == nil {
			return nil, ErrorWithSourcePos{
				Pos: node.start(),
				Underlying: fmt.Errorf("%vfield %s of %s does not exist",
					mc, nm.GetNamePart(), dm.GetMessageDescriptor().GetFullyQualifiedName()),
			}
		}
	}

	path = append(pathPrefix, fld.GetNumber())

	if len(opt.GetName()) > nameIndex+1 {
		// more name components follow: this one must be a singular message
		nextnm := opt.GetName()[nameIndex+1]
		nextnode := res.getOptionNamePartNode(nextnm)
		if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE {
			return nil, ErrorWithSourcePos{
				Pos: nextnode.start(),
				Underlying: fmt.Errorf("%vcannot set field %s because %s is not a message",
					mc, nextnm.GetNamePart(), nm.GetNamePart()),
			}
		}
		if fld.IsRepeated() {
			return nil, ErrorWithSourcePos{
				Pos: nextnode.start(),
				Underlying: fmt.Errorf("%vcannot set field %s because %s is repeated (must use an aggregate)",
					mc, nextnm.GetNamePart(), nm.GetNamePart()),
			}
		}
		// fetch the existing sub-message, or create and attach a new one
		var fdm *dynamic.Message
		var err error
		if dm.HasField(fld) {
			var v interface{}
			v, err = dm.TryGetField(fld)
			fdm, _ = v.(*dynamic.Message)
		} else {
			fdm = dynamic.NewMessage(fld.GetMessageType())
			err = dm.TrySetField(fld, fdm)
		}
		if err != nil {
			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
		}
		// recurse to set next part of name
		return interpretField(res, mc, element, fdm, opt, nameIndex+1, path)
	}

	optNode := res.getOptionNode(opt)
	if err := setOptionField(res, mc, dm, fld, node, optNode.getValue()); err != nil {
		return nil, err
	}
	if fld.IsRepeated() {
		// point the path at the element just appended
		path = append(path, int32(dm.FieldLength(fld))-1)
	}
	return path, nil
}
+
+func findExtension(fd fileDescriptorish, name string, public bool, checked map[fileDescriptorish]struct{}) *desc.FieldDescriptor {
+ if _, ok := checked[fd]; ok {
+ return nil
+ }
+ checked[fd] = struct{}{}
+ d := fd.FindSymbol(name)
+ if d != nil {
+ if fld, ok := d.(*desc.FieldDescriptor); ok {
+ return fld
+ }
+ return nil
+ }
+
+ // When public = false, we are searching only directly imported symbols. But we
+ // also need to search transitive public imports due to semantics of public imports.
+ if public {
+ for _, dep := range fd.GetPublicDependencies() {
+ d := findExtension(dep, name, true, checked)
+ if d != nil {
+ return d
+ }
+ }
+ } else {
+ for _, dep := range fd.GetDependencies() {
+ d := findExtension(dep, name, true, checked)
+ if d != nil {
+ return d
+ }
+ }
+ }
+ return nil
+}
+
// setOptionField stores the parsed value val into field fld of the dynamic
// message dm. Array literals are only allowed for repeated fields and are
// added element by element; a scalar value is appended (repeated field) or
// set (singular field, which must not already have a value). The name node
// is used to position errors about the field itself; val positions errors
// about the value.
func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, fld *desc.FieldDescriptor, name node, val valueNode) error {
	v := val.value()
	if sl, ok := v.([]valueNode); ok {
		// handle slices a little differently than the others
		if !fld.IsRepeated() {
			return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue is an array but field is not repeated", mc)}
		}
		// record the index of each element in the aggregate path used for
		// error messages, restoring the original path when done
		origPath := mc.optAggPath
		defer func() {
			mc.optAggPath = origPath
		}()
		for index, item := range sl {
			mc.optAggPath = fmt.Sprintf("%s[%d]", origPath, index)
			if v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, item, false); err != nil {
				return err
			} else if err = dm.TryAddRepeatedField(fld, v); err != nil {
				return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
			}
		}
		return nil
	}

	v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, val, false)
	if err != nil {
		return err
	}
	if fld.IsRepeated() {
		err = dm.TryAddRepeatedField(fld, v)
	} else {
		if dm.HasField(fld) {
			return ErrorWithSourcePos{Pos: name.start(), Underlying: fmt.Errorf("%vnon-repeated option field %s already set", mc, fieldName(fld))}
		}
		err = dm.TrySetField(fld, v)
	}
	if err != nil {
		return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
	}

	return nil
}
+
// messageContext carries contextual information about the element whose
// options are being interpreted, used to build descriptive error-message
// prefixes (see String).
type messageContext struct {
	res *parseResult
	// file is the file declaring the element.
	file fileDescriptorish
	// elementType and elementName describe the element (e.g. "field", "foo.bar").
	elementType string
	elementName string
	// option is the uninterpreted option currently being processed, if any.
	option *dpb.UninterpretedOption
	// optAggPath is the dotted/indexed path within an aggregate value
	// currently being processed, if any.
	optAggPath string
}
+
// String renders the context as a prefix for error messages, such as
// "field foo.bar: option (baz): ". Parts that are unknown or not
// applicable are omitted; the result may be empty.
func (c *messageContext) String() string {
	var ctx bytes.Buffer
	if c.elementType != "file" {
		fmt.Fprintf(&ctx, "%s %s: ", c.elementType, c.elementName)
	}
	if c.option != nil && c.option.Name != nil {
		ctx.WriteString("option ")
		writeOptionName(&ctx, c.option.Name)
		if c.res.nodes == nil {
			// if we have no source position info, try to provide as much context
			// as possible (if nodes != nil, we don't need this because any errors
			// will actually have file and line numbers)
			if c.optAggPath != "" {
				fmt.Fprintf(&ctx, " at %s", c.optAggPath)
			}
		}
		ctx.WriteString(": ")
	}
	return ctx.String()
}
+
+func writeOptionName(buf *bytes.Buffer, parts []*dpb.UninterpretedOption_NamePart) {
+ first := true
+ for _, p := range parts {
+ if first {
+ first = false
+ } else {
+ buf.WriteByte('.')
+ }
+ nm := p.GetNamePart()
+ if nm[0] == '.' {
+ // skip leading dot
+ nm = nm[1:]
+ }
+ if p.GetIsExtension() {
+ buf.WriteByte('(')
+ buf.WriteString(nm)
+ buf.WriteByte(')')
+ } else {
+ buf.WriteString(nm)
+ }
+ }
+}
+
+func fieldName(fld *desc.FieldDescriptor) string {
+ if fld.IsExtension() {
+ return fld.GetFullyQualifiedName()
+ } else {
+ return fld.GetName()
+ }
+}
+
// valueKind describes the kind of a parsed option value, for use in
// "expecting X, got Y" error messages.
func valueKind(val interface{}) string {
	switch val := val.(type) {
	case identifier:
		return "identifier"
	case bool:
		return "bool"
	case int64:
		// negative literals are parsed as int64; non-negatives may be
		// either int64 or uint64 (see the next case)
		if val < 0 {
			return "negative integer"
		}
		return "integer"
	case uint64:
		return "integer"
	case float64:
		return "double"
	case string, []byte:
		return "string"
	case []*aggregateEntryNode:
		return "message"
	default:
		// fall back to the Go type name for anything unexpected
		return fmt.Sprintf("%T", val)
	}
}
+
// fieldValue coerces a parsed option value into a Go value assignable to
// the given field, validating its type and numeric range. For enum fields,
// enumAsString selects between the value's name (used when encoding the
// "default" pseudo-option as text) and its number. Message/group fields
// accept aggregate literals, which are recursively converted into a
// dynamic message.
func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val valueNode, enumAsString bool) (interface{}, error) {
	v := val.value()
	t := fld.AsFieldDescriptorProto().GetType()
	switch t {
	case dpb.FieldDescriptorProto_TYPE_ENUM:
		if id, ok := v.(identifier); ok {
			ev := fld.GetEnumType().FindValueByName(string(id))
			if ev == nil {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%venum %s has no value named %s", mc, fld.GetEnumType().GetFullyQualifiedName(), id)}
			}
			if enumAsString {
				return ev.GetName(), nil
			} else {
				return ev.GetNumber(), nil
			}
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting enum, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_MESSAGE, dpb.FieldDescriptorProto_TYPE_GROUP:
		if aggs, ok := v.([]*aggregateEntryNode); ok {
			fmd := fld.GetMessageType()
			fdm := dynamic.NewMessage(fmd)
			// extend the aggregate path for nested error messages, and
			// restore it when done
			origPath := mc.optAggPath
			defer func() {
				mc.optAggPath = origPath
			}()
			for _, a := range aggs {
				if origPath == "" {
					mc.optAggPath = a.name.value()
				} else {
					mc.optAggPath = origPath + "." + a.name.value()
				}
				var ffld *desc.FieldDescriptor
				if a.name.isExtension {
					n := a.name.name.val
					ffld = findExtension(mc.file, n, false, map[fileDescriptorish]struct{}{})
					if ffld == nil {
						// may need to qualify with package name
						pkg := mc.file.GetPackage()
						if pkg != "" {
							ffld = findExtension(mc.file, pkg+"."+n, false, map[fileDescriptorish]struct{}{})
						}
					}
				} else {
					ffld = fmd.FindFieldByName(a.name.value())
				}
				if ffld == nil {
					return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vfield %s not found", mc, a.name.name.val)}
				}
				if err := setOptionField(res, mc, fdm, ffld, a.name, a.val); err != nil {
					return nil, err
				}
			}
			return fdm, nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting message, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_BOOL:
		if b, ok := v.(bool); ok {
			return b, nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bool, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_BYTES:
		if str, ok := v.(string); ok {
			return []byte(str), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bytes, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_STRING:
		if str, ok := v.(string); ok {
			return str, nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting string, got %s", mc, valueKind(v))}
	// Integer literals may be parsed as int64 or uint64; each integer case
	// below accepts both and range-checks against the target type.
	case dpb.FieldDescriptorProto_TYPE_INT32, dpb.FieldDescriptorProto_TYPE_SINT32, dpb.FieldDescriptorProto_TYPE_SFIXED32:
		if i, ok := v.(int64); ok {
			if i > math.MaxInt32 || i < math.MinInt32 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, i)}
			}
			return int32(i), nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxInt32 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, ui)}
			}
			return int32(ui), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int32, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_UINT32, dpb.FieldDescriptorProto_TYPE_FIXED32:
		if i, ok := v.(int64); ok {
			if i > math.MaxUint32 || i < 0 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, i)}
			}
			return uint32(i), nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxUint32 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, ui)}
			}
			return uint32(ui), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint32, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_INT64, dpb.FieldDescriptorProto_TYPE_SINT64, dpb.FieldDescriptorProto_TYPE_SFIXED64:
		if i, ok := v.(int64); ok {
			return i, nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxInt64 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int64", mc, ui)}
			}
			return int64(ui), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int64, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_UINT64, dpb.FieldDescriptorProto_TYPE_FIXED64:
		if i, ok := v.(int64); ok {
			if i < 0 {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint64", mc, i)}
			}
			return uint64(i), nil
		}
		if ui, ok := v.(uint64); ok {
			return ui, nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint64, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
		if d, ok := v.(float64); ok {
			return d, nil
		}
		if i, ok := v.(int64); ok {
			return float64(i), nil
		}
		if u, ok := v.(uint64); ok {
			return float64(u), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting double, got %s", mc, valueKind(v))}
	case dpb.FieldDescriptorProto_TYPE_FLOAT:
		if d, ok := v.(float64); ok {
			// out-of-range finite values are rejected; inf/nan pass through
			if (d > math.MaxFloat32 || d < -math.MaxFloat32) && !math.IsInf(d, 1) && !math.IsInf(d, -1) && !math.IsNaN(d) {
				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %f is out of range for float", mc, d)}
			}
			return float32(d), nil
		}
		if i, ok := v.(int64); ok {
			return float32(i), nil
		}
		if u, ok := v.(uint64); ok {
			return float32(u), nil
		}
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting float, got %s", mc, valueKind(v))}
	default:
		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vunrecognized field type: %s", mc, t)}
	}
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
new file mode 100644
index 0000000..ce9a3e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
@@ -0,0 +1,1520 @@
+package protoparse
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
//go:generate goyacc -o proto.y.go -p proto proto.y

// errNoImportPathsForAbsoluteFilePath is returned by ResolveFilenames when an
// absolute file path is supplied but no import paths are given to resolve it
// against (see the comment inside ResolveFilenames for why this is rejected).
var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given")
+
// init enables verbose parser errors and rewrites the goyacc-generated
// token-name table so parse errors refer to tokens by human-friendly names
// (e.g. "string literal") instead of generated identifiers.
func init() {
	protoErrorVerbose = true

	// fix up the generated "token name" array so that error messages are nicer
	setTokenName(_STRING_LIT, "string literal")
	setTokenName(_INT_LIT, "int literal")
	setTokenName(_FLOAT_LIT, "float literal")
	setTokenName(_NAME, "identifier")
	setTokenName(_FQNAME, "fully-qualified name")
	setTokenName(_TYPENAME, "type name")
	setTokenName(_ERROR, "error")
	// for keywords, just show the keyword itself wrapped in quotes
	for str, i := range keywords {
		setTokenName(i, fmt.Sprintf(`"%s"`, str))
	}
}
+
// setTokenName replaces the display name of the given lexer token value in the
// generated parser's token-name table (protoToknames). It panics if the token
// value cannot be mapped through any of the generated lookup tables.
func setTokenName(token int, text string) {
	// NB: this is based on logic in generated parse code that translates the
	// int returned from the lexer into an internal token number.
	var intern int
	if token < len(protoTok1) {
		intern = protoTok1[token]
	} else {
		if token >= protoPrivate {
			if token < protoPrivate+len(protoTok2) {
				intern = protoTok2[token-protoPrivate]
			}
		}
		if intern == 0 {
			// protoTok3 is a flat list of (token, internal-number) pairs
			for i := 0; i+1 < len(protoTok3); i += 2 {
				if protoTok3[i] == token {
					intern = protoTok3[i+1]
					break
				}
			}
		}
	}

	if intern >= 1 && intern-1 < len(protoToknames) {
		protoToknames[intern-1] = text
		return
	}

	panic(fmt.Sprintf("Unknown token value: %d", token))
}
+
// FileAccessor is an abstraction for opening proto source files. It takes the
// name of the file to open and returns either the input reader or an error.
// The caller is responsible for closing the returned reader.
type FileAccessor func(filename string) (io.ReadCloser, error)
+
+// FileContentsFromMap returns a FileAccessor that uses the given map of file
+// contents. This allows proto source files to be constructed in memory and
+// easily supplied to a parser. The map keys are the paths to the proto source
+// files, and the values are the actual proto source contents.
+func FileContentsFromMap(files map[string]string) FileAccessor {
+ return func(filename string) (io.ReadCloser, error) {
+ contents, ok := files[filename]
+ if !ok {
+ return nil, os.ErrNotExist
+ }
+ return ioutil.NopCloser(strings.NewReader(contents)), nil
+ }
+}
+
+// ResolveFilenames tries to resolve fileNames into paths that are relative to
+// directories in the given importPaths. The returned slice has the results in
+// the same order as they are supplied in fileNames.
+//
+// The resulting names should be suitable for passing to Parser.ParseFiles.
+//
+// If importPaths is empty and any path is absolute, this returns error.
+// If importPaths is empty and all paths are relative, this returns the original fileNames.
+func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) {
+ if len(importPaths) == 0 {
+ if containsAbsFilePath(fileNames) {
+ // We have to do this as otherwise parseProtoFiles can result in duplicate symbols.
+ // For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto"
+ // as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto")
+ // with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles,
+ // it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this,
+ // adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto"
+ // from the input file list. This will result in a
+ // 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto'
+ // error being returned from ParseFiles.
+ return nil, errNoImportPathsForAbsoluteFilePath
+ }
+ return fileNames, nil
+ }
+ absImportPaths, err := absoluteFilePaths(importPaths)
+ if err != nil {
+ return nil, err
+ }
+ absFileNames, err := absoluteFilePaths(fileNames)
+ if err != nil {
+ return nil, err
+ }
+ resolvedFileNames := make([]string, 0, len(fileNames))
+ for _, absFileName := range absFileNames {
+ resolvedFileName, err := resolveAbsFilename(absImportPaths, absFileName)
+ if err != nil {
+ return nil, err
+ }
+ resolvedFileNames = append(resolvedFileNames, resolvedFileName)
+ }
+ return resolvedFileNames, nil
+}
+
// Parser parses proto source into descriptors.
type Parser struct {
	// The paths used to search for dependencies that are referenced in import
	// statements in proto source files. If no import paths are provided then
	// "." (current directory) is assumed to be the only import path.
	//
	// This setting is only used during ParseFiles operations. Since calls to
	// ParseFilesButDoNotLink do not link, there is no need to load and parse
	// dependencies.
	ImportPaths []string

	// If true, the supplied file names/paths need not necessarily match how the
	// files are referenced in import statements. The parser will attempt to
	// match import statements to supplied paths, "guessing" the import paths
	// for the files. Note that this inference is not perfect and link errors
	// could result. It works best when all proto files are organized such that
	// a single import path can be inferred (e.g. all files under a single tree
	// with import statements all being relative to the root of this tree).
	InferImportPaths bool

	// Used to create a reader for a given filename, when loading proto source
	// file contents. If unset, os.Open is used. If ImportPaths is also empty
	// then relative paths will be relative to the process's current working
	// directory.
	Accessor FileAccessor

	// If true, the resulting file descriptors will retain source code info,
	// that maps elements to their location in the source files as well as
	// includes comments found during parsing (and attributed to elements of
	// the source file).
	IncludeSourceCodeInfo bool

	// If true, the results from ParseFilesButDoNotLink will be passed through
	// some additional validations. But only constraints that do not require
	// linking can be checked. These include proto2 vs. proto3 language features,
	// looking for incorrect usage of reserved names or tags, and ensuring that
	// fields have unique tags and that enum values have unique numbers (unless
	// the enum allows aliases).
	ValidateUnlinkedFiles bool

	// If true, the results from ParseFilesButDoNotLink will have options
	// interpreted. Any uninterpretable options (including any custom options or
	// options that refer to message and enum types, which can only be
	// interpreted after linking) will be left in uninterpreted_options. Also,
	// the "default" pseudo-option for fields can only be interpreted for scalar
	// fields, excluding enums. (Interpreting default values for enum fields
	// requires resolving enum names, which requires linking.)
	InterpretOptionsInUnlinkedFiles bool
}
+
// ParseFiles parses the named files into descriptors. The returned slice has
// the same number of entries as the given filenames, in the same order. So the
// first returned descriptor corresponds to the first given name, and so on.
//
// All dependencies for all specified files (including transitive dependencies)
// must be accessible via the parser's Accessor or a link error will occur. The
// exception to this rule is that files can import standard Google-provided
// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
// for these files. Like protoc, this parser has a built-in version of these
// files it can use if they aren't explicitly supplied.
func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) {
	accessor := p.Accessor
	if accessor == nil {
		// default accessor reads straight from the filesystem
		accessor = func(name string) (io.ReadCloser, error) {
			return os.Open(name)
		}
	}
	paths := p.ImportPaths
	if len(paths) > 0 {
		// wrap the accessor so each name is tried against every import path
		// in order; if none succeed, the error from the first attempt is
		// returned
		acc := accessor
		accessor = func(name string) (io.ReadCloser, error) {
			var ret error
			for _, path := range paths {
				f, err := acc(filepath.Join(path, name))
				if err != nil {
					if ret == nil {
						ret = err
					}
					continue
				}
				return f, nil
			}
			return nil, ret
		}
	}

	protos := map[string]*parseResult{}
	// recursive=true: imports (and their imports) are parsed too, since
	// linking needs them all
	err := parseProtoFiles(accessor, filenames, true, true, protos)
	if err != nil {
		return nil, err
	}
	if p.InferImportPaths {
		protos = fixupFilenames(protos)
	}
	linkedProtos, err := newLinker(protos).linkFiles()
	if err != nil {
		return nil, err
	}
	if p.IncludeSourceCodeInfo {
		for name, fd := range linkedProtos {
			pr := protos[name]
			fd.AsFileDescriptorProto().SourceCodeInfo = pr.generateSourceCodeInfo()
			internal.RecomputeSourceInfo(fd)
		}
	}
	// return results in the same order the file names were given
	fds := make([]*desc.FileDescriptor, len(filenames))
	for i, name := range filenames {
		fd := linkedProtos[name]
		fds[i] = fd
	}
	return fds, nil
}
+
// ParseFilesButDoNotLink parses the named files into descriptor protos. The
// results are just protos, not fully-linked descriptors. It is possible that
// descriptors are invalid and still be returned in parsed form without error
// due to the fact that the linking step is skipped (and thus many validation
// steps omitted).
//
// There are a few side effects to not linking the descriptors:
//   1. No options will be interpreted. Options can refer to extensions or have
//      message and enum types. Without linking, these extension and type
//      references are not resolved, so the options may not be interpretable.
//      So all options will appear in UninterpretedOption fields of the various
//      descriptor options messages.
//   2. Type references will not be resolved. This means that the actual type
//      names in the descriptors may be unqualified and even relative to the
//      scope in which the type reference appears. This goes for fields that
//      have message and enum types. It also applies to methods and their
//      references to request and response message types.
//   3. Enum fields are not known. Until a field's type reference is resolved
//      (during linking), it is not known whether the type refers to a message
//      or an enum. So all fields with such type references have their Type set
//      to TYPE_MESSAGE.
//
// This method will still validate the syntax of parsed files. If the parser's
// ValidateUnlinkedFiles field is true, additional checks, beyond syntax will
// also be performed.
func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*dpb.FileDescriptorProto, error) {
	accessor := p.Accessor
	if accessor == nil {
		// default accessor reads straight from the filesystem
		accessor = func(name string) (io.ReadCloser, error) {
			return os.Open(name)
		}
	}

	protos := map[string]*parseResult{}
	// recursive=false: no linking will happen, so imports need not be loaded
	err := parseProtoFiles(accessor, filenames, false, p.ValidateUnlinkedFiles, protos)
	if err != nil {
		return nil, err
	}
	if p.InferImportPaths {
		protos = fixupFilenames(protos)
	}
	fds := make([]*dpb.FileDescriptorProto, len(filenames))
	for i, name := range filenames {
		pr := protos[name]
		fd := pr.fd
		if p.InterpretOptionsInUnlinkedFiles {
			// lenient mode: options that cannot be interpreted without
			// linking stay in uninterpreted_options rather than erroring
			pr.lenient = true
			interpretFileOptions(pr, poorFileDescriptorish{FileDescriptorProto: fd})
		}
		if p.IncludeSourceCodeInfo {
			fd.SourceCodeInfo = pr.generateSourceCodeInfo()
		}
		fds[i] = fd
	}
	return fds, nil
}
+
// containsAbsFilePath reports whether any of the given paths is absolute.
func containsAbsFilePath(filePaths []string) bool {
	for i := range filePaths {
		if filepath.IsAbs(filePaths[i]) {
			return true
		}
	}
	return false
}
+
+func absoluteFilePaths(filePaths []string) ([]string, error) {
+ absFilePaths := make([]string, 0, len(filePaths))
+ for _, filePath := range filePaths {
+ absFilePath, err := filepath.Abs(filePath)
+ if err != nil {
+ return nil, err
+ }
+ absFilePaths = append(absFilePaths, absFilePath)
+ }
+ return absFilePaths, nil
+}
+
+func resolveAbsFilename(absImportPaths []string, absFileName string) (string, error) {
+ for _, absImportPath := range absImportPaths {
+ if isDescendant(absImportPath, absFileName) {
+ resolvedPath, err := filepath.Rel(absImportPath, absFileName)
+ if err != nil {
+ return "", err
+ }
+ return resolvedPath, nil
+ }
+ }
+ return "", fmt.Errorf("%s does not reside in any import path", absFileName)
+}
+
// isDescendant reports whether file lies somewhere beneath dir. A path is not
// considered a descendant of itself.
func isDescendant(dir, file string) bool {
	dir = filepath.Clean(dir)
	for cur := file; ; {
		parent := filepath.Dir(cur)
		switch {
		case parent == dir:
			return true
		case parent == "." || parent == cur:
			// ran out of path elements without hitting dir
			return false
		}
		cur = parent
	}
}
+
// fixupFilenames attempts to rename entries in the given map so that file
// names match the paths used in the files' 'import' statements, which makes
// the files linkable. It returns a revised map; entries whose names cannot be
// improved are carried over unchanged. This is a heuristic and can guess
// wrong when multiple files could satisfy the same import.
func fixupFilenames(protos map[string]*parseResult) map[string]*parseResult {
	// In the event that the given filenames (keys in the supplied map) do not
	// match the actual paths used in 'import' statements in the files, we try
	// to revise names in the protos so that they will match and be linkable.
	revisedProtos := map[string]*parseResult{}

	protoPaths := map[string]struct{}{}
	// TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?)
	importCandidates := map[string]map[string]struct{}{}
	candidatesAvailable := map[string]struct{}{}
	// build, for every import string, the set of supplied file names that end
	// with that import (the possible matches)
	for name := range protos {
		candidatesAvailable[name] = struct{}{}
		for _, f := range protos {
			for _, imp := range f.fd.Dependency {
				if strings.HasSuffix(name, imp) {
					candidates := importCandidates[imp]
					if candidates == nil {
						candidates = map[string]struct{}{}
						importCandidates[imp] = candidates
					}
					candidates[name] = struct{}{}
				}
			}
		}
	}
	for imp, candidates := range importCandidates {
		// if we found multiple possible candidates, use the one that is an exact match
		// if it exists, and otherwise, guess that it's the shortest path (fewest elements)
		var best string
		for c := range candidates {
			if _, ok := candidatesAvailable[c]; !ok {
				// already used this candidate and re-written its filename accordingly
				continue
			}
			if c == imp {
				// exact match!
				best = c
				break
			}
			if best == "" {
				best = c
			} else {
				// HACK: we can't actually tell which files is supposed to match
				// this import, so arbitrarily pick the "shorter" one (fewest
				// path elements) or, on a tie, the lexically earlier one
				minLen := strings.Count(best, string(filepath.Separator))
				cLen := strings.Count(c, string(filepath.Separator))
				if cLen < minLen || (cLen == minLen && c < best) {
					best = c
				}
			}
		}
		if best != "" {
			// remember the stripped prefix as a likely proto path, rename the
			// chosen file to the import string, and take it out of the pool
			prefix := best[:len(best)-len(imp)]
			if len(prefix) > 0 {
				protoPaths[prefix] = struct{}{}
			}
			f := protos[best]
			f.fd.Name = proto.String(imp)
			revisedProtos[imp] = f
			delete(candidatesAvailable, best)
		}
	}

	if len(candidatesAvailable) == 0 {
		return revisedProtos
	}

	if len(protoPaths) == 0 {
		// no prefixes were discovered, so remaining names are kept as-is
		for c := range candidatesAvailable {
			revisedProtos[c] = protos[c]
		}
		return revisedProtos
	}

	// Any remaining candidates are entry-points (not imported by others), so
	// the best bet to "fixing" their file name is to see if they're in one of
	// the proto paths we found, and if so strip that prefix.
	protoPathStrs := make([]string, len(protoPaths))
	i := 0
	for p := range protoPaths {
		protoPathStrs[i] = p
		i++
	}
	sort.Strings(protoPathStrs)
	// we look at paths in reverse order, so we'll use a longer proto path if
	// there is more than one match
	for c := range candidatesAvailable {
		var imp string
		for i := len(protoPathStrs) - 1; i >= 0; i-- {
			p := protoPathStrs[i]
			if strings.HasPrefix(c, p) {
				imp = c[len(p):]
				break
			}
		}
		if imp != "" {
			f := protos[c]
			f.fd.Name = proto.String(imp)
			revisedProtos[imp] = f
		} else {
			revisedProtos[c] = protos[c]
		}
	}

	return revisedProtos
}
+
// parseProtoFiles parses each named file (skipping any already present in
// parsed) and stores the result in parsed, keyed by file name. If the
// accessor cannot open a file but a built-in standard import by that name
// exists, the built-in descriptor is used instead. When recursive is true,
// each file's imports are parsed as well (transitively).
func parseProtoFiles(acc FileAccessor, filenames []string, recursive, validate bool, parsed map[string]*parseResult) error {
	for _, name := range filenames {
		if _, ok := parsed[name]; ok {
			// already parsed (possibly via another file's imports)
			continue
		}
		in, err := acc(name)
		if err != nil {
			// fall back to built-in descriptors for standard imports
			// (e.g. google/protobuf/*.proto), mirroring protoc
			if d, ok := standardImports[name]; ok {
				parsed[name] = &parseResult{fd: d}
				continue
			}
			return err
		}
		// wrap in a closure so the deferred Close fires at the end of each
		// iteration rather than when the whole function returns
		func() {
			defer in.Close()
			parsed[name], err = parseProto(name, in, validate)
		}()
		if err != nil {
			return err
		}
		if recursive {
			err = parseProtoFiles(acc, parsed[name].fd.Dependency, true, validate, parsed)
			if err != nil {
				return fmt.Errorf("failed to load imports for %q: %s", name, err)
			}
		}
	}
	return nil
}
+
// parseResult holds the output of parsing a single proto source file: the
// resulting file descriptor proto plus bookkeeping used for error reporting,
// source code info generation, and option interpretation.
type parseResult struct {
	// the parsed file descriptor
	fd *dpb.FileDescriptorProto

	// if set to true, enables lenient interpretation of options, where
	// unrecognized options will be left uninterpreted instead of resulting in a
	// link error
	lenient bool

	// a map of elements in the descriptor to nodes in the AST
	// (for extracting position information when validating the descriptor)
	nodes map[proto.Message]node

	// a map of uninterpreted option AST nodes to their relative path
	// in the resulting options message
	interpretedOptions map[*optionNode][]int32
}
+
// The getXxxNode accessors below recover the AST node from which a given
// descriptor proto was built, for use in error messages and position
// reporting. When r.nodes is nil (the case for the pre-built standard
// imports, which have no AST), they return a placeholder node carrying only
// the file name as its position.

func (r *parseResult) getFileNode(f *dpb.FileDescriptorProto) fileDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(f.GetName())}
	}
	return r.nodes[f].(fileDecl)
}

func (r *parseResult) getOptionNode(o *dpb.UninterpretedOption) optionDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[o].(optionDecl)
}

func (r *parseResult) getOptionNamePartNode(o *dpb.UninterpretedOption_NamePart) node {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[o]
}

func (r *parseResult) getMessageNode(m *dpb.DescriptorProto) msgDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[m].(msgDecl)
}

func (r *parseResult) getFieldNode(f *dpb.FieldDescriptorProto) fieldDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[f].(fieldDecl)
}

func (r *parseResult) getOneOfNode(o *dpb.OneofDescriptorProto) node {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[o]
}

func (r *parseResult) getExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[e].(rangeDecl)
}

func (r *parseResult) getMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[rr].(rangeDecl)
}

func (r *parseResult) getEnumNode(e *dpb.EnumDescriptorProto) node {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[e]
}

func (r *parseResult) getEnumValueNode(e *dpb.EnumValueDescriptorProto) enumValueDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[e].(enumValueDecl)
}

func (r *parseResult) getEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[rr].(rangeDecl)
}

func (r *parseResult) getServiceNode(s *dpb.ServiceDescriptorProto) node {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[s]
}

func (r *parseResult) getMethodNode(m *dpb.MethodDescriptorProto) methodDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[m].(methodDecl)
}
+
// The putXxxNode helpers below record, for each descriptor proto created
// during parsing, the AST node it was built from. They require r.nodes to be
// non-nil (writing to a nil map panics); createParseResult initializes it.

func (r *parseResult) putFileNode(f *dpb.FileDescriptorProto, n *fileNode) {
	r.nodes[f] = n
}

func (r *parseResult) putOptionNode(o *dpb.UninterpretedOption, n *optionNode) {
	r.nodes[o] = n
}

func (r *parseResult) putOptionNamePartNode(o *dpb.UninterpretedOption_NamePart, n *optionNamePartNode) {
	r.nodes[o] = n
}

func (r *parseResult) putMessageNode(m *dpb.DescriptorProto, n msgDecl) {
	r.nodes[m] = n
}

func (r *parseResult) putFieldNode(f *dpb.FieldDescriptorProto, n fieldDecl) {
	r.nodes[f] = n
}

func (r *parseResult) putOneOfNode(o *dpb.OneofDescriptorProto, n *oneOfNode) {
	r.nodes[o] = n
}

func (r *parseResult) putExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange, n *rangeNode) {
	r.nodes[e] = n
}

func (r *parseResult) putMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange, n *rangeNode) {
	r.nodes[rr] = n
}

func (r *parseResult) putEnumNode(e *dpb.EnumDescriptorProto, n *enumNode) {
	r.nodes[e] = n
}

func (r *parseResult) putEnumValueNode(e *dpb.EnumValueDescriptorProto, n *enumValueNode) {
	r.nodes[e] = n
}

func (r *parseResult) putEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange, n *rangeNode) {
	r.nodes[rr] = n
}

func (r *parseResult) putServiceNode(s *dpb.ServiceDescriptorProto, n *serviceNode) {
	r.nodes[s] = n
}

func (r *parseResult) putMethodNode(m *dpb.MethodDescriptorProto, n *methodNode) {
	r.nodes[m] = n
}
+
+func parseProto(filename string, r io.Reader, validate bool) (*parseResult, error) {
+ lx := newLexer(r)
+ lx.filename = filename
+ protoParse(lx)
+ if lx.err != nil {
+ if _, ok := lx.err.(ErrorWithSourcePos); ok {
+ return nil, lx.err
+ } else {
+ return nil, ErrorWithSourcePos{Pos: lx.prev(), Underlying: lx.err}
+ }
+ }
+ // parser will not return an error if input is empty, so we
+ // need to also check if the result is non-nil
+ if lx.res == nil {
+ return nil, ErrorWithSourcePos{Pos: lx.prev(), Underlying: errors.New("input is empty")}
+ }
+
+ res, err := createParseResult(filename, lx.res)
+ if err != nil {
+ return nil, err
+ }
+ if validate {
+ if err := basicValidate(res); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+func createParseResult(filename string, file *fileNode) (*parseResult, error) {
+ res := &parseResult{
+ nodes: map[proto.Message]node{},
+ interpretedOptions: map[*optionNode][]int32{},
+ }
+ err := res.createFileDescriptor(filename, file)
+ return res, err
+}
+
// createFileDescriptor converts the given file AST into a FileDescriptorProto,
// recording the AST node for each descriptor it creates, and stores the
// result in r.fd. It returns a positioned error if the file contains more
// than one package declaration.
func (r *parseResult) createFileDescriptor(filename string, file *fileNode) error {
	fd := &dpb.FileDescriptorProto{Name: proto.String(filename)}
	r.putFileNode(fd, file)

	isProto3 := false
	if file.syntax != nil {
		isProto3 = file.syntax.syntax.val == "proto3"
		// proto2 is the default, so no need to set unless proto3
		if isProto3 {
			fd.Syntax = proto.String(file.syntax.syntax.val)
		}
	}

	// each top-level declaration populates exactly one of the decl fields
	for _, decl := range file.decls {
		if decl.enum != nil {
			fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl.enum))
		} else if decl.extend != nil {
			r.addExtensions(decl.extend, &fd.Extension, &fd.MessageType, isProto3)
		} else if decl.imp != nil {
			file.imports = append(file.imports, decl.imp)
			index := len(fd.Dependency)
			fd.Dependency = append(fd.Dependency, decl.imp.name.val)
			// public/weak imports also record the dependency's index
			if decl.imp.public {
				fd.PublicDependency = append(fd.PublicDependency, int32(index))
			} else if decl.imp.weak {
				fd.WeakDependency = append(fd.WeakDependency, int32(index))
			}
		} else if decl.message != nil {
			fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl.message, isProto3))
		} else if decl.option != nil {
			if fd.Options == nil {
				fd.Options = &dpb.FileOptions{}
			}
			fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
		} else if decl.service != nil {
			fd.Service = append(fd.Service, r.asServiceDescriptor(decl.service))
		} else if decl.pkg != nil {
			if fd.Package != nil {
				return ErrorWithSourcePos{Pos: decl.pkg.start(), Underlying: errors.New("files should have only one package declaration")}
			}
			file.pkg = decl.pkg
			fd.Package = proto.String(decl.pkg.name.val)
		}
	}
	r.fd = fd
	return nil
}
+
+func (r *parseResult) asUninterpretedOptions(nodes []*optionNode) []*dpb.UninterpretedOption {
+ opts := make([]*dpb.UninterpretedOption, len(nodes))
+ for i, n := range nodes {
+ opts[i] = r.asUninterpretedOption(n)
+ }
+ return opts
+}
+
// asUninterpretedOption converts an option AST node into an uninterpreted
// option proto, storing the parsed value in whichever UninterpretedOption
// field matches the value's Go type.
func (r *parseResult) asUninterpretedOption(node *optionNode) *dpb.UninterpretedOption {
	opt := &dpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.name.parts)}
	r.putOptionNode(opt, node)

	switch val := node.val.value().(type) {
	case bool:
		if val {
			opt.IdentifierValue = proto.String("true")
		} else {
			opt.IdentifierValue = proto.String("false")
		}
	case int64:
		// NOTE(review): int64 is mapped to NegativeIntValue and uint64 to
		// PositiveIntValue — presumably the AST yields int64 only for
		// negative literals; confirm against the value node implementation
		opt.NegativeIntValue = proto.Int64(val)
	case uint64:
		opt.PositiveIntValue = proto.Uint64(val)
	case float64:
		opt.DoubleValue = proto.Float64(val)
	case string:
		opt.StringValue = []byte(val)
	case identifier:
		opt.IdentifierValue = proto.String(string(val))
	case []*aggregateEntryNode:
		// aggregate (message-literal) values are rendered to their text form
		var buf bytes.Buffer
		aggToString(val, &buf)
		aggStr := buf.String()
		opt.AggregateValue = proto.String(aggStr)
	}
	return opt
}
+
// asUninterpretedOptionName converts parsed option name parts into
// UninterpretedOption_NamePart protos, recording the AST node for each part.
func (r *parseResult) asUninterpretedOptionName(parts []*optionNamePartNode) []*dpb.UninterpretedOption_NamePart {
	ret := make([]*dpb.UninterpretedOption_NamePart, len(parts))
	for i, part := range parts {
		txt := part.text.val
		if !part.isExtension {
			// for non-extension parts, only the offset/length sub-range of
			// the text is the actual name part
			txt = part.text.val[part.offset : part.offset+part.length]
		}
		np := &dpb.UninterpretedOption_NamePart{
			NamePart: proto.String(txt),
			IsExtension: proto.Bool(part.isExtension),
		}
		r.putOptionNamePartNode(np, part)
		ret[i] = np
	}
	return ret
}
+
// addExtensions converts the declarations of an extend block into field (and,
// for groups, message) descriptors, appending them to the supplied slices and
// setting each field's Extendee to the extended message's name.
func (r *parseResult) addExtensions(ext *extendNode, flds *[]*dpb.FieldDescriptorProto, msgs *[]*dpb.DescriptorProto, isProto3 bool) {
	extendee := ext.extendee.val
	for _, decl := range ext.decls {
		if decl.field != nil {
			decl.field.extendee = ext
			fd := r.asFieldDescriptor(decl.field)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
		} else if decl.group != nil {
			// a group yields both a field and a nested message type
			decl.group.extendee = ext
			fd, md := r.asGroupDescriptors(decl.group, isProto3)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
			*msgs = append(*msgs, md)
		}
	}
}
+
+func asLabel(lbl *labelNode) *dpb.FieldDescriptorProto_Label {
+ if lbl == nil {
+ return nil
+ }
+ switch {
+ case lbl.repeated:
+ return dpb.FieldDescriptorProto_LABEL_REPEATED.Enum()
+ case lbl.required:
+ return dpb.FieldDescriptorProto_LABEL_REQUIRED.Enum()
+ default:
+ return dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+ }
+}
+
// asFieldDescriptor converts a field AST node into a field descriptor proto,
// recording the mapping from proto to AST node. Options, if any, are kept in
// uninterpreted form.
func (r *parseResult) asFieldDescriptor(node *fieldNode) *dpb.FieldDescriptorProto {
	fd := newFieldDescriptor(node.name.val, node.fldType.val, int32(node.tag.val), asLabel(node.label))
	r.putFieldNode(fd, node)
	if len(node.options) > 0 {
		fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(node.options)}
	}
	return fd
}
+
// newFieldDescriptor creates a field descriptor proto with the given name,
// type name, tag, and (possibly nil) label. Scalar type names are mapped to
// the corresponding built-in types; any other type name is provisionally
// recorded as a message type reference (see the note in the default case).
func newFieldDescriptor(name string, fieldType string, tag int32, lbl *dpb.FieldDescriptorProto_Label) *dpb.FieldDescriptorProto {
	fd := &dpb.FieldDescriptorProto{
		Name: proto.String(name),
		JsonName: proto.String(internal.JsonName(name)),
		Number: proto.Int32(tag),
		Label: lbl,
	}
	switch fieldType {
	case "double":
		fd.Type = dpb.FieldDescriptorProto_TYPE_DOUBLE.Enum()
	case "float":
		fd.Type = dpb.FieldDescriptorProto_TYPE_FLOAT.Enum()
	case "int32":
		fd.Type = dpb.FieldDescriptorProto_TYPE_INT32.Enum()
	case "int64":
		fd.Type = dpb.FieldDescriptorProto_TYPE_INT64.Enum()
	case "uint32":
		fd.Type = dpb.FieldDescriptorProto_TYPE_UINT32.Enum()
	case "uint64":
		fd.Type = dpb.FieldDescriptorProto_TYPE_UINT64.Enum()
	case "sint32":
		fd.Type = dpb.FieldDescriptorProto_TYPE_SINT32.Enum()
	case "sint64":
		fd.Type = dpb.FieldDescriptorProto_TYPE_SINT64.Enum()
	case "fixed32":
		fd.Type = dpb.FieldDescriptorProto_TYPE_FIXED32.Enum()
	case "fixed64":
		fd.Type = dpb.FieldDescriptorProto_TYPE_FIXED64.Enum()
	case "sfixed32":
		fd.Type = dpb.FieldDescriptorProto_TYPE_SFIXED32.Enum()
	case "sfixed64":
		fd.Type = dpb.FieldDescriptorProto_TYPE_SFIXED64.Enum()
	case "bool":
		fd.Type = dpb.FieldDescriptorProto_TYPE_BOOL.Enum()
	case "string":
		fd.Type = dpb.FieldDescriptorProto_TYPE_STRING.Enum()
	case "bytes":
		fd.Type = dpb.FieldDescriptorProto_TYPE_BYTES.Enum()
	default:
		// NB: we don't have enough info to determine whether this is an enum or a message type,
		// so we'll change it to enum later once we can ascertain if it's an enum reference
		fd.Type = dpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
		fd.TypeName = proto.String(fieldType)
	}
	return fd
}
+
// asGroupDescriptors converts a group AST node into the pair of descriptors a
// group produces: a field (named with the lower-cased group name, typed
// TYPE_GROUP) and the nested message type holding the group's declarations.
func (r *parseResult) asGroupDescriptors(group *groupNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
	fieldName := strings.ToLower(group.name.val)
	fd := &dpb.FieldDescriptorProto{
		Name: proto.String(fieldName),
		JsonName: proto.String(internal.JsonName(fieldName)),
		Number: proto.Int32(int32(group.tag.val)),
		Label: asLabel(group.label),
		Type: dpb.FieldDescriptorProto_TYPE_GROUP.Enum(),
		TypeName: proto.String(group.name.val),
	}
	r.putFieldNode(fd, group)
	md := &dpb.DescriptorProto{Name: proto.String(group.name.val)}
	r.putMessageNode(md, group)
	r.addMessageDecls(md, &group.reserved, group.decls, isProto3)
	return fd, md
}
+
// asMapDescriptors converts a map field AST node into a repeated field of a
// synthesized map-entry message type ("key" = tag 1, "value" = tag 2), the
// same representation protoc uses. In proto2 the synthetic key/value fields
// are explicitly optional; in proto3 they carry no label.
func (r *parseResult) asMapDescriptors(mapField *mapFieldNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
	var lbl *dpb.FieldDescriptorProto_Label
	if !isProto3 {
		lbl = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}
	keyFd := newFieldDescriptor("key", mapField.keyType.val, 1, lbl)
	r.putFieldNode(keyFd, mapField.keyField())
	valFd := newFieldDescriptor("value", mapField.valueType.val, 2, lbl)
	r.putFieldNode(valFd, mapField.valueField())
	// entry message name is derived from the field name, e.g. "foo_bar" ->
	// "FooBarEntry"
	entryName := internal.InitCap(internal.JsonName(mapField.name.val)) + "Entry"
	fd := newFieldDescriptor(mapField.name.val, entryName, int32(mapField.tag.val), dpb.FieldDescriptorProto_LABEL_REPEATED.Enum())
	if len(mapField.options) > 0 {
		fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(mapField.options)}
	}
	r.putFieldNode(fd, mapField)
	md := &dpb.DescriptorProto{
		Name: proto.String(entryName),
		Options: &dpb.MessageOptions{MapEntry: proto.Bool(true)},
		Field: []*dpb.FieldDescriptorProto{keyFd, valFd},
	}
	r.putMessageNode(md, mapField)
	return fd, md
}
+
// asExtensionRanges converts an extension-range AST node (which may list
// several ranges) into extension-range protos. The descriptor's End field is
// exclusive, so 1 is added to the parsed (inclusive) end. Any options on the
// declaration are shared by all of its ranges.
func (r *parseResult) asExtensionRanges(node *extensionRangeNode) []*dpb.DescriptorProto_ExtensionRange {
	opts := r.asUninterpretedOptions(node.options)
	ers := make([]*dpb.DescriptorProto_ExtensionRange, len(node.ranges))
	for i, rng := range node.ranges {
		er := &dpb.DescriptorProto_ExtensionRange{
			Start: proto.Int32(rng.st),
			End: proto.Int32(rng.en + 1),
		}
		if len(opts) > 0 {
			er.Options = &dpb.ExtensionRangeOptions{UninterpretedOption: opts}
		}
		r.putExtensionRangeNode(er, rng)
		ers[i] = er
	}
	return ers
}
+
// asEnumValue converts an enum value AST node into an enum value descriptor,
// taking the number from whichever of the positive (numberP) or negative
// (numberN) number nodes is present.
func (r *parseResult) asEnumValue(ev *enumValueNode) *dpb.EnumValueDescriptorProto {
	var num int32
	if ev.numberP != nil {
		num = int32(ev.numberP.val)
	} else {
		num = int32(ev.numberN.val)
	}
	evd := &dpb.EnumValueDescriptorProto{Name: proto.String(ev.name.val), Number: proto.Int32(num)}
	r.putEnumValueNode(evd, ev)
	if len(ev.options) > 0 {
		evd.Options = &dpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(ev.options)}
	}
	return evd
}
+
// asMethodDescriptor converts a method AST node into a method descriptor,
// setting the client/server streaming flags when the "stream" keyword was
// present on the request or response type.
func (r *parseResult) asMethodDescriptor(node *methodNode) *dpb.MethodDescriptorProto {
	md := &dpb.MethodDescriptorProto{
		Name: proto.String(node.name.val),
		InputType: proto.String(node.input.msgType.val),
		OutputType: proto.String(node.output.msgType.val),
	}
	r.putMethodNode(md, node)
	if node.input.streamKeyword != nil {
		md.ClientStreaming = proto.Bool(true)
	}
	if node.output.streamKeyword != nil {
		md.ServerStreaming = proto.Bool(true)
	}
	// protoc always adds a MethodOptions if there are brackets
	// We have a non-nil node.options if there are brackets
	// We do the same to match protoc as closely as possible
	// https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152
	if node.options != nil {
		md.Options = &dpb.MethodOptions{UninterpretedOption: r.asUninterpretedOptions(node.options)}
	}
	return md
}
+
+func (r *parseResult) asEnumDescriptor(en *enumNode) *dpb.EnumDescriptorProto {
+ ed := &dpb.EnumDescriptorProto{Name: proto.String(en.name.val)}
+ r.putEnumNode(ed, en)
+ for _, decl := range en.decls {
+ if decl.option != nil {
+ if ed.Options == nil {
+ ed.Options = &dpb.EnumOptions{}
+ }
+ ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+ } else if decl.value != nil {
+ ed.Value = append(ed.Value, r.asEnumValue(decl.value))
+ } else if decl.reserved != nil {
+ for _, n := range decl.reserved.names {
+ en.reserved = append(en.reserved, n)
+ ed.ReservedName = append(ed.ReservedName, n.val)
+ }
+ for _, rng := range decl.reserved.ranges {
+ ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng))
+ }
+ }
+ }
+ return ed
+}
+
+func (r *parseResult) asEnumReservedRange(rng *rangeNode) *dpb.EnumDescriptorProto_EnumReservedRange {
+ rr := &dpb.EnumDescriptorProto_EnumReservedRange{
+ Start: proto.Int32(rng.st),
+ End: proto.Int32(rng.en),
+ }
+ r.putEnumReservedRangeNode(rr, rng)
+ return rr
+}
+
+func (r *parseResult) asMessageDescriptor(node *messageNode, isProto3 bool) *dpb.DescriptorProto {
+ msgd := &dpb.DescriptorProto{Name: proto.String(node.name.val)}
+ r.putMessageNode(msgd, node)
+ r.addMessageDecls(msgd, &node.reserved, node.decls, isProto3)
+ return msgd
+}
+
+func (r *parseResult) addMessageDecls(msgd *dpb.DescriptorProto, reservedNames *[]*stringLiteralNode, decls []*messageElement, isProto3 bool) {
+ for _, decl := range decls {
+ if decl.enum != nil {
+ msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl.enum))
+ } else if decl.extend != nil {
+ r.addExtensions(decl.extend, &msgd.Extension, &msgd.NestedType, isProto3)
+ } else if decl.extensionRange != nil {
+ msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl.extensionRange)...)
+ } else if decl.field != nil {
+ msgd.Field = append(msgd.Field, r.asFieldDescriptor(decl.field))
+ } else if decl.mapField != nil {
+ fd, md := r.asMapDescriptors(decl.mapField, isProto3)
+ msgd.Field = append(msgd.Field, fd)
+ msgd.NestedType = append(msgd.NestedType, md)
+ } else if decl.group != nil {
+ fd, md := r.asGroupDescriptors(decl.group, isProto3)
+ msgd.Field = append(msgd.Field, fd)
+ msgd.NestedType = append(msgd.NestedType, md)
+ } else if decl.oneOf != nil {
+ oodIndex := len(msgd.OneofDecl)
+ ood := &dpb.OneofDescriptorProto{Name: proto.String(decl.oneOf.name.val)}
+ r.putOneOfNode(ood, decl.oneOf)
+ msgd.OneofDecl = append(msgd.OneofDecl, ood)
+ for _, oodecl := range decl.oneOf.decls {
+ if oodecl.option != nil {
+ if ood.Options == nil {
+ ood.Options = &dpb.OneofOptions{}
+ }
+ ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl.option))
+ } else if oodecl.field != nil {
+ fd := r.asFieldDescriptor(oodecl.field)
+ fd.OneofIndex = proto.Int32(int32(oodIndex))
+ msgd.Field = append(msgd.Field, fd)
+ }
+ }
+ } else if decl.option != nil {
+ if msgd.Options == nil {
+ msgd.Options = &dpb.MessageOptions{}
+ }
+ msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+ } else if decl.nested != nil {
+ msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl.nested, isProto3))
+ } else if decl.reserved != nil {
+ for _, n := range decl.reserved.names {
+ *reservedNames = append(*reservedNames, n)
+ msgd.ReservedName = append(msgd.ReservedName, n.val)
+ }
+ for _, rng := range decl.reserved.ranges {
+ msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng))
+ }
+ }
+ }
+}
+
+func (r *parseResult) asMessageReservedRange(rng *rangeNode) *dpb.DescriptorProto_ReservedRange {
+ rr := &dpb.DescriptorProto_ReservedRange{
+ Start: proto.Int32(rng.st),
+ End: proto.Int32(rng.en + 1),
+ }
+ r.putMessageReservedRangeNode(rr, rng)
+ return rr
+}
+
+func (r *parseResult) asServiceDescriptor(svc *serviceNode) *dpb.ServiceDescriptorProto {
+ sd := &dpb.ServiceDescriptorProto{Name: proto.String(svc.name.val)}
+ r.putServiceNode(sd, svc)
+ for _, decl := range svc.decls {
+ if decl.option != nil {
+ if sd.Options == nil {
+ sd.Options = &dpb.ServiceOptions{}
+ }
+ sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+ } else if decl.rpc != nil {
+ sd.Method = append(sd.Method, r.asMethodDescriptor(decl.rpc))
+ }
+ }
+ return sd
+}
+
+func toNameParts(ident *identNode, offset int) []*optionNamePartNode {
+ parts := strings.Split(ident.val[offset:], ".")
+ ret := make([]*optionNamePartNode, len(parts))
+ for i, p := range parts {
+ ret[i] = &optionNamePartNode{text: ident, offset: offset, length: len(p)}
+ ret[i].setRange(ident, ident)
+ offset += len(p) + 1
+ }
+ return ret
+}
+
+func checkUint64InInt32Range(lex protoLexer, pos *SourcePos, v uint64) {
+ if v > math.MaxInt32 {
+ lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
+ }
+}
+
+func checkInt64InInt32Range(lex protoLexer, pos *SourcePos, v int64) {
+ if v > math.MaxInt32 || v < math.MinInt32 {
+ lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
+ }
+}
+
+func checkTag(lex protoLexer, pos *SourcePos, v uint64) {
+ if v > internal.MaxTag {
+ lexError(lex, pos, fmt.Sprintf("tag number %d is higher than max allowed tag number (%d)", v, internal.MaxTag))
+ } else if v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd {
+ lexError(lex, pos, fmt.Sprintf("tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd))
+ }
+}
+
+func aggToString(agg []*aggregateEntryNode, buf *bytes.Buffer) {
+ buf.WriteString("{")
+ for _, a := range agg {
+ buf.WriteString(" ")
+ buf.WriteString(a.name.value())
+ if v, ok := a.val.(*aggregateLiteralNode); ok {
+ aggToString(v.elements, buf)
+ } else {
+ buf.WriteString(": ")
+ elementToString(a.val.value(), buf)
+ }
+ }
+ buf.WriteString(" }")
+}
+
+func elementToString(v interface{}, buf *bytes.Buffer) {
+ switch v := v.(type) {
+ case bool, int64, uint64, identifier:
+ fmt.Fprintf(buf, "%v", v)
+ case float64:
+ if math.IsInf(v, 1) {
+ buf.WriteString(": inf")
+ } else if math.IsInf(v, -1) {
+ buf.WriteString(": -inf")
+ } else if math.IsNaN(v) {
+ buf.WriteString(": nan")
+ } else {
+ fmt.Fprintf(buf, ": %v", v)
+ }
+ case string:
+ buf.WriteRune('"')
+ writeEscapedBytes(buf, []byte(v))
+ buf.WriteRune('"')
+ case []valueNode:
+ buf.WriteString(": [")
+ first := true
+ for _, e := range v {
+ if first {
+ first = false
+ } else {
+ buf.WriteString(", ")
+ }
+ elementToString(e.value(), buf)
+ }
+ buf.WriteString("]")
+ case []*aggregateEntryNode:
+ aggToString(v, buf)
+ }
+}
+
+func writeEscapedBytes(buf *bytes.Buffer, b []byte) {
+ for _, c := range b {
+ switch c {
+ case '\n':
+ buf.WriteString("\\n")
+ case '\r':
+ buf.WriteString("\\r")
+ case '\t':
+ buf.WriteString("\\t")
+ case '"':
+ buf.WriteString("\\\"")
+ case '\'':
+ buf.WriteString("\\'")
+ case '\\':
+ buf.WriteString("\\\\")
+ default:
+ if c >= 0x20 && c <= 0x7f && c != '"' && c != '\\' {
+ // simple printable characters
+ buf.WriteByte(c)
+ } else {
+ // use octal escape for all other values
+ buf.WriteRune('\\')
+ buf.WriteByte('0' + ((c >> 6) & 0x7))
+ buf.WriteByte('0' + ((c >> 3) & 0x7))
+ buf.WriteByte('0' + (c & 0x7))
+ }
+ }
+ }
+}
+
+func basicValidate(res *parseResult) error {
+ fd := res.fd
+ isProto3 := fd.GetSyntax() == "proto3"
+
+ for _, md := range fd.MessageType {
+ if err := validateMessage(res, isProto3, "", md); err != nil {
+ return err
+ }
+ }
+
+ for _, ed := range fd.EnumType {
+ if err := validateEnum(res, isProto3, "", ed); err != nil {
+ return err
+ }
+ }
+
+ for _, fld := range fd.Extension {
+ if err := validateField(res, isProto3, "", fld); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func validateMessage(res *parseResult, isProto3 bool, prefix string, md *dpb.DescriptorProto) error {
+ nextPrefix := md.GetName() + "."
+
+ for _, fld := range md.Field {
+ if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
+ return err
+ }
+ }
+ for _, fld := range md.Extension {
+ if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
+ return err
+ }
+ }
+ for _, ed := range md.EnumType {
+ if err := validateEnum(res, isProto3, nextPrefix, ed); err != nil {
+ return err
+ }
+ }
+ for _, nmd := range md.NestedType {
+ if err := validateMessage(res, isProto3, nextPrefix, nmd); err != nil {
+ return err
+ }
+ }
+
+ scope := fmt.Sprintf("message %s%s", prefix, md.GetName())
+
+ if isProto3 && len(md.ExtensionRange) > 0 {
+ n := res.getExtensionRangeNode(md.ExtensionRange[0])
+ return ErrorWithSourcePos{Pos: n.start(), Underlying: fmt.Errorf("%s: extension ranges are not allowed in proto3", scope)}
+ }
+
+ if index, err := findOption(res, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil {
+ return err
+ } else if index >= 0 {
+ opt := md.Options.UninterpretedOption[index]
+ optn := res.getOptionNode(opt)
+ md.Options.UninterpretedOption = removeOption(md.Options.UninterpretedOption, index)
+ valid := false
+ if opt.IdentifierValue != nil {
+ if opt.GetIdentifierValue() == "true" {
+ return ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: map_entry option should not be set explicitly; use map type instead", scope)}
+ } else if opt.GetIdentifierValue() == "false" {
+ md.Options.MapEntry = proto.Bool(false)
+ valid = true
+ }
+ }
+ if !valid {
+ return ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for map_entry option", scope)}
+ }
+ }
+
+ // reserved ranges should not overlap
+ rsvd := make(tagRanges, len(md.ReservedRange))
+ for i, r := range md.ReservedRange {
+ n := res.getMessageReservedRangeNode(r)
+ rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+
+ }
+ sort.Sort(rsvd)
+ for i := 1; i < len(rsvd); i++ {
+ if rsvd[i].start < rsvd[i-1].end {
+ return ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1)}
+ }
+ }
+
+	// extension ranges should not overlap
+ exts := make(tagRanges, len(md.ExtensionRange))
+ for i, r := range md.ExtensionRange {
+ n := res.getExtensionRangeNode(r)
+ exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+ }
+ sort.Sort(exts)
+ for i := 1; i < len(exts); i++ {
+ if exts[i].start < exts[i-1].end {
+ return ErrorWithSourcePos{Pos: exts[i].node.start(), Underlying: fmt.Errorf("%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1)}
+ }
+ }
+
+ // see if any extension range overlaps any reserved range
+ var i, j int // i indexes rsvd; j indexes exts
+ for i < len(rsvd) && j < len(exts) {
+ if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end ||
+ exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end {
+
+ var pos *SourcePos
+ if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end {
+ pos = rsvd[i].node.start()
+ } else {
+ pos = exts[j].node.start()
+ }
+ // ranges overlap
+ return ErrorWithSourcePos{Pos: pos, Underlying: fmt.Errorf("%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1)}
+ }
+ if rsvd[i].start < exts[j].start {
+ i++
+ } else {
+ j++
+ }
+ }
+
+ // now, check that fields don't re-use tags and don't try to use extension
+ // or reserved ranges or reserved names
+ rsvdNames := map[string]struct{}{}
+ for _, n := range md.ReservedName {
+ rsvdNames[n] = struct{}{}
+ }
+ fieldTags := map[int32]string{}
+ for _, fld := range md.Field {
+ fn := res.getFieldNode(fld)
+ if _, ok := rsvdNames[fld.GetName()]; ok {
+ return ErrorWithSourcePos{Pos: fn.fieldName().start(), Underlying: fmt.Errorf("%s: field %s is using a reserved name", scope, fld.GetName())}
+ }
+ if existing := fieldTags[fld.GetNumber()]; existing != "" {
+ return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber())}
+ }
+ fieldTags[fld.GetNumber()] = fld.GetName()
+ // check reserved ranges
+ r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() })
+ if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() {
+ return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1)}
+ }
+ // and check extension ranges
+ e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() })
+ if e < len(exts) && exts[e].start <= fld.GetNumber() {
+ return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1)}
+ }
+ }
+
+ return nil
+}
+
+func validateEnum(res *parseResult, isProto3 bool, prefix string, ed *dpb.EnumDescriptorProto) error {
+ scope := fmt.Sprintf("enum %s%s", prefix, ed.GetName())
+
+ if index, err := findOption(res, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil {
+ return err
+ } else if index >= 0 {
+ opt := ed.Options.UninterpretedOption[index]
+ ed.Options.UninterpretedOption = removeOption(ed.Options.UninterpretedOption, index)
+ valid := false
+ if opt.IdentifierValue != nil {
+ if opt.GetIdentifierValue() == "true" {
+ ed.Options.AllowAlias = proto.Bool(true)
+ valid = true
+ } else if opt.GetIdentifierValue() == "false" {
+ ed.Options.AllowAlias = proto.Bool(false)
+ valid = true
+ }
+ }
+ if !valid {
+ optNode := res.getOptionNode(opt)
+ return ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for allow_alias option", scope)}
+ }
+ }
+
+ if isProto3 && ed.Value[0].GetNumber() != 0 {
+ evNode := res.getEnumValueNode(ed.Value[0])
+ return ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: proto3 requires that first value in enum have numeric value of 0", scope)}
+ }
+
+ if !ed.Options.GetAllowAlias() {
+ // make sure all value numbers are distinct
+ vals := map[int32]string{}
+ for _, evd := range ed.Value {
+ if existing := vals[evd.GetNumber()]; existing != "" {
+ evNode := res.getEnumValueNode(evd)
+ return ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber())}
+ }
+ vals[evd.GetNumber()] = evd.GetName()
+ }
+ }
+
+ // reserved ranges should not overlap
+ rsvd := make(tagRanges, len(ed.ReservedRange))
+ for i, r := range ed.ReservedRange {
+ n := res.getEnumReservedRangeNode(r)
+ rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+ }
+ sort.Sort(rsvd)
+ for i := 1; i < len(rsvd); i++ {
+ if rsvd[i].start <= rsvd[i-1].end {
+ return ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end)}
+ }
+ }
+
+	// now, check that values don't re-use numbers and don't try to use
+	// reserved ranges or reserved names
+ rsvdNames := map[string]struct{}{}
+ for _, n := range ed.ReservedName {
+ rsvdNames[n] = struct{}{}
+ }
+ for _, ev := range ed.Value {
+ evn := res.getEnumValueNode(ev)
+ if _, ok := rsvdNames[ev.GetName()]; ok {
+ return ErrorWithSourcePos{Pos: evn.getName().start(), Underlying: fmt.Errorf("%s: value %s is using a reserved name", scope, ev.GetName())}
+ }
+ // check reserved ranges
+ r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() })
+ if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() {
+ return ErrorWithSourcePos{Pos: evn.getNumber().start(), Underlying: fmt.Errorf("%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end)}
+ }
+ }
+
+ return nil
+}
+
+func validateField(res *parseResult, isProto3 bool, prefix string, fld *dpb.FieldDescriptorProto) error {
+ scope := fmt.Sprintf("field %s%s", prefix, fld.GetName())
+
+ node := res.getFieldNode(fld)
+ if isProto3 {
+ if fld.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+ n := node.(*groupNode)
+ return ErrorWithSourcePos{Pos: n.groupKeyword.start(), Underlying: fmt.Errorf("%s: groups are not allowed in proto3", scope)}
+ }
+ if fld.Label != nil && fld.GetLabel() != dpb.FieldDescriptorProto_LABEL_REPEATED {
+ return ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: field has label %v, but proto3 should omit labels other than 'repeated'", scope, fld.GetLabel())}
+ }
+ if index, err := findOption(res, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil {
+ return err
+ } else if index >= 0 {
+ optNode := res.getOptionNode(fld.Options.GetUninterpretedOption()[index])
+ return ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default values are not allowed in proto3", scope)}
+ }
+ } else {
+ if fld.Label == nil && fld.OneofIndex == nil {
+ return ErrorWithSourcePos{Pos: node.fieldName().start(), Underlying: fmt.Errorf("%s: field has no label, but proto2 must indicate 'optional' or 'required'", scope)}
+ }
+ if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED {
+ return ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: extension fields cannot be 'required'", scope)}
+ }
+ }
+
+ // finally, set any missing label to optional
+ if fld.Label == nil {
+ fld.Label = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+ }
+ return nil
+}
+
+func findOption(res *parseResult, scope string, opts []*dpb.UninterpretedOption, name string) (int, error) {
+ found := -1
+ for i, opt := range opts {
+ if len(opt.Name) != 1 {
+ continue
+ }
+ if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name {
+ continue
+ }
+ if found >= 0 {
+ optNode := res.getOptionNode(opt)
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: option %s cannot be defined more than once", scope, name)}
+ }
+ found = i
+ }
+ return found, nil
+}
+
+func removeOption(uo []*dpb.UninterpretedOption, indexToRemove int) []*dpb.UninterpretedOption {
+ if indexToRemove == 0 {
+ return uo[1:]
+ } else if int(indexToRemove) == len(uo)-1 {
+ return uo[:len(uo)-1]
+ } else {
+ return append(uo[:indexToRemove], uo[indexToRemove+1:]...)
+ }
+}
+
+type tagRange struct {
+ start int32
+ end int32
+ node rangeDecl
+}
+
+type tagRanges []tagRange
+
+func (r tagRanges) Len() int {
+ return len(r)
+}
+
+func (r tagRanges) Less(i, j int) bool {
+ return r[i].start < r[j].start ||
+ (r[i].start == r[j].start && r[i].end < r[j].end)
+}
+
+func (r tagRanges) Swap(i, j int) {
+ r[i], r[j] = r[j], r[i]
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
new file mode 100644
index 0000000..faf49d9
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
@@ -0,0 +1,937 @@
+%{
+package protoparse
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+ "fmt"
+ "math"
+ "unicode"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+%}
+
+// fields inside this union end up as the fields in a structure known
+// as ${PREFIX}SymType, of which a reference is passed to the lexer.
+%union{
+ file *fileNode
+ fileDecls []*fileElement
+ syn *syntaxNode
+ pkg *packageNode
+ imprt *importNode
+ msg *messageNode
+ msgDecls []*messageElement
+ fld *fieldNode
+ mapFld *mapFieldNode
+ grp *groupNode
+ oo *oneOfNode
+ ooDecls []*oneOfElement
+ ext *extensionRangeNode
+ resvd *reservedNode
+ en *enumNode
+ enDecls []*enumElement
+ env *enumValueNode
+ extend *extendNode
+ extDecls []*extendElement
+ svc *serviceNode
+ svcDecls []*serviceElement
+ mtd *methodNode
+ rpcType *rpcTypeNode
+ opts []*optionNode
+ optNm []*optionNamePartNode
+ rngs []*rangeNode
+ names []*stringLiteralNode
+ sl []valueNode
+ agg []*aggregateEntryNode
+ aggName *aggregateNameNode
+ v valueNode
+ str *stringLiteralNode
+ i *negativeIntLiteralNode
+ ui *intLiteralNode
+ f *floatLiteralNode
+ id *identNode
+ b *basicNode
+ err error
+}
+
+// any non-terminal which returns a value needs a type, which is
+// really a field name in the above union struct
+%type <file> file
+%type <syn> syntax
+%type <fileDecls> fileDecl fileDecls
+%type <imprt> import
+%type <pkg> package
+%type <opts> option fieldOption fieldOptions rpcOption rpcOptions
+%type <optNm> optionName optionNameRest optionNameComponent
+%type <v> constant scalarConstant aggregate
+%type <id> name ident typeIdent keyType
+%type <aggName> aggName
+%type <i> negIntLit
+%type <ui> intLit
+%type <f> floatLit
+%type <sl> constantList
+%type <agg> aggFields aggField aggFieldEntry
+%type <fld> field oneofField
+%type <oo> oneof
+%type <grp> group
+%type <mapFld> mapField
+%type <msg> message
+%type <msgDecls> messageItem messageBody
+%type <ooDecls> oneofItem oneofBody
+%type <names> fieldNames
+%type <resvd> msgReserved enumReserved reservedNames
+%type <rngs> tagRange tagRanges enumRange enumRanges
+%type <ext> extensions
+%type <en> enum
+%type <enDecls> enumItem enumBody
+%type <env> enumField
+%type <extend> extend
+%type <extDecls> extendItem extendBody
+%type <str> stringLit
+%type <svc> service
+%type <svcDecls> serviceItem serviceBody
+%type <mtd> rpc
+%type <rpcType> rpcType
+
+// same for terminals
+%token <str> _STRING_LIT
+%token <ui> _INT_LIT
+%token <f> _FLOAT_LIT
+%token <id> _NAME _FQNAME _TYPENAME
+%token <id> _SYNTAX _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
+%token <id> _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64
+%token <id> _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND
+%token <id> _SERVICE _RPC _STREAM _RETURNS
+%token <err> _ERROR
+// we define all of these, even ones that aren't used, to improve error messages
+// so it shows the unexpected symbol instead of showing "$unk"
+%token <b> '=' ';' ':' '{' '}' '\\' '/' '?' '.' ',' '>' '<' '+' '-' '(' ')' '[' ']' '*' '&' '^' '%' '$' '#' '@' '!' '~' '`'
+
+%%
+
+file : syntax {
+ $$ = &fileNode{syntax: $1}
+ $$.setRange($1, $1)
+ protolex.(*protoLex).res = $$
+ }
+ | fileDecls {
+ $$ = &fileNode{decls: $1}
+ if len($1) > 0 {
+ $$.setRange($1[0], $1[len($1)-1])
+ }
+ protolex.(*protoLex).res = $$
+ }
+ | syntax fileDecls {
+ $$ = &fileNode{syntax: $1, decls: $2}
+ var end node
+ if len($2) > 0 {
+ end = $2[len($2)-1]
+ } else {
+ end = $1
+ }
+ $$.setRange($1, end)
+ protolex.(*protoLex).res = $$
+ }
+ | {
+ }
+
+fileDecls : fileDecls fileDecl {
+ $$ = append($1, $2...)
+ }
+ | fileDecl
+
+fileDecl : import {
+ $$ = []*fileElement{{imp: $1}}
+ }
+ | package {
+ $$ = []*fileElement{{pkg: $1}}
+ }
+ | option {
+ $$ = []*fileElement{{option: $1[0]}}
+ }
+ | message {
+ $$ = []*fileElement{{message: $1}}
+ }
+ | enum {
+ $$ = []*fileElement{{enum: $1}}
+ }
+ | extend {
+ $$ = []*fileElement{{extend: $1}}
+ }
+ | service {
+ $$ = []*fileElement{{service: $1}}
+ }
+ | ';' {
+ $$ = []*fileElement{{empty: $1}}
+ }
+
+syntax : _SYNTAX '=' stringLit ';' {
+ if $3.val != "proto2" && $3.val != "proto3" {
+ lexError(protolex, $3.start(), "syntax value must be 'proto2' or 'proto3'")
+ }
+ $$ = &syntaxNode{syntax: $3}
+ $$.setRange($1, $4)
+ }
+
+import : _IMPORT stringLit ';' {
+ $$ = &importNode{ name: $2 }
+ $$.setRange($1, $3)
+ }
+ | _IMPORT _WEAK stringLit ';' {
+ $$ = &importNode{ name: $3, weak: true }
+ $$.setRange($1, $4)
+ }
+ | _IMPORT _PUBLIC stringLit ';' {
+ $$ = &importNode{ name: $3, public: true }
+ $$.setRange($1, $4)
+ }
+
+package : _PACKAGE ident ';' {
+ $$ = &packageNode{name: $2}
+ $$.setRange($1, $3)
+ }
+
+ident : name
+ | _FQNAME
+
+option : _OPTION optionName '=' constant ';' {
+ n := &optionNameNode{parts: $2}
+ n.setRange($2[0], $2[len($2)-1])
+ o := &optionNode{name: n, val: $4}
+ o.setRange($1, $5)
+ $$ = []*optionNode{o}
+ }
+
+optionName : ident {
+ $$ = toNameParts($1, 0)
+ }
+ | '(' typeIdent ')' {
+ p := &optionNamePartNode{text: $2, isExtension: true}
+ p.setRange($1, $3)
+ $$ = []*optionNamePartNode{p}
+ }
+ | '(' typeIdent ')' optionNameRest {
+ p := &optionNamePartNode{text: $2, isExtension: true}
+ p.setRange($1, $3)
+ ps := make([]*optionNamePartNode, 1, len($4)+1)
+ ps[0] = p
+ $$ = append(ps, $4...)
+ }
+
+optionNameRest : optionNameComponent
+ | optionNameComponent optionNameRest {
+ $$ = append($1, $2...)
+ }
+
+optionNameComponent : _TYPENAME {
+ $$ = toNameParts($1, 1 /* exclude leading dot */)
+ }
+ | '.' '(' typeIdent ')' {
+ p := &optionNamePartNode{text: $3, isExtension: true}
+ p.setRange($2, $4)
+ $$ = []*optionNamePartNode{p}
+ }
+
+constant : scalarConstant
+ | aggregate
+
+scalarConstant : stringLit {
+ $$ = $1
+ }
+ | intLit {
+ $$ = $1
+ }
+ | negIntLit {
+ $$ = $1
+ }
+ | floatLit {
+ $$ = $1
+ }
+ | name {
+ if $1.val == "true" {
+ $$ = &boolLiteralNode{basicNode: $1.basicNode, val: true}
+ } else if $1.val == "false" {
+ $$ = &boolLiteralNode{basicNode: $1.basicNode, val: false}
+ } else if $1.val == "inf" {
+ f := &floatLiteralNode{val: math.Inf(1)}
+ f.setRange($1, $1)
+ $$ = f
+ } else if $1.val == "nan" {
+ f := &floatLiteralNode{val: math.NaN()}
+ f.setRange($1, $1)
+ $$ = f
+ } else {
+ $$ = $1
+ }
+ }
+
+intLit : _INT_LIT
+ | '+' _INT_LIT {
+ $$ = $2
+ }
+
+negIntLit : '-' _INT_LIT {
+ if $2.val > math.MaxInt64 + 1 {
+ lexError(protolex, $2.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", $2.val, int64(math.MinInt64), int64(math.MaxInt64)))
+ }
+ $$ = &negativeIntLiteralNode{val: -int64($2.val)}
+ $$.setRange($1, $2)
+ }
+
+floatLit : _FLOAT_LIT
+ | '-' _FLOAT_LIT {
+ $$ = &floatLiteralNode{val: -$2.val}
+ $$.setRange($1, $2)
+ }
+ | '+' _FLOAT_LIT {
+ $$ = &floatLiteralNode{val: $2.val}
+ $$.setRange($1, $2)
+ }
+ | '+' _INF {
+ $$ = &floatLiteralNode{val: math.Inf(1)}
+ $$.setRange($1, $2)
+ }
+ | '-' _INF {
+ $$ = &floatLiteralNode{val: math.Inf(-1)}
+ $$.setRange($1, $2)
+ }
+
+stringLit : _STRING_LIT
+ | stringLit _STRING_LIT {
+ $$ = &stringLiteralNode{val: $1.val + $2.val}
+ $$.setRange($1, $2)
+ }
+
+aggregate : '{' aggFields '}' {
+ a := &aggregateLiteralNode{elements: $2}
+ a.setRange($1, $3)
+ $$ = a
+ }
+
+aggFields : aggField
+ | aggFields aggField {
+ $$ = append($1, $2...)
+ }
+ | {
+ $$ = nil
+ }
+
+aggField : aggFieldEntry
+ | aggFieldEntry ',' {
+ $$ = $1
+ }
+ | aggFieldEntry ';' {
+ $$ = $1
+ }
+
+aggFieldEntry : aggName ':' scalarConstant {
+ a := &aggregateEntryNode{name: $1, val: $3}
+ a.setRange($1, $3)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '[' ']' {
+ s := &sliceLiteralNode{}
+ s.setRange($3, $4)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $4)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '[' constantList ']' {
+ s := &sliceLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $5)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' aggregate {
+ a := &aggregateEntryNode{name: $1, val: $3}
+ a.setRange($1, $3)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName aggregate {
+ a := &aggregateEntryNode{name: $1, val: $2}
+ a.setRange($1, $2)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $5)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $3}
+ s.setRange($2, $4)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $4)
+ $$ = []*aggregateEntryNode{a}
+ }
+
+aggName : name {
+ $$ = &aggregateNameNode{name: $1}
+ $$.setRange($1, $1)
+ }
+ | '[' ident ']' {
+ $$ = &aggregateNameNode{name: $2, isExtension: true}
+ $$.setRange($1, $3)
+ }
+
+constantList : constant {
+ $$ = []valueNode{$1}
+ }
+ | constantList ',' constant {
+ $$ = append($1, $3)
+ }
+ | constantList ';' constant {
+ $$ = append($1, $3)
+ }
+ | '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $2}
+ s.setRange($1, $3)
+ $$ = []valueNode{s}
+ }
+ | constantList ',' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ $$ = append($1, s)
+ }
+ | constantList ';' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ $$ = append($1, s)
+ }
+
+typeIdent : ident
+ | _TYPENAME
+
+field : _REQUIRED typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode, required: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | _OPTIONAL typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | _REPEATED typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+ $$.setRange($1, $5)
+ }
+ | _REQUIRED typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode, required: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+ $$.setRange($1, $9)
+ }
+ | _OPTIONAL typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+ $$.setRange($1, $9)
+ }
+ | _REPEATED typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+ $$.setRange($1, $9)
+ }
+ | typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $6}
+ $$.setRange($1, $8)
+ }
+
+fieldOptions : fieldOptions ',' fieldOption {
+ $$ = append($1, $3...)
+ }
+ | fieldOption
+
+fieldOption: optionName '=' constant {
+ n := &optionNameNode{parts: $1}
+ n.setRange($1[0], $1[len($1)-1])
+ o := &optionNode{name: n, val: $3}
+ o.setRange($1[0], $3)
+ $$ = []*optionNode{o}
+ }
+
+group : _REQUIRED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := &labelNode{basicNode: $1.basicNode, required: true}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+ | _OPTIONAL _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := &labelNode{basicNode: $1.basicNode}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+ | _REPEATED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+
+oneof : _ONEOF name '{' oneofBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.field != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "oneof must contain at least one field")
+ }
+ $$ = &oneOfNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+oneofBody : oneofBody oneofItem {
+ $$ = append($1, $2...)
+ }
+ | oneofItem
+ | {
+ $$ = nil
+ }
+
+oneofItem : option {
+ $$ = []*oneOfElement{{option: $1[0]}}
+ }
+ | oneofField {
+ $$ = []*oneOfElement{{field: $1}}
+ }
+ | ';' {
+ $$ = []*oneOfElement{{empty: $1}}
+ }
+
+oneofField : typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+ $$.setRange($1, $5)
+ }
+ | typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $6}
+ $$.setRange($1, $8)
+ }
+
+mapField : _MAP '<' keyType ',' typeIdent '>' name '=' _INT_LIT ';' {
+ checkTag(protolex, $9.start(), $9.val)
+ $$ = &mapFieldNode{mapKeyword: $1, keyType: $3, valueType: $5, name: $7, tag: $9}
+ $$.setRange($1, $10)
+ }
+ | _MAP '<' keyType ',' typeIdent '>' name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkTag(protolex, $9.start(), $9.val)
+ $$ = &mapFieldNode{mapKeyword: $1, keyType: $3, valueType: $5, name: $7, tag: $9, options: $11}
+ $$.setRange($1, $13)
+ }
+
+keyType : _INT32
+ | _INT64
+ | _UINT32
+ | _UINT64
+ | _SINT32
+ | _SINT64
+ | _FIXED32
+ | _FIXED64
+ | _SFIXED32
+ | _SFIXED64
+ | _BOOL
+ | _STRING
+
+extensions : _EXTENSIONS tagRanges ';' {
+ $$ = &extensionRangeNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | _EXTENSIONS tagRanges '[' fieldOptions ']' ';' {
+ $$ = &extensionRangeNode{ranges: $2, options: $4}
+ $$.setRange($1, $6)
+ }
+
+tagRanges : tagRanges ',' tagRange {
+ $$ = append($1, $3...)
+ }
+ | tagRange
+
+tagRange : _INT_LIT {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+ r.setRange($1, $1)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _INT_LIT {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ if $3.val > internal.MaxTag {
+ lexError(protolex, $3.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", $3.val, internal.MaxTag))
+ }
+ if $1.val > $3.val {
+ lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _MAX {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: internal.MaxTag}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+
+enumRanges : enumRanges ',' enumRange {
+ $$ = append($1, $3...)
+ }
+ | enumRange
+
+enumRange : _INT_LIT {
+ checkUint64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+ r.setRange($1, $1)
+ $$ = []*rangeNode{r}
+ }
+ | negIntLit {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+ r.setRange($1, $1)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _INT_LIT {
+ checkUint64InInt32Range(protolex, $1.start(), $1.val)
+ checkUint64InInt32Range(protolex, $3.start(), $3.val)
+ if $1.val > $3.val {
+ lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | negIntLit _TO negIntLit {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ if $1.val > $3.val {
+ lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | negIntLit _TO _INT_LIT {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ checkUint64InInt32Range(protolex, $3.start(), $3.val)
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _MAX {
+ checkUint64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: math.MaxInt32}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | negIntLit _TO _MAX {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: math.MaxInt32}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+
+msgReserved : _RESERVED tagRanges ';' {
+ $$ = &reservedNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | reservedNames
+
+enumReserved : _RESERVED enumRanges ';' {
+ $$ = &reservedNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | reservedNames
+
+reservedNames : _RESERVED fieldNames ';' {
+ rsvd := map[string]struct{}{}
+ for _, n := range $2 {
+ if _, ok := rsvd[n.val]; ok {
+ lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+ break
+ }
+ rsvd[n.val] = struct{}{}
+ }
+ $$ = &reservedNode{names: $2}
+ $$.setRange($1, $3)
+ }
+
+fieldNames : fieldNames ',' stringLit {
+ $$ = append($1, $3)
+ }
+ | stringLit {
+ $$ = []*stringLiteralNode{$1}
+ }
+
+enum : _ENUM name '{' enumBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.value != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "enums must define at least one value")
+ }
+ $$ = &enumNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+enumBody : enumBody enumItem {
+ $$ = append($1, $2...)
+ }
+ | enumItem
+ | {
+ $$ = nil
+ }
+
+enumItem : option {
+ $$ = []*enumElement{{option: $1[0]}}
+ }
+ | enumField {
+ $$ = []*enumElement{{value: $1}}
+ }
+ | enumReserved {
+ $$ = []*enumElement{{reserved: $1}}
+ }
+ | ';' {
+ $$ = []*enumElement{{empty: $1}}
+ }
+
+enumField : name '=' _INT_LIT ';' {
+ checkUint64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, numberP: $3}
+ $$.setRange($1, $4)
+ }
+ | name '=' _INT_LIT '[' fieldOptions ']' ';' {
+ checkUint64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, numberP: $3, options: $5}
+ $$.setRange($1, $7)
+ }
+ | name '=' negIntLit ';' {
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, numberN: $3}
+ $$.setRange($1, $4)
+ }
+ | name '=' negIntLit '[' fieldOptions ']' ';' {
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, numberN: $3, options: $5}
+ $$.setRange($1, $7)
+ }
+
+message : _MESSAGE name '{' messageBody '}' {
+ $$ = &messageNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+messageBody : messageBody messageItem {
+ $$ = append($1, $2...)
+ }
+ | messageItem
+ | {
+ $$ = nil
+ }
+
+messageItem : field {
+ $$ = []*messageElement{{field: $1}}
+ }
+ | enum {
+ $$ = []*messageElement{{enum: $1}}
+ }
+ | message {
+ $$ = []*messageElement{{nested: $1}}
+ }
+ | extend {
+ $$ = []*messageElement{{extend: $1}}
+ }
+ | extensions {
+ $$ = []*messageElement{{extensionRange: $1}}
+ }
+ | group {
+ $$ = []*messageElement{{group: $1}}
+ }
+ | option {
+ $$ = []*messageElement{{option: $1[0]}}
+ }
+ | oneof {
+ $$ = []*messageElement{{oneOf: $1}}
+ }
+ | mapField {
+ $$ = []*messageElement{{mapField: $1}}
+ }
+ | msgReserved {
+ $$ = []*messageElement{{reserved: $1}}
+ }
+ | ';' {
+ $$ = []*messageElement{{empty: $1}}
+ }
+
+extend : _EXTEND typeIdent '{' extendBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.field != nil || el.group != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "extend sections must define at least one extension")
+ }
+ $$ = &extendNode{extendee: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+extendBody : extendBody extendItem {
+ $$ = append($1, $2...)
+ }
+ | extendItem
+ | {
+ $$ = nil
+ }
+
+extendItem : field {
+ $$ = []*extendElement{{field: $1}}
+ }
+ | group {
+ $$ = []*extendElement{{group: $1}}
+ }
+ | ';' {
+ $$ = []*extendElement{{empty: $1}}
+ }
+
+service : _SERVICE name '{' serviceBody '}' {
+ $$ = &serviceNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+serviceBody : serviceBody serviceItem {
+ $$ = append($1, $2...)
+ }
+ | serviceItem
+ | {
+ $$ = nil
+ }
+
+// NB: doc suggests support for "stream" declaration, separate from "rpc", but
+// it does not appear to be supported in protoc (doc is likely from grammar for
+// Google-internal version of protoc, with support for streaming stubby)
+serviceItem : option {
+ $$ = []*serviceElement{{option: $1[0]}}
+ }
+ | rpc {
+ $$ = []*serviceElement{{rpc: $1}}
+ }
+ | ';' {
+ $$ = []*serviceElement{{empty: $1}}
+ }
+
+rpc : _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' ';' {
+ $$ = &methodNode{name: $2, input: $4, output: $8}
+ $$.setRange($1, $10)
+ }
+ | _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' '{' rpcOptions '}' {
+ $$ = &methodNode{name: $2, input: $4, output: $8, options: $11}
+ $$.setRange($1, $12)
+ }
+
+rpcType : _STREAM typeIdent {
+ $$ = &rpcTypeNode{msgType: $2, streamKeyword: $1}
+ $$.setRange($1, $2)
+ }
+ | typeIdent {
+ $$ = &rpcTypeNode{msgType: $1}
+ $$.setRange($1, $1)
+ }
+
+rpcOptions : rpcOptions rpcOption {
+ $$ = append($1, $2...)
+ }
+ | rpcOption
+ | {
+ $$ = []*optionNode{}
+ }
+
+rpcOption : option {
+ $$ = $1
+ }
+ | ';' {
+ $$ = []*optionNode{}
+ }
+
+name : _NAME
+ | _SYNTAX
+ | _IMPORT
+ | _WEAK
+ | _PUBLIC
+ | _PACKAGE
+ | _OPTION
+ | _TRUE
+ | _FALSE
+ | _INF
+ | _NAN
+ | _REPEATED
+ | _OPTIONAL
+ | _REQUIRED
+ | _DOUBLE
+ | _FLOAT
+ | _INT32
+ | _INT64
+ | _UINT32
+ | _UINT64
+ | _SINT32
+ | _SINT64
+ | _FIXED32
+ | _FIXED64
+ | _SFIXED32
+ | _SFIXED64
+ | _BOOL
+ | _STRING
+ | _BYTES
+ | _GROUP
+ | _ONEOF
+ | _MAP
+ | _EXTENSIONS
+ | _TO
+ | _MAX
+ | _RESERVED
+ | _ENUM
+ | _MESSAGE
+ | _EXTEND
+ | _SERVICE
+ | _RPC
+ | _STREAM
+ | _RETURNS
+
+%%
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
new file mode 100644
index 0000000..6b8a4e6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
@@ -0,0 +1,2093 @@
+// Code generated by goyacc -o proto.y.go -p proto proto.y. DO NOT EDIT.
+
+//line proto.y:2
+package protoparse
+
+import __yyfmt__ "fmt"
+
+//line proto.y:2
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+ "fmt"
+ "math"
+ "unicode"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+//line proto.y:18
+type protoSymType struct {
+ yys int
+ file *fileNode
+ fileDecls []*fileElement
+ syn *syntaxNode
+ pkg *packageNode
+ imprt *importNode
+ msg *messageNode
+ msgDecls []*messageElement
+ fld *fieldNode
+ mapFld *mapFieldNode
+ grp *groupNode
+ oo *oneOfNode
+ ooDecls []*oneOfElement
+ ext *extensionRangeNode
+ resvd *reservedNode
+ en *enumNode
+ enDecls []*enumElement
+ env *enumValueNode
+ extend *extendNode
+ extDecls []*extendElement
+ svc *serviceNode
+ svcDecls []*serviceElement
+ mtd *methodNode
+ rpcType *rpcTypeNode
+ opts []*optionNode
+ optNm []*optionNamePartNode
+ rngs []*rangeNode
+ names []*stringLiteralNode
+ sl []valueNode
+ agg []*aggregateEntryNode
+ aggName *aggregateNameNode
+ v valueNode
+ str *stringLiteralNode
+ i *negativeIntLiteralNode
+ ui *intLiteralNode
+ f *floatLiteralNode
+ id *identNode
+ b *basicNode
+ err error
+}
+
+const _STRING_LIT = 57346
+const _INT_LIT = 57347
+const _FLOAT_LIT = 57348
+const _NAME = 57349
+const _FQNAME = 57350
+const _TYPENAME = 57351
+const _SYNTAX = 57352
+const _IMPORT = 57353
+const _WEAK = 57354
+const _PUBLIC = 57355
+const _PACKAGE = 57356
+const _OPTION = 57357
+const _TRUE = 57358
+const _FALSE = 57359
+const _INF = 57360
+const _NAN = 57361
+const _REPEATED = 57362
+const _OPTIONAL = 57363
+const _REQUIRED = 57364
+const _DOUBLE = 57365
+const _FLOAT = 57366
+const _INT32 = 57367
+const _INT64 = 57368
+const _UINT32 = 57369
+const _UINT64 = 57370
+const _SINT32 = 57371
+const _SINT64 = 57372
+const _FIXED32 = 57373
+const _FIXED64 = 57374
+const _SFIXED32 = 57375
+const _SFIXED64 = 57376
+const _BOOL = 57377
+const _STRING = 57378
+const _BYTES = 57379
+const _GROUP = 57380
+const _ONEOF = 57381
+const _MAP = 57382
+const _EXTENSIONS = 57383
+const _TO = 57384
+const _MAX = 57385
+const _RESERVED = 57386
+const _ENUM = 57387
+const _MESSAGE = 57388
+const _EXTEND = 57389
+const _SERVICE = 57390
+const _RPC = 57391
+const _STREAM = 57392
+const _RETURNS = 57393
+const _ERROR = 57394
+
+var protoToknames = [...]string{
+ "$end",
+ "error",
+ "$unk",
+ "_STRING_LIT",
+ "_INT_LIT",
+ "_FLOAT_LIT",
+ "_NAME",
+ "_FQNAME",
+ "_TYPENAME",
+ "_SYNTAX",
+ "_IMPORT",
+ "_WEAK",
+ "_PUBLIC",
+ "_PACKAGE",
+ "_OPTION",
+ "_TRUE",
+ "_FALSE",
+ "_INF",
+ "_NAN",
+ "_REPEATED",
+ "_OPTIONAL",
+ "_REQUIRED",
+ "_DOUBLE",
+ "_FLOAT",
+ "_INT32",
+ "_INT64",
+ "_UINT32",
+ "_UINT64",
+ "_SINT32",
+ "_SINT64",
+ "_FIXED32",
+ "_FIXED64",
+ "_SFIXED32",
+ "_SFIXED64",
+ "_BOOL",
+ "_STRING",
+ "_BYTES",
+ "_GROUP",
+ "_ONEOF",
+ "_MAP",
+ "_EXTENSIONS",
+ "_TO",
+ "_MAX",
+ "_RESERVED",
+ "_ENUM",
+ "_MESSAGE",
+ "_EXTEND",
+ "_SERVICE",
+ "_RPC",
+ "_STREAM",
+ "_RETURNS",
+ "_ERROR",
+ "'='",
+ "';'",
+ "':'",
+ "'{'",
+ "'}'",
+ "'\\\\'",
+ "'/'",
+ "'?'",
+ "'.'",
+ "','",
+ "'>'",
+ "'<'",
+ "'+'",
+ "'-'",
+ "'('",
+ "')'",
+ "'['",
+ "']'",
+ "'*'",
+ "'&'",
+ "'^'",
+ "'%'",
+ "'$'",
+ "'#'",
+ "'@'",
+ "'!'",
+ "'~'",
+ "'`'",
+}
+var protoStatenames = [...]string{}
+
+const protoEofCode = 1
+const protoErrCode = 2
+const protoInitialStackSize = 16
+
+//line proto.y:937
+
+//line yacctab:1
+var protoExca = [...]int{
+ -1, 1,
+ 1, -1,
+ -2, 0,
+}
+
+const protoPrivate = 57344
+
+const protoLast = 2050
+
+var protoAct = [...]int{
+
+ 120, 8, 288, 8, 8, 386, 264, 80, 128, 113,
+ 159, 160, 265, 271, 103, 196, 185, 112, 100, 101,
+ 29, 171, 8, 28, 75, 119, 99, 114, 79, 153,
+ 137, 148, 266, 184, 24, 139, 306, 255, 77, 78,
+ 319, 82, 306, 83, 389, 86, 87, 306, 318, 74,
+ 378, 306, 98, 306, 306, 363, 317, 306, 306, 361,
+ 306, 359, 351, 222, 379, 338, 337, 366, 307, 328,
+ 377, 224, 325, 322, 304, 280, 278, 286, 223, 380,
+ 315, 356, 367, 197, 329, 90, 243, 326, 323, 305,
+ 281, 279, 297, 140, 111, 154, 27, 197, 249, 214,
+ 209, 106, 188, 336, 246, 276, 241, 330, 240, 211,
+ 105, 173, 245, 144, 242, 287, 224, 208, 381, 150,
+ 382, 149, 176, 146, 327, 207, 324, 163, 16, 226,
+ 94, 93, 92, 91, 177, 179, 181, 16, 199, 140,
+ 79, 75, 85, 392, 199, 383, 368, 199, 374, 183,
+ 78, 77, 373, 154, 16, 187, 191, 372, 199, 144,
+ 198, 365, 157, 174, 85, 191, 74, 156, 355, 146,
+ 189, 206, 212, 150, 193, 149, 388, 354, 204, 201,
+ 163, 210, 203, 14, 333, 158, 15, 16, 157, 85,
+ 85, 88, 97, 156, 213, 16, 202, 335, 215, 216,
+ 217, 218, 219, 220, 308, 262, 261, 4, 14, 244,
+ 260, 15, 16, 376, 96, 259, 258, 18, 17, 19,
+ 20, 257, 254, 256, 221, 339, 13, 272, 252, 194,
+ 105, 75, 163, 248, 388, 275, 250, 390, 283, 95,
+ 84, 267, 18, 17, 19, 20, 89, 23, 247, 225,
+ 352, 13, 268, 303, 168, 169, 27, 186, 290, 302,
+ 198, 282, 277, 285, 295, 301, 206, 170, 300, 5,
+ 299, 272, 105, 22, 163, 163, 284, 117, 11, 275,
+ 11, 11, 165, 166, 310, 312, 313, 75, 314, 75,
+ 269, 22, 27, 155, 298, 167, 311, 186, 316, 11,
+ 25, 26, 263, 168, 293, 320, 85, 206, 27, 152,
+ 12, 147, 331, 75, 75, 163, 163, 3, 141, 332,
+ 21, 115, 10, 138, 10, 10, 118, 195, 142, 105,
+ 345, 75, 206, 347, 75, 123, 349, 75, 190, 105,
+ 105, 163, 346, 10, 270, 348, 116, 9, 350, 9,
+ 9, 122, 357, 121, 358, 273, 176, 353, 176, 369,
+ 176, 334, 163, 161, 163, 290, 292, 104, 9, 206,
+ 206, 340, 342, 102, 375, 75, 162, 227, 163, 163,
+ 172, 385, 7, 387, 6, 2, 387, 384, 75, 1,
+ 0, 391, 27, 107, 110, 31, 0, 0, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 0, 0, 0, 0, 106, 0, 0, 0, 0, 0,
+ 0, 0, 294, 108, 109, 0, 0, 0, 291, 27,
+ 107, 110, 31, 0, 0, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 0, 0, 0,
+ 0, 106, 0, 0, 0, 0, 0, 0, 0, 253,
+ 108, 109, 0, 0, 251, 27, 107, 110, 31, 0,
+ 0, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 0, 0, 0, 0, 106, 0, 0,
+ 0, 0, 0, 0, 0, 343, 108, 109, 27, 107,
+ 110, 31, 0, 0, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 106, 0, 0, 0, 0, 0, 0, 0, 341, 108,
+ 109, 27, 107, 110, 31, 0, 0, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 0,
+ 0, 0, 0, 106, 0, 0, 0, 0, 0, 0,
+ 0, 31, 108, 109, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 371, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 370, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 344, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 321, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 296, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 205, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 0, 0,
+ 0, 31, 30, 164, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 76, 31, 30, 81, 32, 33, 34, 35, 36,
+ 133, 38, 39, 40, 41, 127, 126, 125, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 134, 135, 132, 64, 65, 136,
+ 129, 130, 131, 70, 71, 72, 73, 0, 0, 124,
+ 0, 0, 364, 31, 30, 81, 32, 33, 34, 35,
+ 36, 133, 38, 39, 40, 41, 127, 126, 125, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 134, 135, 132, 64, 65,
+ 136, 129, 130, 131, 70, 71, 72, 73, 0, 0,
+ 124, 0, 0, 362, 31, 30, 81, 32, 33, 34,
+ 35, 36, 133, 38, 39, 40, 41, 127, 126, 125,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 134, 135, 132, 64,
+ 65, 136, 129, 130, 131, 70, 71, 72, 73, 0,
+ 0, 124, 0, 0, 360, 31, 30, 81, 32, 33,
+ 34, 35, 36, 133, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 0, 0, 274, 0, 0, 309, 31, 30, 81, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 127,
+ 126, 125, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 0, 0, 151, 0, 0, 200, 31, 30, 81,
+ 32, 33, 34, 35, 36, 133, 38, 39, 40, 41,
+ 127, 126, 125, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 134,
+ 135, 132, 64, 65, 136, 129, 130, 131, 70, 71,
+ 72, 73, 0, 0, 124, 31, 0, 175, 32, 33,
+ 34, 35, 36, 133, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 145, 67, 68, 69, 70, 71, 72, 73,
+ 0, 0, 143, 0, 0, 192, 31, 30, 81, 32,
+ 33, 34, 35, 36, 133, 38, 39, 40, 41, 127,
+ 126, 125, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 134, 135,
+ 132, 64, 65, 136, 129, 130, 131, 70, 71, 72,
+ 73, 0, 0, 124, 31, 30, 81, 32, 33, 34,
+ 35, 36, 133, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 0,
+ 0, 274, 31, 30, 81, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 127, 126, 125, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 31, 0, 151,
+ 32, 33, 34, 35, 36, 133, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 145, 67, 68, 69, 70, 71,
+ 72, 73, 0, 0, 143, 31, 30, 81, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 289, 73,
+ 31, 30, 81, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 31, 30, 81, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 182, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 31, 30, 81, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 180, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 31, 30, 81, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 178, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 31, 30, 0, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 31, 0, 0, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+}
+var protoPact = [...]int{
+
+ 197, -1000, 172, 172, 194, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 288, 1953, 1124, 1998, 1998, 1773,
+ 1998, 172, -1000, 304, 186, 304, 304, -1000, 137, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 193, -1000, 1773, 77, 76, 75,
+ -1000, -1000, 74, 185, -1000, -1000, 160, 138, -1000, 647,
+ 26, 1539, 1680, 1635, 113, -1000, -1000, -1000, 131, -1000,
+ -1000, 302, -1000, -1000, -1000, -1000, 1064, -1000, 277, 249,
+ -1000, 102, 1440, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1908, 1863, 1818, 1998, 1998,
+ 1998, 1773, 292, 1124, 1998, 38, 252, -1000, 1488, -1000,
+ -1000, -1000, -1000, -1000, 176, 92, -1000, 1389, -1000, -1000,
+ -1000, -1000, 139, -1000, -1000, -1000, -1000, 1998, -1000, 1004,
+ -1000, 63, 45, -1000, 1953, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 102, -1000, 32, -1000, -1000, 1998, 1998, 1998,
+ 1998, 1998, 1998, 171, 9, -1000, 207, 73, 1091, 54,
+ 52, 302, -1000, -1000, 81, 50, -1000, 206, 191, 298,
+ -1000, -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, 455,
+ -1000, 1064, -33, -1000, 1773, 168, 163, 162, 157, 153,
+ 152, 297, -1000, 1124, 292, 247, 1587, 43, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 304, 22, 21, 78, -1000, 233, 72, 1728,
+ -1000, 388, -1000, 1064, 944, -1000, 24, 289, 265, 263,
+ 260, 254, 248, 20, -2, -1000, 151, -1000, -1000, -1000,
+ 1338, -1000, -1000, -1000, -1000, 1998, 1773, 302, -1000, 1124,
+ -1000, 1124, -1000, -1000, -1000, -1000, -1000, -1000, 12, 1773,
+ -1000, -1000, -14, -1000, 1064, 884, -1000, -1000, 19, 70,
+ 18, 68, 15, 51, -1000, 1124, 1124, 130, 647, -1000,
+ -1000, 144, 40, -4, -5, 174, -1000, -1000, 584, 521,
+ 824, -1000, -1000, 1124, 1539, -1000, 1124, 1539, -1000, 1124,
+ 1539, -8, -1000, -1000, -1000, 245, 1998, 123, 114, 14,
+ -1000, 1064, -1000, 1064, -1000, -9, 1287, -11, 1236, -15,
+ 1185, 107, 13, 93, -1000, -1000, 1728, 764, 704, 103,
+ -1000, 98, -1000, 94, -1000, -1000, -1000, 1124, 208, 2,
+ -1000, -1000, -1000, -1000, -1000, -20, 10, 64, 91, -1000,
+ 1124, -1000, 122, -1000, -26, 180, -1000, -1000, -1000, 89,
+ -1000, -1000, -1000,
+}
+var protoPgo = [...]int{
+
+ 0, 389, 385, 269, 317, 384, 382, 0, 12, 6,
+ 5, 381, 32, 21, 380, 52, 26, 18, 20, 7,
+ 8, 377, 376, 14, 373, 367, 366, 10, 11, 363,
+ 27, 355, 353, 25, 351, 346, 9, 17, 13, 344,
+ 338, 335, 328, 30, 16, 33, 15, 327, 326, 321,
+ 35, 323, 318, 277, 31, 311, 19, 310, 29, 309,
+ 293, 2,
+}
+var protoR1 = [...]int{
+
+ 0, 1, 1, 1, 1, 4, 4, 3, 3, 3,
+ 3, 3, 3, 3, 3, 2, 5, 5, 5, 6,
+ 19, 19, 7, 12, 12, 12, 13, 13, 14, 14,
+ 15, 15, 16, 16, 16, 16, 16, 24, 24, 23,
+ 25, 25, 25, 25, 25, 56, 56, 17, 27, 27,
+ 27, 28, 28, 28, 29, 29, 29, 29, 29, 29,
+ 29, 22, 22, 26, 26, 26, 26, 26, 26, 20,
+ 20, 30, 30, 30, 30, 30, 30, 30, 30, 9,
+ 9, 8, 33, 33, 33, 32, 39, 39, 39, 38,
+ 38, 38, 31, 31, 34, 34, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 48, 48,
+ 45, 45, 44, 44, 44, 47, 47, 46, 46, 46,
+ 46, 46, 46, 46, 41, 41, 42, 42, 43, 40,
+ 40, 49, 51, 51, 51, 50, 50, 50, 50, 52,
+ 52, 52, 52, 35, 37, 37, 37, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 53, 55,
+ 55, 55, 54, 54, 54, 57, 59, 59, 59, 58,
+ 58, 58, 60, 60, 61, 61, 11, 11, 11, 10,
+ 10, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18,
+}
+var protoR2 = [...]int{
+
+ 0, 1, 1, 2, 0, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 4, 3, 4, 4, 3,
+ 1, 1, 5, 1, 3, 4, 1, 2, 1, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 1, 2, 2, 2, 2, 1, 2, 3, 1, 2,
+ 0, 1, 2, 2, 3, 4, 5, 3, 2, 5,
+ 4, 1, 3, 1, 3, 3, 3, 5, 5, 1,
+ 1, 6, 6, 6, 5, 9, 9, 9, 8, 3,
+ 1, 3, 8, 8, 8, 5, 2, 1, 0, 1,
+ 1, 1, 5, 8, 10, 13, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 3, 6,
+ 3, 1, 1, 3, 3, 3, 1, 1, 1, 3,
+ 3, 3, 3, 3, 3, 1, 3, 1, 3, 3,
+ 1, 5, 2, 1, 0, 1, 1, 1, 1, 4,
+ 7, 4, 7, 5, 2, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 5, 2,
+ 1, 0, 1, 1, 1, 5, 2, 1, 0, 1,
+ 1, 1, 10, 12, 2, 1, 2, 1, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1,
+}
+var protoChk = [...]int{
+
+ -1000, -1, -2, -4, 10, -3, -5, -6, -7, -35,
+ -49, -53, -57, 54, 11, 14, 15, 46, 45, 47,
+ 48, -4, -3, 53, -56, 12, 13, 4, -19, -18,
+ 8, 7, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, -12, -19, 67, -18, -18, -20,
+ -19, 9, -18, -56, 54, 4, -56, -56, 54, 53,
+ -20, 56, 56, 56, 56, 54, 54, 54, -15, -16,
+ -17, -56, -24, -23, -25, -18, 56, 5, 65, 66,
+ 6, 68, -37, -36, -30, -49, -35, -53, -48, -33,
+ -7, -32, -34, -41, 54, 22, 21, 20, -20, 45,
+ 46, 47, 41, 15, 39, 40, 44, -43, -51, -50,
+ -7, -52, -42, 54, -18, 44, -43, -55, -54, -30,
+ -33, 54, -59, -58, -7, -60, 54, 49, 54, -27,
+ -28, -29, -22, -18, 69, 5, 6, 18, 5, 6,
+ 18, -13, -14, 9, 61, 57, -36, -20, 38, -20,
+ 38, -20, 38, -18, -45, -44, 5, -18, 64, -45,
+ -40, -56, 57, -50, 53, -47, -46, 5, -23, 66,
+ 57, -54, 57, -58, -18, 57, -28, 62, 54, 55,
+ -17, 64, -19, -13, 67, -18, -18, -18, -18, -18,
+ -18, 53, 54, 69, 62, 42, 56, -21, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 54, 54, 62, 5, -23, 62, 54, 42, 42, 67,
+ -16, 69, -17, 64, -27, 70, -20, 53, 53, 53,
+ 53, 53, 53, 5, -9, -8, -12, -44, 5, 43,
+ -39, -38, -7, -31, 54, -20, 62, -56, 54, 69,
+ 54, 69, -46, 5, 43, -23, 5, 43, -61, 50,
+ -20, 70, -26, -15, 64, -27, 63, 68, 5, 5,
+ 5, 5, 5, 5, 54, 69, 62, 70, 53, 57,
+ -38, -18, -20, -9, -9, 68, -20, 70, 62, 54,
+ -27, 63, 54, 69, 56, 54, 69, 56, 54, 69,
+ 56, -9, -8, 54, -15, 53, 63, 70, 70, 51,
+ -15, 64, -15, 64, 63, -9, -37, -9, -37, -9,
+ -37, 70, 5, -18, 54, 54, 67, -27, -27, 70,
+ 57, 70, 57, 70, 57, 54, 54, 69, 53, -61,
+ 63, 63, 54, 54, 54, -9, 5, 68, 70, 54,
+ 69, 54, 56, 54, -9, -11, -10, -7, 54, 70,
+ 57, -10, 54,
+}
+var protoDef = [...]int{
+
+ 4, -2, 1, 2, 0, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 0, 0, 0, 0, 0, 0,
+ 0, 3, 5, 0, 0, 0, 0, 45, 0, 20,
+ 21, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 0, 23, 0, 0, 0, 0,
+ 69, 70, 0, 0, 16, 46, 0, 0, 19, 0,
+ 0, 146, 134, 161, 168, 15, 17, 18, 0, 30,
+ 31, 32, 33, 34, 35, 36, 50, 37, 0, 0,
+ 40, 24, 0, 145, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 0, 0, 0, 0, 0,
+ 0, 0, 213, 187, 0, 212, 216, 125, 0, 133,
+ 135, 136, 137, 138, 0, 216, 127, 0, 160, 162,
+ 163, 164, 0, 167, 169, 170, 171, 0, 22, 0,
+ 48, 51, 0, 61, 0, 38, 42, 43, 39, 41,
+ 44, 25, 26, 28, 0, 143, 144, 0, 0, 0,
+ 0, 0, 0, 0, 0, 111, 112, 0, 0, 0,
+ 0, 130, 131, 132, 0, 0, 116, 117, 118, 0,
+ 158, 159, 165, 166, 0, 47, 49, 52, 53, 0,
+ 58, 50, 0, 27, 0, 0, 0, 0, 0, 0,
+ 0, 0, 108, 0, 0, 0, 88, 0, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 124, 128, 0, 0, 0, 0, 126, 0, 0, 0,
+ 54, 0, 57, 50, 0, 62, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 80, 0, 110, 113, 114,
+ 0, 87, 89, 90, 91, 0, 0, 129, 139, 0,
+ 141, 0, 115, 119, 122, 120, 121, 123, 0, 222,
+ 175, 55, 0, 63, 50, 0, 60, 29, 0, 0,
+ 0, 0, 0, 0, 74, 0, 0, 0, 0, 85,
+ 86, 0, 0, 0, 0, 0, 174, 56, 0, 0,
+ 0, 59, 71, 0, 146, 72, 0, 146, 73, 0,
+ 146, 0, 79, 109, 81, 0, 0, 0, 0, 0,
+ 64, 50, 65, 50, 66, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 140, 142, 0, 0, 0, 0,
+ 82, 0, 83, 0, 84, 78, 92, 0, 0, 0,
+ 67, 68, 75, 76, 77, 0, 0, 0, 0, 94,
+ 0, 172, 178, 93, 0, 0, 177, 179, 180, 0,
+ 173, 176, 95,
+}
+// protoTok1 maps single-character lexer codes (indices 0..126) to the
+// parser's internal token numbers; protolex1 indexes it directly with
+// the character value. Entries of 3 mark characters with no dedicated
+// token (presumably the grammar's illegal-character token — inferred
+// from the table shape, not stated here). Machine-generated — do not edit.
+var protoTok1 = [...]int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 78, 3, 76, 75, 74, 72, 3,
+ 67, 68, 71, 65, 62, 66, 61, 59, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 55, 54,
+ 64, 53, 63, 60, 77, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 69, 58, 70, 73, 3, 80, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 56, 3, 57, 79,
+}
+// protoTok2 maps lexer codes in the "private" range (char >= protoPrivate)
+// to internal token numbers; protolex1 indexes it with char-protoPrivate.
+// Entry [1] doubles as the fallback token for unrecognized characters.
+// Machine-generated — do not edit.
+var protoTok2 = [...]int{
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52,
+}
+// protoTok3 is a flat list of (lexer code, internal token) pairs searched
+// linearly by protolex1 for codes covered by neither protoTok1 nor
+// protoTok2. Empty for this grammar (only the 0 terminator).
+// Machine-generated — do not edit.
+var protoTok3 = [...]int{
+ 0,
+}
+
+// protoErrorMessages optionally maps specific (state, lookahead token)
+// pairs to custom syntax-error text; protoErrorMessage scans it before
+// constructing a generic "unexpected ... expecting ..." message. The
+// grammar defines no custom messages, so the table is empty.
+var protoErrorMessages = [...]struct {
+ state int
+ token int
+ msg string
+}{}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var (
+ // protoDebug selects parser trace verbosity: >=1 logs error states,
+ // >=2 traces reductions and error recovery, >=3 logs each lexed
+ // token, >=4 dumps every state transition.
+ protoDebug = 0
+ // protoErrorVerbose enables "expecting <tok>" suggestions in syntax
+ // error messages (see protoErrorMessage).
+ protoErrorVerbose = false
+)
+
+// protoLexer is the contract the caller's lexer must satisfy: Lex fills
+// lval with the next token's semantic value and returns its code (<= 0
+// for end of input); Error reports a syntax error message.
+type protoLexer interface {
+ Lex(lval *protoSymType) int
+ Error(s string)
+}
+
+// protoParser is the public face of the generated parser: Parse consumes
+// tokens from the given lexer and returns 0 on success, nonzero on
+// unrecoverable error; Lookahead exposes the current lookahead character.
+type protoParser interface {
+ Parse(protoLexer) int
+ Lookahead() int
+}
+
+// protoParserImpl is the concrete LALR parser state.
+type protoParserImpl struct {
+ lval protoSymType // semantic value of the most recently lexed token
+ stack [protoInitialStackSize]protoSymType // initial parse stack; grown on the heap if exceeded
+ char int // current lookahead character code; -1 when none is buffered
+}
+
+// Lookahead returns the current lookahead character code, or -1 when the
+// parser holds no buffered lookahead (including when not parsing).
+func (p *protoParserImpl) Lookahead() int {
+ return p.char
+}
+
+// protoNewParser returns a fresh, zero-initialized parser instance.
+func protoNewParser() protoParser {
+ return &protoParserImpl{}
+}
+
+// protoFlag marks "simple" states: a protoPact entry <= protoFlag means
+// the state has only a default action and no lookahead is needed.
+const protoFlag = -1000
+
+// protoTokname returns the human-readable name of internal token c from
+// protoToknames (1-based), falling back to "tok-<c>" for tokens without
+// a name entry or out-of-range codes.
+func protoTokname(c int) string {
+ if c >= 1 && c-1 < len(protoToknames) {
+ if protoToknames[c-1] != "" {
+ return protoToknames[c-1]
+ }
+ }
+ return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+// protoStatname returns the human-readable name of parser state s from
+// protoStatenames, falling back to "state-<s>" when no name is recorded.
+func protoStatname(s int) string {
+ if s >= 0 && s < len(protoStatenames) {
+ if protoStatenames[s] != "" {
+ return protoStatenames[s]
+ }
+ }
+ return __yyfmt__.Sprintf("state-%v", s)
+}
+
+// protoErrorMessage builds the syntax-error text for the given state and
+// lookahead token. With protoErrorVerbose off it returns the bare
+// "syntax error". Otherwise it first checks protoErrorMessages for a
+// custom message, then mimics Bison: it lists up to four tokens that
+// would have been acceptable (shiftable per protoPact/protoAct/protoChk,
+// or accepted/reduced via the protoExca exception table when the state's
+// default action is -2), appended as ", expecting A or B".
+func protoErrorMessage(state, lookAhead int) string {
+ const TOKSTART = 4
+
+ if !protoErrorVerbose {
+ return "syntax error"
+ }
+
+ for _, e := range protoErrorMessages {
+ if e.state == state && e.token == lookAhead {
+ return "syntax error: " + e.msg
+ }
+ }
+
+ res := "syntax error: unexpected " + protoTokname(lookAhead)
+
+ // To match Bison, suggest at most four expected tokens.
+ expected := make([]int, 0, 4)
+
+ // Look for shiftable tokens.
+ base := protoPact[state]
+ for tok := TOKSTART; tok-1 < len(protoToknames); tok++ {
+ if n := base + tok; n >= 0 && n < protoLast && protoChk[protoAct[n]] == tok {
+ // Too many candidates: give up on suggestions entirely.
+ if len(expected) == cap(expected) {
+ return res
+ }
+ expected = append(expected, tok)
+ }
+ }
+
+ if protoDef[state] == -2 {
+ // Locate this state's entry in the exception table (rows are
+ // delimited by a -1 marker followed by the state number).
+ i := 0
+ for protoExca[i] != -1 || protoExca[i+1] != state {
+ i += 2
+ }
+
+ // Look for tokens that we accept or reduce.
+ for i += 2; protoExca[i] >= 0; i += 2 {
+ tok := protoExca[i]
+ if tok < TOKSTART || protoExca[i+1] == 0 {
+ continue
+ }
+ if len(expected) == cap(expected) {
+ return res
+ }
+ expected = append(expected, tok)
+ }
+
+ // If the default action is to accept or reduce, give up.
+ if protoExca[i+1] != 0 {
+ return res
+ }
+ }
+
+ for i, tok := range expected {
+ if i == 0 {
+ res += ", expecting "
+ } else {
+ res += " or "
+ }
+ res += protoTokname(tok)
+ }
+ return res
+}
+
+// protolex1 pulls the next token from the lexer and translates its raw
+// character code into the parser's internal token numbering, trying in
+// order: end-of-input (char <= 0 -> protoTok1[0]), the direct protoTok1
+// table, the private range via protoTok2, then the protoTok3 pair list.
+// An untranslatable code falls back to protoTok2[1] ("unknown char").
+// Returns both the raw char and the translated token.
+func protolex1(lex protoLexer, lval *protoSymType) (char, token int) {
+ token = 0
+ char = lex.Lex(lval)
+ if char <= 0 {
+ token = protoTok1[0]
+ goto out
+ }
+ if char < len(protoTok1) {
+ token = protoTok1[char]
+ goto out
+ }
+ if char >= protoPrivate {
+ if char < protoPrivate+len(protoTok2) {
+ token = protoTok2[char-protoPrivate]
+ goto out
+ }
+ }
+ for i := 0; i < len(protoTok3); i += 2 {
+ token = protoTok3[i+0]
+ if token == char {
+ token = protoTok3[i+1]
+ goto out
+ }
+ }
+
+out:
+ if token == 0 {
+ token = protoTok2[1] /* unknown char */
+ }
+ if protoDebug >= 3 {
+ __yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char))
+ }
+ return char, token
+}
+
+// protoParse is a convenience wrapper that runs a one-shot parse with a
+// fresh parser instance; returns 0 on success, nonzero on failure.
+func protoParse(protolex protoLexer) int {
+ return protoNewParser().Parse(protolex)
+}
+
+func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int {
+ var proton int
+ var protoVAL protoSymType
+ var protoDollar []protoSymType
+ _ = protoDollar // silence set and not used
+ protoS := protorcvr.stack[:]
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ protostate := 0
+ protorcvr.char = -1
+ prototoken := -1 // protorcvr.char translated into internal numbering
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ protostate = -1
+ protorcvr.char = -1
+ prototoken = -1
+ }()
+ protop := -1
+ goto protostack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+protostack:
+ /* put a state and value onto the stack */
+ if protoDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate))
+ }
+
+ protop++
+ if protop >= len(protoS) {
+ nyys := make([]protoSymType, len(protoS)*2)
+ copy(nyys, protoS)
+ protoS = nyys
+ }
+ protoS[protop] = protoVAL
+ protoS[protop].yys = protostate
+
+protonewstate:
+ proton = protoPact[protostate]
+ if proton <= protoFlag {
+ goto protodefault /* simple state */
+ }
+ if protorcvr.char < 0 {
+ protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+ }
+ proton += prototoken
+ if proton < 0 || proton >= protoLast {
+ goto protodefault
+ }
+ proton = protoAct[proton]
+ if protoChk[proton] == prototoken { /* valid shift */
+ protorcvr.char = -1
+ prototoken = -1
+ protoVAL = protorcvr.lval
+ protostate = proton
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto protostack
+ }
+
+protodefault:
+ /* default state action */
+ proton = protoDef[protostate]
+ if proton == -2 {
+ if protorcvr.char < 0 {
+ protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if protoExca[xi+0] == -1 && protoExca[xi+1] == protostate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ proton = protoExca[xi+0]
+ if proton < 0 || proton == prototoken {
+ break
+ }
+ }
+ proton = protoExca[xi+1]
+ if proton < 0 {
+ goto ret0
+ }
+ }
+ if proton == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ protolex.Error(protoErrorMessage(protostate, prototoken))
+ Nerrs++
+ if protoDebug >= 1 {
+ __yyfmt__.Printf("%s", protoStatname(protostate))
+ __yyfmt__.Printf(" saw %s\n", protoTokname(prototoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for protop >= 0 {
+ proton = protoPact[protoS[protop].yys] + protoErrCode
+ if proton >= 0 && proton < protoLast {
+ protostate = protoAct[proton] /* simulate a shift of "error" */
+ if protoChk[protostate] == protoErrCode {
+ goto protostack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys)
+ }
+ protop--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken))
+ }
+ if prototoken == protoEofCode {
+ goto ret1
+ }
+ protorcvr.char = -1
+ prototoken = -1
+ goto protonewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production proton */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate))
+ }
+
+ protont := proton
+ protopt := protop
+ _ = protopt // guard against "declared and not used"
+
+ protop -= protoR2[proton]
+ // protop is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if protop+1 >= len(protoS) {
+ nyys := make([]protoSymType, len(protoS)*2)
+ copy(nyys, protoS)
+ protoS = nyys
+ }
+ protoVAL = protoS[protop+1]
+
+ /* consult goto table to find next state */
+ proton = protoR1[proton]
+ protog := protoPgo[proton]
+ protoj := protog + protoS[protop].yys + 1
+
+ if protoj >= protoLast {
+ protostate = protoAct[protog]
+ } else {
+ protostate = protoAct[protoj]
+ if protoChk[protostate] != -proton {
+ protostate = protoAct[protog]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch protont {
+
+ case 1:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:114
+ {
+ protoVAL.file = &fileNode{syntax: protoDollar[1].syn}
+ protoVAL.file.setRange(protoDollar[1].syn, protoDollar[1].syn)
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 2:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:119
+ {
+ protoVAL.file = &fileNode{decls: protoDollar[1].fileDecls}
+ if len(protoDollar[1].fileDecls) > 0 {
+ protoVAL.file.setRange(protoDollar[1].fileDecls[0], protoDollar[1].fileDecls[len(protoDollar[1].fileDecls)-1])
+ }
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 3:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:126
+ {
+ protoVAL.file = &fileNode{syntax: protoDollar[1].syn, decls: protoDollar[2].fileDecls}
+ var end node
+ if len(protoDollar[2].fileDecls) > 0 {
+ end = protoDollar[2].fileDecls[len(protoDollar[2].fileDecls)-1]
+ } else {
+ end = protoDollar[1].syn
+ }
+ protoVAL.file.setRange(protoDollar[1].syn, end)
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 4:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:137
+ {
+ }
+ case 5:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:140
+ {
+ protoVAL.fileDecls = append(protoDollar[1].fileDecls, protoDollar[2].fileDecls...)
+ }
+ case 7:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:145
+ {
+ protoVAL.fileDecls = []*fileElement{{imp: protoDollar[1].imprt}}
+ }
+ case 8:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:148
+ {
+ protoVAL.fileDecls = []*fileElement{{pkg: protoDollar[1].pkg}}
+ }
+ case 9:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:151
+ {
+ protoVAL.fileDecls = []*fileElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 10:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:154
+ {
+ protoVAL.fileDecls = []*fileElement{{message: protoDollar[1].msg}}
+ }
+ case 11:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:157
+ {
+ protoVAL.fileDecls = []*fileElement{{enum: protoDollar[1].en}}
+ }
+ case 12:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:160
+ {
+ protoVAL.fileDecls = []*fileElement{{extend: protoDollar[1].extend}}
+ }
+ case 13:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:163
+ {
+ protoVAL.fileDecls = []*fileElement{{service: protoDollar[1].svc}}
+ }
+ case 14:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:166
+ {
+ protoVAL.fileDecls = []*fileElement{{empty: protoDollar[1].b}}
+ }
+ case 15:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:170
+ {
+ if protoDollar[3].str.val != "proto2" && protoDollar[3].str.val != "proto3" {
+ lexError(protolex, protoDollar[3].str.start(), "syntax value must be 'proto2' or 'proto3'")
+ }
+ protoVAL.syn = &syntaxNode{syntax: protoDollar[3].str}
+ protoVAL.syn.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 16:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:178
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[2].str}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 17:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:182
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[3].str, weak: true}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 18:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:186
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[3].str, public: true}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 19:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:191
+ {
+ protoVAL.pkg = &packageNode{name: protoDollar[2].id}
+ protoVAL.pkg.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 22:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:199
+ {
+ n := &optionNameNode{parts: protoDollar[2].optNm}
+ n.setRange(protoDollar[2].optNm[0], protoDollar[2].optNm[len(protoDollar[2].optNm)-1])
+ o := &optionNode{name: n, val: protoDollar[4].v}
+ o.setRange(protoDollar[1].id, protoDollar[5].b)
+ protoVAL.opts = []*optionNode{o}
+ }
+ case 23:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:207
+ {
+ protoVAL.optNm = toNameParts(protoDollar[1].id, 0)
+ }
+ case 24:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:210
+ {
+ p := &optionNamePartNode{text: protoDollar[2].id, isExtension: true}
+ p.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.optNm = []*optionNamePartNode{p}
+ }
+ case 25:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:215
+ {
+ p := &optionNamePartNode{text: protoDollar[2].id, isExtension: true}
+ p.setRange(protoDollar[1].b, protoDollar[3].b)
+ ps := make([]*optionNamePartNode, 1, len(protoDollar[4].optNm)+1)
+ ps[0] = p
+ protoVAL.optNm = append(ps, protoDollar[4].optNm...)
+ }
+ case 27:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:224
+ {
+ protoVAL.optNm = append(protoDollar[1].optNm, protoDollar[2].optNm...)
+ }
+ case 28:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:228
+ {
+ protoVAL.optNm = toNameParts(protoDollar[1].id, 1 /* exclude leading dot */)
+ }
+ case 29:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:231
+ {
+ p := &optionNamePartNode{text: protoDollar[3].id, isExtension: true}
+ p.setRange(protoDollar[2].b, protoDollar[4].b)
+ protoVAL.optNm = []*optionNamePartNode{p}
+ }
+ case 32:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:240
+ {
+ protoVAL.v = protoDollar[1].str
+ }
+ case 33:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:243
+ {
+ protoVAL.v = protoDollar[1].ui
+ }
+ case 34:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:246
+ {
+ protoVAL.v = protoDollar[1].i
+ }
+ case 35:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:249
+ {
+ protoVAL.v = protoDollar[1].f
+ }
+ case 36:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:252
+ {
+ if protoDollar[1].id.val == "true" {
+ protoVAL.v = &boolLiteralNode{basicNode: protoDollar[1].id.basicNode, val: true}
+ } else if protoDollar[1].id.val == "false" {
+ protoVAL.v = &boolLiteralNode{basicNode: protoDollar[1].id.basicNode, val: false}
+ } else if protoDollar[1].id.val == "inf" {
+ f := &floatLiteralNode{val: math.Inf(1)}
+ f.setRange(protoDollar[1].id, protoDollar[1].id)
+ protoVAL.v = f
+ } else if protoDollar[1].id.val == "nan" {
+ f := &floatLiteralNode{val: math.NaN()}
+ f.setRange(protoDollar[1].id, protoDollar[1].id)
+ protoVAL.v = f
+ } else {
+ protoVAL.v = protoDollar[1].id
+ }
+ }
+ case 38:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:271
+ {
+ protoVAL.ui = protoDollar[2].ui
+ }
+ case 39:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:275
+ {
+ if protoDollar[2].ui.val > math.MaxInt64+1 {
+ lexError(protolex, protoDollar[2].ui.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", protoDollar[2].ui.val, int64(math.MinInt64), int64(math.MaxInt64)))
+ }
+ protoVAL.i = &negativeIntLiteralNode{val: -int64(protoDollar[2].ui.val)}
+ protoVAL.i.setRange(protoDollar[1].b, protoDollar[2].ui)
+ }
+ case 41:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:284
+ {
+ protoVAL.f = &floatLiteralNode{val: -protoDollar[2].f.val}
+ protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].f)
+ }
+ case 42:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:288
+ {
+ protoVAL.f = &floatLiteralNode{val: protoDollar[2].f.val}
+ protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].f)
+ }
+ case 43:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:292
+ {
+ protoVAL.f = &floatLiteralNode{val: math.Inf(1)}
+ protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].id)
+ }
+ case 44:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:296
+ {
+ protoVAL.f = &floatLiteralNode{val: math.Inf(-1)}
+ protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].id)
+ }
+ case 46:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:302
+ {
+ protoVAL.str = &stringLiteralNode{val: protoDollar[1].str.val + protoDollar[2].str.val}
+ protoVAL.str.setRange(protoDollar[1].str, protoDollar[2].str)
+ }
+ case 47:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:307
+ {
+ a := &aggregateLiteralNode{elements: protoDollar[2].agg}
+ a.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.v = a
+ }
+ case 49:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:314
+ {
+ protoVAL.agg = append(protoDollar[1].agg, protoDollar[2].agg...)
+ }
+ case 50:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:317
+ {
+ protoVAL.agg = nil
+ }
+ case 52:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:322
+ {
+ protoVAL.agg = protoDollar[1].agg
+ }
+ case 53:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:325
+ {
+ protoVAL.agg = protoDollar[1].agg
+ }
+ case 54:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:329
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 55:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:334
+ {
+ s := &sliceLiteralNode{}
+ s.setRange(protoDollar[3].b, protoDollar[4].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 56:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:341
+ {
+ s := &sliceLiteralNode{elements: protoDollar[4].sl}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 57:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:348
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 58:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:353
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[2].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[2].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 59:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:358
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 60:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:365
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[3].agg}
+ s.setRange(protoDollar[2].b, protoDollar[4].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 61:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:373
+ {
+ protoVAL.aggName = &aggregateNameNode{name: protoDollar[1].id}
+ protoVAL.aggName.setRange(protoDollar[1].id, protoDollar[1].id)
+ }
+ case 62:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:377
+ {
+ protoVAL.aggName = &aggregateNameNode{name: protoDollar[2].id, isExtension: true}
+ protoVAL.aggName.setRange(protoDollar[1].b, protoDollar[3].b)
+ }
+ case 63:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:382
+ {
+ protoVAL.sl = []valueNode{protoDollar[1].v}
+ }
+ case 64:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:385
+ {
+ protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+ }
+ case 65:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:388
+ {
+ protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+ }
+ case 66:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:391
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[2].agg}
+ s.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.sl = []valueNode{s}
+ }
+ case 67:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:396
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ protoVAL.sl = append(protoDollar[1].sl, s)
+ }
+ case 68:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:401
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ protoVAL.sl = append(protoDollar[1].sl, s)
+ }
+ case 71:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:410
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 72:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:416
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 73:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:422
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 74:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:428
+ {
+ checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 75:
+ protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:433
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+ }
+ case 76:
+ protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:439
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+ }
+ case 77:
+ protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:445
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+ }
+ case 78:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:451
+ {
+ checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui, options: protoDollar[6].opts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 79:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:457
+ {
+ protoVAL.opts = append(protoDollar[1].opts, protoDollar[3].opts...)
+ }
+ case 81:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:462
+ {
+ n := &optionNameNode{parts: protoDollar[1].optNm}
+ n.setRange(protoDollar[1].optNm[0], protoDollar[1].optNm[len(protoDollar[1].optNm)-1])
+ o := &optionNode{name: n, val: protoDollar[3].v}
+ o.setRange(protoDollar[1].optNm[0], protoDollar[3].v)
+ protoVAL.opts = []*optionNode{o}
+ }
+ case 82:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:470
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 83:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:479
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 84:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:488
+ {
+ checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 85:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:498
+ {
+ c := 0
+ for _, el := range protoDollar[4].ooDecls {
+ if el.field != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "oneof must contain at least one field")
+ }
+ protoVAL.oo = &oneOfNode{name: protoDollar[2].id, decls: protoDollar[4].ooDecls}
+ protoVAL.oo.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 86:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:512
+ {
+ protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecls...)
+ }
+ case 88:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:516
+ {
+ protoVAL.ooDecls = nil
+ }
+ case 89:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:520
+ {
+ protoVAL.ooDecls = []*oneOfElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 90:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:523
+ {
+ protoVAL.ooDecls = []*oneOfElement{{field: protoDollar[1].fld}}
+ }
+ case 91:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:526
+ {
+ protoVAL.ooDecls = []*oneOfElement{{empty: protoDollar[1].b}}
+ }
+ case 92:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:530
+ {
+ checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 93:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:535
+ {
+ checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui, options: protoDollar[6].opts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 94:
+ protoDollar = protoS[protopt-10 : protopt+1]
+//line proto.y:541
+ {
+ checkTag(protolex, protoDollar[9].ui.start(), protoDollar[9].ui.val)
+ protoVAL.mapFld = &mapFieldNode{mapKeyword: protoDollar[1].id, keyType: protoDollar[3].id, valueType: protoDollar[5].id, name: protoDollar[7].id, tag: protoDollar[9].ui}
+ protoVAL.mapFld.setRange(protoDollar[1].id, protoDollar[10].b)
+ }
+ case 95:
+ protoDollar = protoS[protopt-13 : protopt+1]
+//line proto.y:546
+ {
+ checkTag(protolex, protoDollar[9].ui.start(), protoDollar[9].ui.val)
+ protoVAL.mapFld = &mapFieldNode{mapKeyword: protoDollar[1].id, keyType: protoDollar[3].id, valueType: protoDollar[5].id, name: protoDollar[7].id, tag: protoDollar[9].ui, options: protoDollar[11].opts}
+ protoVAL.mapFld.setRange(protoDollar[1].id, protoDollar[13].b)
+ }
+ case 108:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:565
+ {
+ protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs}
+ protoVAL.ext.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 109:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:569
+ {
+ protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs, options: protoDollar[4].opts}
+ protoVAL.ext.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 110:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:574
+ {
+ protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+ }
+ case 112:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:579
+ {
+ if protoDollar[1].ui.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[1].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[1].ui.val)}
+ r.setRange(protoDollar[1].ui, protoDollar[1].ui)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 113:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:587
+ {
+ if protoDollar[1].ui.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+ }
+ if protoDollar[3].ui.val > internal.MaxTag {
+ lexError(protolex, protoDollar[3].ui.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", protoDollar[3].ui.val, internal.MaxTag))
+ }
+ if protoDollar[1].ui.val > protoDollar[3].ui.val {
+ lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].ui.val, protoDollar[3].ui.val))
+ }
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[3].ui.val)}
+ r.setRange(protoDollar[1].ui, protoDollar[3].ui)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 114:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:601
+ {
+ if protoDollar[1].ui.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].id, st: int32(protoDollar[1].ui.val), en: internal.MaxTag}
+ r.setRange(protoDollar[1].ui, protoDollar[3].id)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 115:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:610
+ {
+ protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+ }
+ case 117:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:615
+ {
+ checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[1].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[1].ui.val)}
+ r.setRange(protoDollar[1].ui, protoDollar[1].ui)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 118:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:621
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[1].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[1].i.val)}
+ r.setRange(protoDollar[1].i, protoDollar[1].i)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 119:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:627
+ {
+ checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+ checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+ if protoDollar[1].ui.val > protoDollar[3].ui.val {
+ lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].ui.val, protoDollar[3].ui.val))
+ }
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[3].ui.val)}
+ r.setRange(protoDollar[1].ui, protoDollar[3].ui)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 120:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:637
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+ checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+ if protoDollar[1].i.val > protoDollar[3].i.val {
+ lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].i.val, protoDollar[3].i.val))
+ }
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[3].i.val)}
+ r.setRange(protoDollar[1].i, protoDollar[3].i)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 121:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:647
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+ checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].ui, st: int32(protoDollar[1].i.val), en: int32(protoDollar[3].ui.val)}
+ r.setRange(protoDollar[1].i, protoDollar[3].ui)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 122:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:654
+ {
+ checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+ r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].id, st: int32(protoDollar[1].ui.val), en: math.MaxInt32}
+ r.setRange(protoDollar[1].ui, protoDollar[3].id)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 123:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:660
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].id, st: int32(protoDollar[1].i.val), en: math.MaxInt32}
+ r.setRange(protoDollar[1].i, protoDollar[3].id)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 124:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:667
+ {
+ protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 126:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:673
+ {
+ protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 128:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:679
+ {
+ rsvd := map[string]struct{}{}
+ for _, n := range protoDollar[2].names {
+ if _, ok := rsvd[n.val]; ok {
+ lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+ break
+ }
+ rsvd[n.val] = struct{}{}
+ }
+ protoVAL.resvd = &reservedNode{names: protoDollar[2].names}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 129:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:692
+ {
+ protoVAL.names = append(protoDollar[1].names, protoDollar[3].str)
+ }
+ case 130:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:695
+ {
+ protoVAL.names = []*stringLiteralNode{protoDollar[1].str}
+ }
+ case 131:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:699
+ {
+ c := 0
+ for _, el := range protoDollar[4].enDecls {
+ if el.value != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "enums must define at least one value")
+ }
+ protoVAL.en = &enumNode{name: protoDollar[2].id, decls: protoDollar[4].enDecls}
+ protoVAL.en.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 132:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:713
+ {
+ protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecls...)
+ }
+ case 134:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:717
+ {
+ protoVAL.enDecls = nil
+ }
+ case 135:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:721
+ {
+ protoVAL.enDecls = []*enumElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 136:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:724
+ {
+ protoVAL.enDecls = []*enumElement{{value: protoDollar[1].env}}
+ }
+ case 137:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:727
+ {
+ protoVAL.enDecls = []*enumElement{{reserved: protoDollar[1].resvd}}
+ }
+ case 138:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:730
+ {
+ protoVAL.enDecls = []*enumElement{{empty: protoDollar[1].b}}
+ }
+ case 139:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:734
+ {
+ checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberP: protoDollar[3].ui}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 140:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:739
+ {
+ checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberP: protoDollar[3].ui, options: protoDollar[5].opts}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 141:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:744
+ {
+ checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberN: protoDollar[3].i}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 142:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:749
+ {
+ checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberN: protoDollar[3].i, options: protoDollar[5].opts}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 143:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:755
+ {
+ protoVAL.msg = &messageNode{name: protoDollar[2].id, decls: protoDollar[4].msgDecls}
+ protoVAL.msg.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 144:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:760
+ {
+ protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecls...)
+ }
+ case 146:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:764
+ {
+ protoVAL.msgDecls = nil
+ }
+ case 147:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:768
+ {
+ protoVAL.msgDecls = []*messageElement{{field: protoDollar[1].fld}}
+ }
+ case 148:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:771
+ {
+ protoVAL.msgDecls = []*messageElement{{enum: protoDollar[1].en}}
+ }
+ case 149:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:774
+ {
+ protoVAL.msgDecls = []*messageElement{{nested: protoDollar[1].msg}}
+ }
+ case 150:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:777
+ {
+ protoVAL.msgDecls = []*messageElement{{extend: protoDollar[1].extend}}
+ }
+ case 151:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:780
+ {
+ protoVAL.msgDecls = []*messageElement{{extensionRange: protoDollar[1].ext}}
+ }
+ case 152:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:783
+ {
+ protoVAL.msgDecls = []*messageElement{{group: protoDollar[1].grp}}
+ }
+ case 153:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:786
+ {
+ protoVAL.msgDecls = []*messageElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 154:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:789
+ {
+ protoVAL.msgDecls = []*messageElement{{oneOf: protoDollar[1].oo}}
+ }
+ case 155:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:792
+ {
+ protoVAL.msgDecls = []*messageElement{{mapField: protoDollar[1].mapFld}}
+ }
+ case 156:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:795
+ {
+ protoVAL.msgDecls = []*messageElement{{reserved: protoDollar[1].resvd}}
+ }
+ case 157:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:798
+ {
+ protoVAL.msgDecls = []*messageElement{{empty: protoDollar[1].b}}
+ }
+ case 158:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:802
+ {
+ c := 0
+ for _, el := range protoDollar[4].extDecls {
+ if el.field != nil || el.group != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "extend sections must define at least one extension")
+ }
+ protoVAL.extend = &extendNode{extendee: protoDollar[2].id, decls: protoDollar[4].extDecls}
+ protoVAL.extend.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 159:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:816
+ {
+ protoVAL.extDecls = append(protoDollar[1].extDecls, protoDollar[2].extDecls...)
+ }
+ case 161:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:820
+ {
+ protoVAL.extDecls = nil
+ }
+ case 162:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:824
+ {
+ protoVAL.extDecls = []*extendElement{{field: protoDollar[1].fld}}
+ }
+ case 163:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:827
+ {
+ protoVAL.extDecls = []*extendElement{{group: protoDollar[1].grp}}
+ }
+ case 164:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:830
+ {
+ protoVAL.extDecls = []*extendElement{{empty: protoDollar[1].b}}
+ }
+ case 165:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:834
+ {
+ protoVAL.svc = &serviceNode{name: protoDollar[2].id, decls: protoDollar[4].svcDecls}
+ protoVAL.svc.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 166:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:839
+ {
+ protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecls...)
+ }
+ case 168:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:843
+ {
+ protoVAL.svcDecls = nil
+ }
+ case 169:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:850
+ {
+ protoVAL.svcDecls = []*serviceElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 170:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:853
+ {
+ protoVAL.svcDecls = []*serviceElement{{rpc: protoDollar[1].mtd}}
+ }
+ case 171:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:856
+ {
+ protoVAL.svcDecls = []*serviceElement{{empty: protoDollar[1].b}}
+ }
+ case 172:
+ protoDollar = protoS[protopt-10 : protopt+1]
+//line proto.y:860
+ {
+ protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType}
+ protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[10].b)
+ }
+ case 173:
+ protoDollar = protoS[protopt-12 : protopt+1]
+//line proto.y:864
+ {
+ protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType, options: protoDollar[11].opts}
+ protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[12].b)
+ }
+ case 174:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:869
+ {
+ protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[2].id, streamKeyword: protoDollar[1].id}
+ protoVAL.rpcType.setRange(protoDollar[1].id, protoDollar[2].id)
+ }
+ case 175:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:873
+ {
+ protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[1].id}
+ protoVAL.rpcType.setRange(protoDollar[1].id, protoDollar[1].id)
+ }
+ case 176:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:878
+ {
+ protoVAL.opts = append(protoDollar[1].opts, protoDollar[2].opts...)
+ }
+ case 178:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:882
+ {
+ protoVAL.opts = []*optionNode{}
+ }
+ case 179:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:886
+ {
+ protoVAL.opts = protoDollar[1].opts
+ }
+ case 180:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:889
+ {
+ protoVAL.opts = []*optionNode{}
+ }
+ }
+ goto protostack /* stack new state and value */
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
new file mode 100644
index 0000000..d0a61c2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
@@ -0,0 +1,612 @@
+package protoparse
+
+import (
+ "bytes"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+func (r *parseResult) generateSourceCodeInfo() *dpb.SourceCodeInfo {
+ if r.nodes == nil {
+ // skip files that do not have AST info (these will be files
+ // that came from well-known descriptors, instead of from source)
+ return nil
+ }
+
+ sci := sourceCodeInfo{commentsUsed: map[*comment]struct{}{}}
+ path := make([]int32, 0, 10)
+
+ fn := r.getFileNode(r.fd).(*fileNode)
+ if fn.syntax != nil {
+ sci.newLoc(fn.syntax, append(path, internal.File_syntaxTag))
+ }
+ if fn.pkg != nil {
+ sci.newLoc(fn.pkg, append(path, internal.File_packageTag))
+ }
+ for i, imp := range fn.imports {
+ sci.newLoc(imp, append(path, internal.File_dependencyTag, int32(i)))
+ }
+
+ // file options
+ r.generateSourceCodeInfoForOptions(&sci, fn.decls, func(n interface{}) *optionNode {
+ return n.(*fileElement).option
+ }, r.fd.Options.GetUninterpretedOption(), append(path, internal.File_optionsTag))
+
+ // message types
+ for i, msg := range r.fd.GetMessageType() {
+ r.generateSourceCodeInfoForMessage(&sci, msg, append(path, internal.File_messagesTag, int32(i)))
+ }
+
+ // enum types
+ for i, enum := range r.fd.GetEnumType() {
+ r.generateSourceCodeInfoForEnum(&sci, enum, append(path, internal.File_enumsTag, int32(i)))
+ }
+
+ // extension fields
+ for i, ext := range r.fd.GetExtension() {
+ r.generateSourceCodeInfoForField(&sci, ext, append(path, internal.File_extensionsTag, int32(i)))
+ }
+
+ // services and methods
+ for i, svc := range r.fd.GetService() {
+ n := r.getServiceNode(svc).(*serviceNode)
+ svcPath := append(path, internal.File_servicesTag, int32(i))
+ sci.newLoc(n, svcPath)
+ sci.newLoc(n.name, append(svcPath, internal.Service_nameTag))
+
+ // service options
+ r.generateSourceCodeInfoForOptions(&sci, n.decls, func(n interface{}) *optionNode {
+ return n.(*serviceElement).option
+ }, svc.Options.GetUninterpretedOption(), append(svcPath, internal.Service_optionsTag))
+
+ // methods
+ for j, mtd := range svc.GetMethod() {
+ mn := r.getMethodNode(mtd).(*methodNode)
+ mtdPath := append(svcPath, internal.Service_methodsTag, int32(j))
+ sci.newLoc(mn, mtdPath)
+ sci.newLoc(mn.name, append(mtdPath, internal.Method_nameTag))
+
+ sci.newLoc(mn.input.msgType, append(mtdPath, internal.Method_inputTag))
+ if mn.input.streamKeyword != nil {
+ sci.newLoc(mn.input.streamKeyword, append(mtdPath, internal.Method_inputStreamTag))
+ }
+ sci.newLoc(mn.output.msgType, append(mtdPath, internal.Method_outputTag))
+ if mn.output.streamKeyword != nil {
+ sci.newLoc(mn.output.streamKeyword, append(mtdPath, internal.Method_outputStreamTag))
+ }
+
+ // method options
+ r.generateSourceCodeInfoForOptions(&sci, mn.options, func(n interface{}) *optionNode {
+ return n.(*optionNode)
+ }, mtd.Options.GetUninterpretedOption(), append(mtdPath, internal.Method_optionsTag))
+ }
+ }
+ return &dpb.SourceCodeInfo{Location: sci.generateLocs()}
+}
+
+func (r *parseResult) generateSourceCodeInfoForOptions(sci *sourceCodeInfo, elements interface{}, extractor func(interface{}) *optionNode, uninterp []*dpb.UninterpretedOption, path []int32) {
+ // Known options are option node elements that have a corresponding
+ // path in r.interpretedOptions. We'll do those first.
+ rv := reflect.ValueOf(elements)
+ for i := 0; i < rv.Len(); i++ {
+ on := extractor(rv.Index(i).Interface())
+ if on == nil {
+ continue
+ }
+ optPath := r.interpretedOptions[on]
+ if len(optPath) > 0 {
+ p := path
+ if optPath[0] == -1 {
+ // used by "default" and "json_name" field pseudo-options
+ // to attribute path to parent element (since those are
+ // stored directly on the descriptor, not its options)
+ p = make([]int32, len(path)-1)
+ copy(p, path)
+ optPath = optPath[1:]
+ }
+ sci.newLoc(on, append(p, optPath...))
+ }
+ }
+
+ // Now uninterpreted options
+ for i, uo := range uninterp {
+ optPath := append(path, internal.UninterpretedOptionsTag, int32(i))
+ on := r.getOptionNode(uo).(*optionNode)
+ sci.newLoc(on, optPath)
+
+ var valTag int32
+ switch {
+ case uo.IdentifierValue != nil:
+ valTag = internal.Uninterpreted_identTag
+ case uo.PositiveIntValue != nil:
+ valTag = internal.Uninterpreted_posIntTag
+ case uo.NegativeIntValue != nil:
+ valTag = internal.Uninterpreted_negIntTag
+ case uo.DoubleValue != nil:
+ valTag = internal.Uninterpreted_doubleTag
+ case uo.StringValue != nil:
+ valTag = internal.Uninterpreted_stringTag
+ case uo.AggregateValue != nil:
+ valTag = internal.Uninterpreted_aggregateTag
+ }
+ if valTag != 0 {
+ sci.newLoc(on.val, append(optPath, valTag))
+ }
+
+ for j, n := range uo.Name {
+ optNmPath := append(optPath, internal.Uninterpreted_nameTag, int32(j))
+ nn := r.getOptionNamePartNode(n).(*optionNamePartNode)
+ sci.newLoc(nn, optNmPath)
+ sci.newLoc(nn.text, append(optNmPath, internal.UninterpretedName_nameTag))
+ }
+ }
+}
+
+func (r *parseResult) generateSourceCodeInfoForMessage(sci *sourceCodeInfo, msg *dpb.DescriptorProto, path []int32) {
+ n := r.getMessageNode(msg)
+ sci.newLoc(n, path)
+
+ var decls []*messageElement
+ var resvdNames []*stringLiteralNode
+ switch n := n.(type) {
+ case *messageNode:
+ decls = n.decls
+ resvdNames = n.reserved
+ case *groupNode:
+ decls = n.decls
+ resvdNames = n.reserved
+ }
+ if decls == nil {
+ // map entry so nothing else to do
+ return
+ }
+
+ sci.newLoc(n.messageName(), append(path, internal.Message_nameTag))
+
+ // message options
+ r.generateSourceCodeInfoForOptions(sci, decls, func(n interface{}) *optionNode {
+ return n.(*messageElement).option
+ }, msg.Options.GetUninterpretedOption(), append(path, internal.Message_optionsTag))
+
+ // fields
+ for i, fld := range msg.GetField() {
+ r.generateSourceCodeInfoForField(sci, fld, append(path, internal.Message_fieldsTag, int32(i)))
+ }
+
+ // one-ofs
+ for i, ood := range msg.GetOneofDecl() {
+ oon := r.getOneOfNode(ood).(*oneOfNode)
+ ooPath := append(path, internal.Message_oneOfsTag, int32(i))
+ sci.newLoc(oon, ooPath)
+ sci.newLoc(oon.name, append(ooPath, internal.OneOf_nameTag))
+
+ // one-of options
+ r.generateSourceCodeInfoForOptions(sci, oon.decls, func(n interface{}) *optionNode {
+ return n.(*oneOfElement).option
+ }, ood.Options.GetUninterpretedOption(), append(ooPath, internal.OneOf_optionsTag))
+ }
+
+ // nested messages
+ for i, nm := range msg.GetNestedType() {
+ r.generateSourceCodeInfoForMessage(sci, nm, append(path, internal.Message_nestedMessagesTag, int32(i)))
+ }
+
+ // nested enums
+ for i, enum := range msg.GetEnumType() {
+ r.generateSourceCodeInfoForEnum(sci, enum, append(path, internal.Message_enumsTag, int32(i)))
+ }
+
+ // nested extensions
+ for i, ext := range msg.GetExtension() {
+ r.generateSourceCodeInfoForField(sci, ext, append(path, internal.Message_extensionsTag, int32(i)))
+ }
+
+ // extension ranges
+ for i, er := range msg.ExtensionRange {
+ rangePath := append(path, internal.Message_extensionRangeTag, int32(i))
+ rn := r.getExtensionRangeNode(er).(*rangeNode)
+ sci.newLoc(rn, rangePath)
+ sci.newLoc(rn.stNode, append(rangePath, internal.ExtensionRange_startTag))
+ if rn.stNode != rn.enNode {
+ sci.newLoc(rn.enNode, append(rangePath, internal.ExtensionRange_endTag))
+ }
+ // now we have to find the extension decl and options that correspond to this range :(
+ for _, d := range decls {
+ found := false
+ if d.extensionRange != nil {
+ for _, r := range d.extensionRange.ranges {
+ if rn == r {
+ found = true
+ break
+ }
+ }
+ }
+ if found {
+ r.generateSourceCodeInfoForOptions(sci, d.extensionRange.options, func(n interface{}) *optionNode {
+ return n.(*optionNode)
+ }, er.Options.GetUninterpretedOption(), append(rangePath, internal.ExtensionRange_optionsTag))
+ break
+ }
+ }
+ }
+
+ // reserved ranges
+ for i, rr := range msg.ReservedRange {
+ rangePath := append(path, internal.Message_reservedRangeTag, int32(i))
+ rn := r.getMessageReservedRangeNode(rr).(*rangeNode)
+ sci.newLoc(rn, rangePath)
+ sci.newLoc(rn.stNode, append(rangePath, internal.ReservedRange_startTag))
+ if rn.stNode != rn.enNode {
+ sci.newLoc(rn.enNode, append(rangePath, internal.ReservedRange_endTag))
+ }
+ }
+
+ // reserved names
+ for i, n := range resvdNames {
+ sci.newLoc(n, append(path, internal.Message_reservedNameTag, int32(i)))
+ }
+}
+
+func (r *parseResult) generateSourceCodeInfoForEnum(sci *sourceCodeInfo, enum *dpb.EnumDescriptorProto, path []int32) {
+ n := r.getEnumNode(enum).(*enumNode)
+ sci.newLoc(n, path)
+ sci.newLoc(n.name, append(path, internal.Enum_nameTag))
+
+ // enum options
+ r.generateSourceCodeInfoForOptions(sci, n.decls, func(n interface{}) *optionNode {
+ return n.(*enumElement).option
+ }, enum.Options.GetUninterpretedOption(), append(path, internal.Enum_optionsTag))
+
+ // enum values
+ for j, ev := range enum.GetValue() {
+ evn := r.getEnumValueNode(ev).(*enumValueNode)
+ evPath := append(path, internal.Enum_valuesTag, int32(j))
+ sci.newLoc(evn, evPath)
+ sci.newLoc(evn.name, append(evPath, internal.EnumVal_nameTag))
+ sci.newLoc(evn.getNumber(), append(evPath, internal.EnumVal_numberTag))
+
+ // enum value options
+ r.generateSourceCodeInfoForOptions(sci, evn.options, func(n interface{}) *optionNode {
+ return n.(*optionNode)
+ }, ev.Options.GetUninterpretedOption(), append(evPath, internal.EnumVal_optionsTag))
+ }
+
+ // reserved ranges
+ for i, rr := range enum.GetReservedRange() {
+ rangePath := append(path, internal.Enum_reservedRangeTag, int32(i))
+ rn := r.getEnumReservedRangeNode(rr).(*rangeNode)
+ sci.newLoc(rn, rangePath)
+ sci.newLoc(rn.stNode, append(rangePath, internal.ReservedRange_startTag))
+ if rn.stNode != rn.enNode {
+ sci.newLoc(rn.enNode, append(rangePath, internal.ReservedRange_endTag))
+ }
+ }
+
+ // reserved names
+ for i, rn := range n.reserved {
+ sci.newLoc(rn, append(path, internal.Enum_reservedNameTag, int32(i)))
+ }
+}
+
+func (r *parseResult) generateSourceCodeInfoForField(sci *sourceCodeInfo, fld *dpb.FieldDescriptorProto, path []int32) {
+ n := r.getFieldNode(fld)
+
+ isGroup := false
+ var opts []*optionNode
+ var extendee *extendNode
+ switch n := n.(type) {
+ case *fieldNode:
+ opts = n.options
+ extendee = n.extendee
+ case *mapFieldNode:
+ opts = n.options
+ case *groupNode:
+ isGroup = true
+ extendee = n.extendee
+ case *syntheticMapField:
+ // shouldn't get here since we don't recurse into fields from a mapNode
+ // in generateSourceCodeInfoForMessage... but just in case
+ return
+ }
+
+ sci.newLoc(n, path)
+ if !isGroup {
+ sci.newLoc(n.fieldName(), append(path, internal.Field_nameTag))
+ sci.newLoc(n.fieldType(), append(path, internal.Field_typeTag))
+ }
+ if n.fieldLabel() != nil {
+ sci.newLoc(n.fieldLabel(), append(path, internal.Field_labelTag))
+ }
+ sci.newLoc(n.fieldTag(), append(path, internal.Field_numberTag))
+ if extendee != nil {
+ sci.newLoc(extendee.extendee, append(path, internal.Field_extendeeTag))
+ }
+
+ r.generateSourceCodeInfoForOptions(sci, opts, func(n interface{}) *optionNode {
+ return n.(*optionNode)
+ }, fld.Options.GetUninterpretedOption(), append(path, internal.Field_optionsTag))
+}
+
+type sourceCodeInfo struct {
+ locs []*dpb.SourceCodeInfo_Location
+ commentsUsed map[*comment]struct{}
+}
+
+func (sci *sourceCodeInfo) newLoc(n node, path []int32) {
+ leadingComments := n.leadingComments()
+ trailingComments := n.trailingComments()
+ if sci.commentUsed(leadingComments) {
+ leadingComments = nil
+ }
+ if sci.commentUsed(trailingComments) {
+ trailingComments = nil
+ }
+ detached := groupComments(leadingComments)
+ trail := combineComments(trailingComments)
+ var lead *string
+ if len(leadingComments) > 0 && leadingComments[len(leadingComments)-1].end.Line >= n.start().Line-1 {
+ lead = proto.String(detached[len(detached)-1])
+ detached = detached[:len(detached)-1]
+ }
+ dup := make([]int32, len(path))
+ copy(dup, path)
+ var span []int32
+ if n.start().Line == n.end().Line {
+ span = []int32{int32(n.start().Line) - 1, int32(n.start().Col) - 1, int32(n.end().Col) - 1}
+ } else {
+ span = []int32{int32(n.start().Line) - 1, int32(n.start().Col) - 1, int32(n.end().Line) - 1, int32(n.end().Col) - 1}
+ }
+ sci.locs = append(sci.locs, &dpb.SourceCodeInfo_Location{
+ LeadingDetachedComments: detached,
+ LeadingComments: lead,
+ TrailingComments: trail,
+ Path: dup,
+ Span: span,
+ })
+}
+
+func (sci *sourceCodeInfo) commentUsed(c []*comment) bool {
+ if len(c) == 0 {
+ return false
+ }
+ if _, ok := sci.commentsUsed[c[0]]; ok {
+ return true
+ }
+
+ sci.commentsUsed[c[0]] = struct{}{}
+ return false
+}
+
+func groupComments(comments []*comment) []string {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ var groups []string
+ singleLineStyle := comments[0].text[:2] == "//"
+ line := comments[0].end.Line
+ start := 0
+ for i := 1; i < len(comments); i++ {
+ c := comments[i]
+ prevSingleLine := singleLineStyle
+ singleLineStyle = strings.HasPrefix(comments[i].text, "//")
+ if !singleLineStyle || prevSingleLine != singleLineStyle || c.start.Line > line+1 {
+ // new group!
+ groups = append(groups, *combineComments(comments[start:i]))
+ start = i
+ }
+ line = c.end.Line
+ }
+ // don't forget last group
+ groups = append(groups, *combineComments(comments[start:]))
+
+ return groups
+}
+
+func combineComments(comments []*comment) *string {
+ if len(comments) == 0 {
+ return nil
+ }
+ first := true
+ var buf bytes.Buffer
+ for _, c := range comments {
+ if first {
+ first = false
+ } else {
+ buf.WriteByte('\n')
+ }
+ if c.text[:2] == "//" {
+ buf.WriteString(c.text[2:])
+ } else {
+ lines := strings.Split(c.text[2:len(c.text)-2], "\n")
+ first := true
+ for _, l := range lines {
+ if first {
+ first = false
+ } else {
+ buf.WriteByte('\n')
+ }
+
+ // strip a prefix of whitespace followed by '*'
+ j := 0
+ for j < len(l) {
+ if l[j] != ' ' && l[j] != '\t' {
+ break
+ }
+ j++
+ }
+ if j == len(l) {
+ l = ""
+ } else if l[j] == '*' {
+ l = l[j+1:]
+ } else if j > 0 {
+ l = " " + l[j:]
+ }
+
+ buf.WriteString(l)
+ }
+ }
+ }
+ return proto.String(buf.String())
+}
+
+func (sci *sourceCodeInfo) generateLocs() []*dpb.SourceCodeInfo_Location {
+ // generate intermediate locations: paths between the root (inclusive) and
+ // the leaf locations already created; these will not have comments but
+ // will have an aggregate span that runs from min(start pos) to
+ // max(end pos) across all descendant paths.
+
+ if len(sci.locs) == 0 {
+ // nothing to generate
+ return nil
+ }
+
+ var root locTrie
+ for _, loc := range sci.locs {
+ root.add(loc.Path, loc)
+ }
+ root.fillIn()
+ locs := make([]*dpb.SourceCodeInfo_Location, 0, root.countLocs())
+ root.aggregate(&locs)
+ // finally, sort the resulting slice by location
+ sort.Slice(locs, func(i, j int) bool {
+ startI, endI := getSpanPositions(locs[i].Span)
+ startJ, endJ := getSpanPositions(locs[j].Span)
+ cmp := compareSlice(startI, startJ)
+ if cmp == 0 {
+ // if start position is the same, sort by end position _decreasing_
+ // (so enclosing locations will appear before leaves)
+ cmp = -compareSlice(endI, endJ)
+ if cmp == 0 {
+ // start and end position are the same? so break ties using path
+ cmp = compareSlice(locs[i].Path, locs[j].Path)
+ }
+ }
+ return cmp < 0
+ })
+ return locs
+}
+
+type locTrie struct {
+ children map[int32]*locTrie
+ loc *dpb.SourceCodeInfo_Location
+}
+
+func (t *locTrie) add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+ if len(path) == 0 {
+ t.loc = loc
+ return
+ }
+ child := t.children[path[0]]
+ if child == nil {
+ if t.children == nil {
+ t.children = map[int32]*locTrie{}
+ }
+ child = &locTrie{}
+ t.children[path[0]] = child
+ }
+ child.add(path[1:], loc)
+}
+
+func (t *locTrie) fillIn() {
+ var path []int32
+ var start, end []int32
+ for _, child := range t.children {
+ // recurse
+ child.fillIn()
+ if t.loc == nil {
+ // maintain min(start) and max(end) so we can
+ // populate t.loc below
+ childStart, childEnd := getSpanPositions(child.loc.Span)
+
+ if start == nil {
+ if path == nil {
+ path = child.loc.Path[:len(child.loc.Path)-1]
+ }
+ start = childStart
+ end = childEnd
+ } else {
+ if compareSlice(childStart, start) < 0 {
+ start = childStart
+ }
+ if compareSlice(childEnd, end) > 0 {
+ end = childEnd
+ }
+ }
+ }
+ }
+
+ if t.loc == nil {
+ var span []int32
+ // we don't use append below because we want a new slice
+ // that doesn't share underlying buffer with spans from
+ // any other location
+ if start[0] == end[0] {
+ span = []int32{start[0], start[1], end[1]}
+ } else {
+ span = []int32{start[0], start[1], end[0], end[1]}
+ }
+ t.loc = &dpb.SourceCodeInfo_Location{
+ Path: path,
+ Span: span,
+ }
+ }
+}
+
+func (t *locTrie) countLocs() int {
+ count := 0
+ if t.loc != nil {
+ count = 1
+ }
+ for _, ch := range t.children {
+ count += ch.countLocs()
+ }
+ return count
+}
+
+func (t *locTrie) aggregate(dest *[]*dpb.SourceCodeInfo_Location) {
+ if t.loc != nil {
+ *dest = append(*dest, t.loc)
+ }
+ for _, child := range t.children {
+ child.aggregate(dest)
+ }
+}
+
+func getSpanPositions(span []int32) (start, end []int32) {
+ start = span[:2]
+ if len(span) == 3 {
+ end = []int32{span[0], span[2]}
+ } else {
+ end = span[2:]
+ }
+ return
+}
+
+func compareSlice(a, b []int32) int {
+ end := len(a)
+ if len(b) < end {
+ end = len(b)
+ }
+ for i := 0; i < end; i++ {
+ if a[i] < b[i] {
+ return -1
+ }
+ if a[i] > b[i] {
+ return 1
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(a) > len(b) {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
new file mode 100644
index 0000000..59bcdd3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
@@ -0,0 +1,49 @@
+package protoparse
+
+import (
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ // link in packages that include the standard protos included with protoc
+ _ "github.com/golang/protobuf/protoc-gen-go/plugin"
+ _ "github.com/golang/protobuf/ptypes/any"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/empty"
+ _ "github.com/golang/protobuf/ptypes/struct"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ _ "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/genproto/protobuf/api"
+ _ "google.golang.org/genproto/protobuf/field_mask"
+ _ "google.golang.org/genproto/protobuf/ptype"
+ _ "google.golang.org/genproto/protobuf/source_context"
+
+ "github.com/jhump/protoreflect/internal"
+)
+
+// All files that are included with protoc are also included with this package
+// so that clients do not need to explicitly supply a copy of these protos (just
+// like callers of protoc do not need to supply them).
+var standardImports map[string]*dpb.FileDescriptorProto
+
+func init() {
+ standardFilenames := []string{
+ "google/protobuf/any.proto",
+ "google/protobuf/api.proto",
+ "google/protobuf/compiler/plugin.proto",
+ "google/protobuf/descriptor.proto",
+ "google/protobuf/duration.proto",
+ "google/protobuf/empty.proto",
+ "google/protobuf/field_mask.proto",
+ "google/protobuf/source_context.proto",
+ "google/protobuf/struct.proto",
+ "google/protobuf/timestamp.proto",
+ "google/protobuf/type.proto",
+ "google/protobuf/wrappers.proto",
+ }
+
+ standardImports = map[string]*dpb.FileDescriptorProto{}
+ for _, fn := range standardFilenames {
+ fd, err := internal.LoadFileDescriptor(fn)
+ if err == nil {
+ standardImports[fn] = fd
+ }
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
new file mode 100644
index 0000000..c03fd64
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
@@ -0,0 +1,1696 @@
+---- desc_test_comments.proto ----
+
+
+:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:119:2
+
+
+ > syntax:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:8:19
+ Leading detached comment [0]:
+ This is the first detached comment for the syntax.
+ Leading detached comment [1]:
+
+ This is a second detached comment.
+
+ Leading detached comment [2]:
+ This is a third.
+ Leading comments:
+ Syntax comment...
+ Trailing comments:
+ Syntax trailer.
+
+
+ > package:
+desc_test_comments.proto:12:1
+desc_test_comments.proto:12:17
+ Leading comments:
+ And now the package declaration
+
+
+ > options:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+
+
+ > options > go_package:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+ Leading comments:
+ option comments FTW!!!
+
+
+ > dependency:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:18:34
+
+
+ > dependency[0]:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:17:38
+
+
+ > dependency[1]:
+desc_test_comments.proto:18:1
+desc_test_comments.proto:18:34
+
+
+ > message_type:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:89:2
+
+
+ > message_type[0]:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:89:2
+ Leading detached comment [0]:
+ Multiple white space lines (like above) cannot
+ be preserved...
+ Leading comments:
+ We need a request for our RPC service below.
+ Trailing comments:
+ And next we'll need some extensions...
+
+
+ > message_type[0] > name:
+desc_test_comments.proto:25:68
+desc_test_comments.proto:25:75
+ Leading detached comment [0]:
+ detached message name
+ Leading comments:
+ request with a capital R
+ Trailing comments:
+ trailer
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:26:3
+desc_test_comments.proto:35:54
+
+
+ > message_type[0] > options > deprecated:
+desc_test_comments.proto:26:3
+desc_test_comments.proto:26:28
+
+
+ > message_type[0] > field:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > field[0]:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:32:92
+ Leading comments:
+ A field comment
+ Trailing comments:
+ field trailer #1...
+
+
+ > message_type[0] > field[0] > label:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:29:10
+
+
+ > message_type[0] > field[0] > type:
+desc_test_comments.proto:29:11
+desc_test_comments.proto:29:16
+
+
+ > message_type[0] > field[0] > name:
+desc_test_comments.proto:29:17
+desc_test_comments.proto:29:20
+
+
+ > message_type[0] > field[0] > number:
+desc_test_comments.proto:29:63
+desc_test_comments.proto:29:64
+ Leading detached comment [0]:
+ detached tag
+ Leading comments:
+ tag numero uno
+ Trailing comments:
+ tag trailer
+ that spans multiple lines...
+ more than two.
+
+
+ > message_type[0] > field[0] > options:
+desc_test_comments.proto:32:5
+desc_test_comments.proto:32:90
+
+
+ > message_type[0] > field[0] > options > packed:
+desc_test_comments.proto:32:5
+desc_test_comments.proto:32:16
+
+
+ > message_type[0] > field[0] > json_name:
+desc_test_comments.proto:32:18
+desc_test_comments.proto:32:35
+
+
+ > message_type[0] > field[0] > options > ffubar:
+desc_test_comments.proto:32:37
+desc_test_comments.proto:32:62
+
+
+ > message_type[0] > field[0] > options > ffubar[0]:
+desc_test_comments.proto:32:37
+desc_test_comments.proto:32:62
+
+
+ > message_type[0] > field[0] > options > ffubarb:
+desc_test_comments.proto:32:64
+desc_test_comments.proto:32:90
+
+
+ > message_type[0] > options > mfubar:
+desc_test_comments.proto:35:20
+desc_test_comments.proto:35:54
+ Leading comments:
+ lead mfubar
+ Trailing comments:
+ trailing mfubar
+
+
+ > message_type[0] > field[1]:
+desc_test_comments.proto:42:22
+desc_test_comments.proto:43:63
+ Leading detached comment [0]:
+ some detached comments
+ Leading detached comment [1]:
+ some detached comments
+ Leading detached comment [2]:
+ Another field comment
+ Leading comments:
+ label comment
+
+
+ > message_type[0] > field[1] > label:
+desc_test_comments.proto:42:22
+desc_test_comments.proto:42:30
+
+
+ > message_type[0] > field[1] > type:
+desc_test_comments.proto:42:50
+desc_test_comments.proto:42:56
+ Leading comments:
+ type comment
+
+
+ > message_type[0] > field[1] > name:
+desc_test_comments.proto:42:76
+desc_test_comments.proto:42:80
+ Leading comments:
+ name comment
+
+
+ > message_type[0] > field[1] > number:
+desc_test_comments.proto:42:83
+desc_test_comments.proto:42:84
+
+
+ > message_type[0] > field[1] > default_value:
+desc_test_comments.proto:43:23
+desc_test_comments.proto:43:40
+ Leading comments:
+ default lead
+ Trailing comments:
+ default trail
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[0]:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[0] > start:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:46:16
+
+
+ > message_type[0] > extension_range[0] > end:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[1]:
+desc_test_comments.proto:47:13
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > start:
+desc_test_comments.proto:47:13
+desc_test_comments.proto:47:16
+
+
+ > message_type[0] > extension_range[1] > end:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > options:
+desc_test_comments.proto:47:25
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > extension_range[1] > options > exfubarb:
+desc_test_comments.proto:47:25
+desc_test_comments.proto:47:67
+
+
+ > message_type[0] > extension_range[1] > options > exfubar:
+desc_test_comments.proto:47:69
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > extension_range[1] > options > exfubar[0]:
+desc_test_comments.proto:47:69
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > reserved_range:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_range[0]:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:58
+
+
+ > message_type[0] > reserved_range[0] > start:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:52
+
+
+ > message_type[0] > reserved_range[0] > end:
+desc_test_comments.proto:51:56
+desc_test_comments.proto:51:58
+
+
+ > message_type[0] > reserved_range[1]:
+desc_test_comments.proto:51:60
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_range[1] > start:
+desc_test_comments.proto:51:60
+desc_test_comments.proto:51:62
+
+
+ > message_type[0] > reserved_range[1] > end:
+desc_test_comments.proto:51:66
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_name:
+desc_test_comments.proto:52:11
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > reserved_name[0]:
+desc_test_comments.proto:52:11
+desc_test_comments.proto:52:16
+
+
+ > message_type[0] > reserved_name[1]:
+desc_test_comments.proto:52:18
+desc_test_comments.proto:52:23
+
+
+ > message_type[0] > reserved_name[2]:
+desc_test_comments.proto:52:25
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > field[2]:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+ Leading comments:
+ Group comment
+
+
+ > message_type[0] > nested_type:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > nested_type[0]:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > field[2] > label:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:55:10
+
+
+ > message_type[0] > nested_type[0] > name:
+desc_test_comments.proto:55:34
+desc_test_comments.proto:55:40
+ Leading comments:
+ group name
+
+
+ > message_type[0] > field[2] > number:
+desc_test_comments.proto:55:43
+desc_test_comments.proto:55:44
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:56:3
+desc_test_comments.proto:61:50
+
+
+ > message_type[0] > nested_type[0] > options > mfubar:
+desc_test_comments.proto:56:3
+desc_test_comments.proto:56:38
+
+
+ > message_type[0] > nested_type[0] > field:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:64:27
+
+
+ > message_type[0] > nested_type[0] > field[0]:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:58:27
+
+
+ > message_type[0] > nested_type[0] > field[0] > label:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:58:11
+
+
+ > message_type[0] > nested_type[0] > field[0] > type:
+desc_test_comments.proto:58:12
+desc_test_comments.proto:58:18
+
+
+ > message_type[0] > nested_type[0] > field[0] > name:
+desc_test_comments.proto:58:19
+desc_test_comments.proto:58:22
+
+
+ > message_type[0] > nested_type[0] > field[0] > number:
+desc_test_comments.proto:58:25
+desc_test_comments.proto:58:26
+
+
+ > message_type[0] > nested_type[0] > field[1]:
+desc_test_comments.proto:59:3
+desc_test_comments.proto:59:26
+
+
+ > message_type[0] > nested_type[0] > field[1] > label:
+desc_test_comments.proto:59:3
+desc_test_comments.proto:59:11
+
+
+ > message_type[0] > nested_type[0] > field[1] > type:
+desc_test_comments.proto:59:12
+desc_test_comments.proto:59:17
+
+
+ > message_type[0] > nested_type[0] > field[1] > name:
+desc_test_comments.proto:59:18
+desc_test_comments.proto:59:21
+
+
+ > message_type[0] > nested_type[0] > field[1] > number:
+desc_test_comments.proto:59:24
+desc_test_comments.proto:59:25
+
+
+ > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor:
+desc_test_comments.proto:61:3
+desc_test_comments.proto:61:50
+
+
+ > message_type[0] > nested_type[0] > field[2]:
+desc_test_comments.proto:64:3
+desc_test_comments.proto:64:27
+ Leading comments:
+ Leading comment...
+ Trailing comments:
+ Trailing comment...
+
+
+ > message_type[0] > nested_type[0] > field[2] > label:
+desc_test_comments.proto:64:3
+desc_test_comments.proto:64:11
+
+
+ > message_type[0] > nested_type[0] > field[2] > type:
+desc_test_comments.proto:64:12
+desc_test_comments.proto:64:18
+
+
+ > message_type[0] > nested_type[0] > field[2] > name:
+desc_test_comments.proto:64:19
+desc_test_comments.proto:64:22
+
+
+ > message_type[0] > nested_type[0] > field[2] > number:
+desc_test_comments.proto:64:25
+desc_test_comments.proto:64:26
+
+
+ > message_type[0] > enum_type:
+desc_test_comments.proto:68:2
+desc_test_comments.proto:88:3
+
+
+ > message_type[0] > enum_type[0]:
+desc_test_comments.proto:68:2
+desc_test_comments.proto:88:3
+
+
+ > message_type[0] > enum_type[0] > name:
+desc_test_comments.proto:68:7
+desc_test_comments.proto:68:22
+ Trailing comments:
+ "super"!
+
+
+ > message_type[0] > enum_type[0] > value:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:85:17
+
+
+ > message_type[0] > enum_type[0] > value[0]:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:72:72
+
+
+ > message_type[0] > enum_type[0] > value[0] > name:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:72:8
+
+
+ > message_type[0] > enum_type[0] > value[0] > number:
+desc_test_comments.proto:72:11
+desc_test_comments.proto:72:12
+
+
+ > message_type[0] > enum_type[0] > value[0] > options:
+desc_test_comments.proto:72:14
+desc_test_comments.proto:72:70
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubars:
+desc_test_comments.proto:72:14
+desc_test_comments.proto:72:42
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubar:
+desc_test_comments.proto:72:44
+desc_test_comments.proto:72:70
+
+
+ > message_type[0] > enum_type[0] > value[1]:
+desc_test_comments.proto:73:3
+desc_test_comments.proto:73:86
+
+
+ > message_type[0] > enum_type[0] > value[1] > name:
+desc_test_comments.proto:73:3
+desc_test_comments.proto:73:8
+
+
+ > message_type[0] > enum_type[0] > value[1] > number:
+desc_test_comments.proto:73:11
+desc_test_comments.proto:73:12
+
+
+ > message_type[0] > enum_type[0] > value[1] > options:
+desc_test_comments.proto:73:15
+desc_test_comments.proto:73:84
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaruf:
+desc_test_comments.proto:73:15
+desc_test_comments.proto:73:43
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaru:
+desc_test_comments.proto:73:59
+desc_test_comments.proto:73:84
+
+
+ > message_type[0] > enum_type[0] > value[2]:
+desc_test_comments.proto:74:3
+desc_test_comments.proto:74:13
+
+
+ > message_type[0] > enum_type[0] > value[2] > name:
+desc_test_comments.proto:74:3
+desc_test_comments.proto:74:8
+
+
+ > message_type[0] > enum_type[0] > value[2] > number:
+desc_test_comments.proto:74:11
+desc_test_comments.proto:74:12
+
+
+ > message_type[0] > enum_type[0] > value[3]:
+desc_test_comments.proto:75:3
+desc_test_comments.proto:75:14
+
+
+ > message_type[0] > enum_type[0] > value[3] > name:
+desc_test_comments.proto:75:3
+desc_test_comments.proto:75:9
+
+
+ > message_type[0] > enum_type[0] > value[3] > number:
+desc_test_comments.proto:75:12
+desc_test_comments.proto:75:13
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:77:3
+desc_test_comments.proto:87:36
+
+
+ > message_type[0] > enum_type[0] > options > efubars:
+desc_test_comments.proto:77:3
+desc_test_comments.proto:77:38
+
+
+ > message_type[0] > enum_type[0] > value[4]:
+desc_test_comments.proto:79:3
+desc_test_comments.proto:79:13
+
+
+ > message_type[0] > enum_type[0] > value[4] > name:
+desc_test_comments.proto:79:3
+desc_test_comments.proto:79:8
+
+
+ > message_type[0] > enum_type[0] > value[4] > number:
+desc_test_comments.proto:79:11
+desc_test_comments.proto:79:12
+
+
+ > message_type[0] > enum_type[0] > value[5]:
+desc_test_comments.proto:80:3
+desc_test_comments.proto:80:15
+
+
+ > message_type[0] > enum_type[0] > value[5] > name:
+desc_test_comments.proto:80:3
+desc_test_comments.proto:80:10
+
+
+ > message_type[0] > enum_type[0] > value[5] > number:
+desc_test_comments.proto:80:13
+desc_test_comments.proto:80:14
+
+
+ > message_type[0] > enum_type[0] > value[6]:
+desc_test_comments.proto:81:3
+desc_test_comments.proto:81:46
+
+
+ > message_type[0] > enum_type[0] > value[6] > name:
+desc_test_comments.proto:81:3
+desc_test_comments.proto:81:10
+
+
+ > message_type[0] > enum_type[0] > value[6] > number:
+desc_test_comments.proto:81:13
+desc_test_comments.proto:81:14
+
+
+ > message_type[0] > enum_type[0] > value[6] > options:
+desc_test_comments.proto:81:16
+desc_test_comments.proto:81:44
+
+
+ > message_type[0] > enum_type[0] > value[6] > options > evfubarsf:
+desc_test_comments.proto:81:16
+desc_test_comments.proto:81:44
+
+
+ > message_type[0] > enum_type[0] > value[7]:
+desc_test_comments.proto:82:3
+desc_test_comments.proto:82:14
+
+
+ > message_type[0] > enum_type[0] > value[7] > name:
+desc_test_comments.proto:82:3
+desc_test_comments.proto:82:9
+
+
+ > message_type[0] > enum_type[0] > value[7] > number:
+desc_test_comments.proto:82:12
+desc_test_comments.proto:82:13
+
+
+ > message_type[0] > enum_type[0] > value[8]:
+desc_test_comments.proto:83:3
+desc_test_comments.proto:83:17
+
+
+ > message_type[0] > enum_type[0] > value[8] > name:
+desc_test_comments.proto:83:3
+desc_test_comments.proto:83:12
+
+
+ > message_type[0] > enum_type[0] > value[8] > number:
+desc_test_comments.proto:83:15
+desc_test_comments.proto:83:16
+
+
+ > message_type[0] > enum_type[0] > value[9]:
+desc_test_comments.proto:84:3
+desc_test_comments.proto:84:13
+
+
+ > message_type[0] > enum_type[0] > value[9] > name:
+desc_test_comments.proto:84:3
+desc_test_comments.proto:84:8
+
+
+ > message_type[0] > enum_type[0] > value[9] > number:
+desc_test_comments.proto:84:11
+desc_test_comments.proto:84:12
+
+
+ > message_type[0] > enum_type[0] > value[10]:
+desc_test_comments.proto:85:3
+desc_test_comments.proto:85:17
+
+
+ > message_type[0] > enum_type[0] > value[10] > name:
+desc_test_comments.proto:85:3
+desc_test_comments.proto:85:9
+
+
+ > message_type[0] > enum_type[0] > value[10] > number:
+desc_test_comments.proto:85:12
+desc_test_comments.proto:85:16
+
+
+ > message_type[0] > enum_type[0] > options > efubar:
+desc_test_comments.proto:87:3
+desc_test_comments.proto:87:36
+
+
+ > extension[0] > extendee:
+desc_test_comments.proto:94:1
+desc_test_comments.proto:94:8
+ Leading comments:
+ extendee comment
+
+
+ > extension[1] > extendee:
+desc_test_comments.proto:94:1
+desc_test_comments.proto:94:8
+
+
+ > extension:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:98:30
+
+
+ > extension[0]:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:96:30
+ Leading comments:
+ comment for guid1
+
+
+ > extension[0] > label:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:96:10
+
+
+ > extension[0] > type:
+desc_test_comments.proto:96:11
+desc_test_comments.proto:96:17
+
+
+ > extension[0] > name:
+desc_test_comments.proto:96:18
+desc_test_comments.proto:96:23
+
+
+ > extension[0] > number:
+desc_test_comments.proto:96:26
+desc_test_comments.proto:96:29
+
+
+ > extension[1]:
+desc_test_comments.proto:98:2
+desc_test_comments.proto:98:30
+ Leading comments:
+ ... and a comment for guid2
+
+
+ > extension[1] > label:
+desc_test_comments.proto:98:2
+desc_test_comments.proto:98:10
+
+
+ > extension[1] > type:
+desc_test_comments.proto:98:11
+desc_test_comments.proto:98:17
+
+
+ > extension[1] > name:
+desc_test_comments.proto:98:18
+desc_test_comments.proto:98:23
+
+
+ > extension[1] > number:
+desc_test_comments.proto:98:26
+desc_test_comments.proto:98:29
+
+
+ > service:
+desc_test_comments.proto:103:1
+desc_test_comments.proto:119:2
+
+
+ > service[0]:
+desc_test_comments.proto:103:1
+desc_test_comments.proto:119:2
+ Leading comments:
+ Service comment
+ Trailing comments:
+ service trailer
+
+
+ > service[0] > name:
+desc_test_comments.proto:103:28
+desc_test_comments.proto:103:38
+ Leading comments:
+ service name
+
+
+ > service[0] > options:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:108:38
+
+
+ > service[0] > options > sfubar:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:105:40
+
+
+ > service[0] > options > sfubar > id:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:104:36
+
+
+ > service[0] > options > sfubar > name:
+desc_test_comments.proto:105:2
+desc_test_comments.proto:105:40
+
+
+ > service[0] > options > deprecated:
+desc_test_comments.proto:106:2
+desc_test_comments.proto:106:28
+
+
+ > service[0] > options > sfubare:
+desc_test_comments.proto:108:2
+desc_test_comments.proto:108:38
+
+
+ > service[0] > method:
+desc_test_comments.proto:111:2
+desc_test_comments.proto:118:3
+
+
+ > service[0] > method[0]:
+desc_test_comments.proto:111:2
+desc_test_comments.proto:112:70
+ Leading comments:
+ Method comment
+
+
+ > service[0] > method[0] > name:
+desc_test_comments.proto:111:21
+desc_test_comments.proto:111:33
+ Leading comments:
+ rpc name
+ Trailing comments:
+ comment A
+
+
+ > service[0] > method[0] > client_streaming:
+desc_test_comments.proto:111:66
+desc_test_comments.proto:111:72
+ Leading comments:
+ comment B
+
+
+ > service[0] > method[0] > input_type:
+desc_test_comments.proto:111:89
+desc_test_comments.proto:111:96
+ Leading comments:
+ comment C
+
+
+ > service[0] > method[0] > output_type:
+desc_test_comments.proto:112:43
+desc_test_comments.proto:112:50
+ Leading comments:
+comment E
+
+
+ > service[0] > method[1]:
+desc_test_comments.proto:114:2
+desc_test_comments.proto:118:3
+
+
+ > service[0] > method[1] > name:
+desc_test_comments.proto:114:6
+desc_test_comments.proto:114:14
+
+
+ > service[0] > method[1] > input_type:
+desc_test_comments.proto:114:16
+desc_test_comments.proto:114:23
+
+
+ > service[0] > method[1] > output_type:
+desc_test_comments.proto:114:34
+desc_test_comments.proto:114:55
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:115:3
+desc_test_comments.proto:117:42
+
+
+ > service[0] > method[1] > options > deprecated:
+desc_test_comments.proto:115:3
+desc_test_comments.proto:115:28
+
+
+ > service[0] > method[1] > options > mtfubar:
+desc_test_comments.proto:116:3
+desc_test_comments.proto:116:39
+
+
+ > service[0] > method[1] > options > mtfubar[0]:
+desc_test_comments.proto:116:3
+desc_test_comments.proto:116:39
+
+
+ > service[0] > method[1] > options > mtfubard:
+desc_test_comments.proto:117:3
+desc_test_comments.proto:117:42
+---- desc_test_options.proto ----
+
+
+:
+desc_test_options.proto:1:1
+desc_test_options.proto:62:34
+
+
+ > syntax:
+desc_test_options.proto:1:1
+desc_test_options.proto:1:19
+
+
+ > options:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > options > go_package:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > package:
+desc_test_options.proto:5:1
+desc_test_options.proto:5:20
+
+
+ > dependency:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > dependency[0]:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > extension[0] > extendee:
+desc_test_options.proto:9:8
+desc_test_options.proto:9:38
+
+
+ > extension:
+desc_test_options.proto:10:2
+desc_test_options.proto:62:34
+
+
+ > extension[0]:
+desc_test_options.proto:10:2
+desc_test_options.proto:10:31
+
+
+ > extension[0] > label:
+desc_test_options.proto:10:2
+desc_test_options.proto:10:10
+
+
+ > extension[0] > type:
+desc_test_options.proto:10:11
+desc_test_options.proto:10:15
+
+
+ > extension[0] > name:
+desc_test_options.proto:10:16
+desc_test_options.proto:10:22
+
+
+ > extension[0] > number:
+desc_test_options.proto:10:25
+desc_test_options.proto:10:30
+
+
+ > extension[1] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[2] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[1]:
+desc_test_options.proto:14:2
+desc_test_options.proto:14:33
+
+
+ > extension[1] > label:
+desc_test_options.proto:14:2
+desc_test_options.proto:14:10
+
+
+ > extension[1] > type:
+desc_test_options.proto:14:11
+desc_test_options.proto:14:17
+
+
+ > extension[1] > name:
+desc_test_options.proto:14:18
+desc_test_options.proto:14:24
+
+
+ > extension[1] > number:
+desc_test_options.proto:14:27
+desc_test_options.proto:14:32
+
+
+ > extension[2]:
+desc_test_options.proto:15:2
+desc_test_options.proto:15:33
+
+
+ > extension[2] > label:
+desc_test_options.proto:15:2
+desc_test_options.proto:15:10
+
+
+ > extension[2] > type:
+desc_test_options.proto:15:11
+desc_test_options.proto:15:16
+
+
+ > extension[2] > name:
+desc_test_options.proto:15:17
+desc_test_options.proto:15:24
+
+
+ > extension[2] > number:
+desc_test_options.proto:15:27
+desc_test_options.proto:15:32
+
+
+ > extension[3] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[4] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[5] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[6] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[7] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[3]:
+desc_test_options.proto:19:2
+desc_test_options.proto:19:32
+
+
+ > extension[3] > label:
+desc_test_options.proto:19:2
+desc_test_options.proto:19:10
+
+
+ > extension[3] > type:
+desc_test_options.proto:19:11
+desc_test_options.proto:19:16
+
+
+ > extension[3] > name:
+desc_test_options.proto:19:17
+desc_test_options.proto:19:23
+
+
+ > extension[3] > number:
+desc_test_options.proto:19:26
+desc_test_options.proto:19:31
+
+
+ > extension[4]:
+desc_test_options.proto:20:2
+desc_test_options.proto:20:34
+
+
+ > extension[4] > label:
+desc_test_options.proto:20:2
+desc_test_options.proto:20:10
+
+
+ > extension[4] > type:
+desc_test_options.proto:20:11
+desc_test_options.proto:20:17
+
+
+ > extension[4] > name:
+desc_test_options.proto:20:18
+desc_test_options.proto:20:25
+
+
+ > extension[4] > number:
+desc_test_options.proto:20:28
+desc_test_options.proto:20:33
+
+
+ > extension[5]:
+desc_test_options.proto:21:2
+desc_test_options.proto:21:37
+
+
+ > extension[5] > label:
+desc_test_options.proto:21:2
+desc_test_options.proto:21:10
+
+
+ > extension[5] > type:
+desc_test_options.proto:21:11
+desc_test_options.proto:21:19
+
+
+ > extension[5] > name:
+desc_test_options.proto:21:20
+desc_test_options.proto:21:28
+
+
+ > extension[5] > number:
+desc_test_options.proto:21:31
+desc_test_options.proto:21:36
+
+
+ > extension[6]:
+desc_test_options.proto:22:2
+desc_test_options.proto:22:34
+
+
+ > extension[6] > label:
+desc_test_options.proto:22:2
+desc_test_options.proto:22:10
+
+
+ > extension[6] > type:
+desc_test_options.proto:22:11
+desc_test_options.proto:22:17
+
+
+ > extension[6] > name:
+desc_test_options.proto:22:18
+desc_test_options.proto:22:25
+
+
+ > extension[6] > number:
+desc_test_options.proto:22:28
+desc_test_options.proto:22:33
+
+
+ > extension[7]:
+desc_test_options.proto:23:2
+desc_test_options.proto:23:36
+
+
+ > extension[7] > label:
+desc_test_options.proto:23:2
+desc_test_options.proto:23:10
+
+
+ > extension[7] > type:
+desc_test_options.proto:23:11
+desc_test_options.proto:23:18
+
+
+ > extension[7] > name:
+desc_test_options.proto:23:19
+desc_test_options.proto:23:27
+
+
+ > extension[7] > number:
+desc_test_options.proto:23:30
+desc_test_options.proto:23:35
+
+
+ > extension[8] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[9] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[10] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[11] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[12] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[8]:
+desc_test_options.proto:27:2
+desc_test_options.proto:27:33
+
+
+ > extension[8] > label:
+desc_test_options.proto:27:2
+desc_test_options.proto:27:10
+
+
+ > extension[8] > type:
+desc_test_options.proto:27:11
+desc_test_options.proto:27:16
+
+
+ > extension[8] > name:
+desc_test_options.proto:27:17
+desc_test_options.proto:27:24
+
+
+ > extension[8] > number:
+desc_test_options.proto:27:27
+desc_test_options.proto:27:32
+
+
+ > extension[9]:
+desc_test_options.proto:28:2
+desc_test_options.proto:28:35
+
+
+ > extension[9] > label:
+desc_test_options.proto:28:2
+desc_test_options.proto:28:10
+
+
+ > extension[9] > type:
+desc_test_options.proto:28:11
+desc_test_options.proto:28:17
+
+
+ > extension[9] > name:
+desc_test_options.proto:28:18
+desc_test_options.proto:28:26
+
+
+ > extension[9] > number:
+desc_test_options.proto:28:29
+desc_test_options.proto:28:34
+
+
+ > extension[10]:
+desc_test_options.proto:29:2
+desc_test_options.proto:29:38
+
+
+ > extension[10] > label:
+desc_test_options.proto:29:2
+desc_test_options.proto:29:10
+
+
+ > extension[10] > type:
+desc_test_options.proto:29:11
+desc_test_options.proto:29:19
+
+
+ > extension[10] > name:
+desc_test_options.proto:29:20
+desc_test_options.proto:29:29
+
+
+ > extension[10] > number:
+desc_test_options.proto:29:32
+desc_test_options.proto:29:37
+
+
+ > extension[11]:
+desc_test_options.proto:30:2
+desc_test_options.proto:30:35
+
+
+ > extension[11] > label:
+desc_test_options.proto:30:2
+desc_test_options.proto:30:10
+
+
+ > extension[11] > type:
+desc_test_options.proto:30:11
+desc_test_options.proto:30:17
+
+
+ > extension[11] > name:
+desc_test_options.proto:30:18
+desc_test_options.proto:30:26
+
+
+ > extension[11] > number:
+desc_test_options.proto:30:29
+desc_test_options.proto:30:34
+
+
+ > extension[12]:
+desc_test_options.proto:31:2
+desc_test_options.proto:31:37
+
+
+ > extension[12] > label:
+desc_test_options.proto:31:2
+desc_test_options.proto:31:10
+
+
+ > extension[12] > type:
+desc_test_options.proto:31:11
+desc_test_options.proto:31:18
+
+
+ > extension[12] > name:
+desc_test_options.proto:31:19
+desc_test_options.proto:31:28
+
+
+ > extension[12] > number:
+desc_test_options.proto:31:31
+desc_test_options.proto:31:36
+
+
+ > extension[13] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[14] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[13]:
+desc_test_options.proto:35:2
+desc_test_options.proto:35:46
+
+
+ > extension[13] > label:
+desc_test_options.proto:35:2
+desc_test_options.proto:35:10
+
+
+ > extension[13] > type:
+desc_test_options.proto:35:11
+desc_test_options.proto:35:30
+
+
+ > extension[13] > name:
+desc_test_options.proto:35:31
+desc_test_options.proto:35:37
+
+
+ > extension[13] > number:
+desc_test_options.proto:35:40
+desc_test_options.proto:35:45
+
+
+ > extension[14]:
+desc_test_options.proto:36:2
+desc_test_options.proto:36:44
+
+
+ > extension[14] > label:
+desc_test_options.proto:36:2
+desc_test_options.proto:36:10
+
+
+ > extension[14] > type:
+desc_test_options.proto:36:11
+desc_test_options.proto:36:27
+
+
+ > extension[14] > name:
+desc_test_options.proto:36:28
+desc_test_options.proto:36:35
+
+
+ > extension[14] > number:
+desc_test_options.proto:36:38
+desc_test_options.proto:36:43
+
+
+ > extension[15] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[16] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[15]:
+desc_test_options.proto:40:2
+desc_test_options.proto:40:33
+
+
+ > extension[15] > label:
+desc_test_options.proto:40:2
+desc_test_options.proto:40:10
+
+
+ > extension[15] > type:
+desc_test_options.proto:40:11
+desc_test_options.proto:40:16
+
+
+ > extension[15] > name:
+desc_test_options.proto:40:17
+desc_test_options.proto:40:24
+
+
+ > extension[15] > number:
+desc_test_options.proto:40:27
+desc_test_options.proto:40:32
+
+
+ > extension[16]:
+desc_test_options.proto:41:2
+desc_test_options.proto:41:35
+
+
+ > extension[16] > label:
+desc_test_options.proto:41:2
+desc_test_options.proto:41:10
+
+
+ > extension[16] > type:
+desc_test_options.proto:41:11
+desc_test_options.proto:41:17
+
+
+ > extension[16] > name:
+desc_test_options.proto:41:18
+desc_test_options.proto:41:26
+
+
+ > extension[16] > number:
+desc_test_options.proto:41:29
+desc_test_options.proto:41:34
+
+
+ > message_type:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+
+
+ > message_type[0]:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+ Leading comments:
+ Test message used by custom options
+
+
+ > message_type[0] > name:
+desc_test_options.proto:45:9
+desc_test_options.proto:45:28
+
+
+ > message_type[0] > field:
+desc_test_options.proto:46:2
+desc_test_options.proto:47:27
+
+
+ > message_type[0] > field[0]:
+desc_test_options.proto:46:2
+desc_test_options.proto:46:25
+
+
+ > message_type[0] > field[0] > label:
+desc_test_options.proto:46:2
+desc_test_options.proto:46:10
+
+
+ > message_type[0] > field[0] > type:
+desc_test_options.proto:46:11
+desc_test_options.proto:46:17
+
+
+ > message_type[0] > field[0] > name:
+desc_test_options.proto:46:18
+desc_test_options.proto:46:20
+
+
+ > message_type[0] > field[0] > number:
+desc_test_options.proto:46:23
+desc_test_options.proto:46:24
+
+
+ > message_type[0] > field[1]:
+desc_test_options.proto:47:2
+desc_test_options.proto:47:27
+
+
+ > message_type[0] > field[1] > label:
+desc_test_options.proto:47:2
+desc_test_options.proto:47:10
+
+
+ > message_type[0] > field[1] > type:
+desc_test_options.proto:47:11
+desc_test_options.proto:47:17
+
+
+ > message_type[0] > field[1] > name:
+desc_test_options.proto:47:18
+desc_test_options.proto:47:22
+
+
+ > message_type[0] > field[1] > number:
+desc_test_options.proto:47:25
+desc_test_options.proto:47:26
+
+
+ > enum_type:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+
+
+ > enum_type[0]:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+ Leading comments:
+ Test enum used by custom options
+
+
+ > enum_type[0] > name:
+desc_test_options.proto:51:6
+desc_test_options.proto:51:22
+
+
+ > enum_type[0] > value:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:12
+
+
+ > enum_type[0] > value[0]:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:12
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:7
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_options.proto:52:10
+desc_test_options.proto:52:11
+
+
+ > extension[17] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[18] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[17]:
+desc_test_options.proto:56:2
+desc_test_options.proto:56:34
+
+
+ > extension[17] > label:
+desc_test_options.proto:56:2
+desc_test_options.proto:56:10
+
+
+ > extension[17] > type:
+desc_test_options.proto:56:11
+desc_test_options.proto:56:17
+
+
+ > extension[17] > name:
+desc_test_options.proto:56:18
+desc_test_options.proto:56:25
+
+
+ > extension[17] > number:
+desc_test_options.proto:56:28
+desc_test_options.proto:56:33
+
+
+ > extension[18]:
+desc_test_options.proto:57:2
+desc_test_options.proto:57:34
+
+
+ > extension[18] > label:
+desc_test_options.proto:57:2
+desc_test_options.proto:57:10
+
+
+ > extension[18] > type:
+desc_test_options.proto:57:11
+desc_test_options.proto:57:16
+
+
+ > extension[18] > name:
+desc_test_options.proto:57:17
+desc_test_options.proto:57:25
+
+
+ > extension[18] > number:
+desc_test_options.proto:57:28
+desc_test_options.proto:57:33
+
+
+ > extension[19] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[20] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[19]:
+desc_test_options.proto:61:2
+desc_test_options.proto:61:34
+
+
+ > extension[19] > label:
+desc_test_options.proto:61:2
+desc_test_options.proto:61:10
+
+
+ > extension[19] > type:
+desc_test_options.proto:61:11
+desc_test_options.proto:61:17
+
+
+ > extension[19] > name:
+desc_test_options.proto:61:18
+desc_test_options.proto:61:25
+
+
+ > extension[19] > number:
+desc_test_options.proto:61:28
+desc_test_options.proto:61:33
+
+
+ > extension[20]:
+desc_test_options.proto:62:2
+desc_test_options.proto:62:34
+
+
+ > extension[20] > label:
+desc_test_options.proto:62:2
+desc_test_options.proto:62:10
+
+
+ > extension[20] > type:
+desc_test_options.proto:62:11
+desc_test_options.proto:62:16
+
+
+ > extension[20] > name:
+desc_test_options.proto:62:17
+desc_test_options.proto:62:25
+
+
+ > extension[20] > number:
+desc_test_options.proto:62:28
+desc_test_options.proto:62:33
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
new file mode 100644
index 0000000..b56e8ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
@@ -0,0 +1,7 @@
+// Package protoprint provides a mechanism to generate protobuf source code
+// from descriptors.
+//
+// This can be useful to turn file descriptor sets (produced by protoc) back
+// into proto IDL code. Combined with the protoreflect/builder package, it can
+// also be used to perform code generation of proto source code.
+package protoprint
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
new file mode 100644
index 0000000..d8f7f22
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
@@ -0,0 +1,2288 @@
+package protoprint
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
+// Printer knows how to format file descriptors as proto source code. Its fields
+// provide some control over how the resulting source file is constructed and
+// formatted.
+//
+// The zero value is a usable printer with default settings.
+type Printer struct {
+	// If true, comments are rendered using "/*" style comments. Otherwise, they
+	// are printed using "//" style line comments.
+	PreferMultiLineStyleComments bool
+
+	// If true, elements are sorted into a canonical order.
+	//
+	// The canonical order for elements in a file follows:
+	//  1. Syntax
+	//  2. Package
+	//  3. Imports (sorted lexically)
+	//  4. Options (sorted by name, standard options before custom options)
+	//  5. Messages (sorted by name)
+	//  6. Enums (sorted by name)
+	//  7. Services (sorted by name)
+	//  8. Extensions (grouped by extendee, sorted by extendee+tag)
+	//
+	// The canonical order of elements in a message follows:
+	//  1. Options (sorted by name, standard options before custom options)
+	//  2. Fields and One-Ofs (sorted by tag; one-ofs interleaved based on the
+	//     minimum tag therein)
+	//  3. Nested Messages (sorted by name)
+	//  4. Nested Enums (sorted by name)
+	//  5. Extension ranges (sorted by starting tag number)
+	//  6. Nested Extensions (grouped by extendee, sorted by extendee+tag)
+	//  7. Reserved ranges (sorted by starting tag number)
+	//  8. Reserved names (sorted lexically)
+	//
+	// Methods are sorted within a service by name and appear after any service
+	// options (which are sorted by name, standard options before custom ones).
+	// Enum values are sorted within an enum, first by numeric value then by
+	// name, and also appear after any enum options.
+	//
+	// Options for fields, enum values, and extension ranges are sorted by name,
+	// standard options before custom ones.
+	SortElements bool
+
+	// The indentation used. Any characters other than spaces or tabs will be
+	// replaced with spaces. If unset/empty, two spaces will be used.
+	Indent string
+
+	// If true, detached comments (between elements) will be ignored.
+	//
+	// Deprecated: Use OmitComments bitmask instead.
+	OmitDetachedComments bool
+
+	// A bitmask of comment types to omit. If unset, all comments will be
+	// included. Use CommentsAll to not print any comments.
+	OmitComments CommentType
+
+	// If true, trailing comments that typically appear on the same line as an
+	// element (option, field, enum value, method) will be printed on a separate
+	// line instead.
+	//
+	// So, with this set, you'll get output like so:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1;
+	//    // trailing comment
+	//
+	// If left false, the printer will try to emit trailing comments on the same
+	// line instead:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1; // trailing comment
+	//
+	// If the trailing comment has more than one line, it will automatically be
+	// forced to the next line. Also, elements that end with "}" instead of ";"
+	// will have trailing comments rendered on the subsequent line.
+	TrailingCommentsOnSeparateLine bool
+
+	// If true, the printed output will eschew any blank lines, which otherwise
+	// appear between descriptor elements and comment blocks. Note that if
+	// detached comments are being printed, this will cause them to be merged
+	// into the subsequent leading comments. Similarly, any element trailing
+	// comments will be merged into the subsequent leading comments.
+	Compact bool
+
+	// If true, all references to messages, extensions, and enums (such as in
+	// options, field types, and method request and response types) will be
+	// fully-qualified. When left unset, the referenced elements will contain
+	// only as much qualifier as is required.
+	//
+	// For example, if a message is in the same package as the reference, the
+	// simple name can be used. If a message shares some context with the
+	// reference, only the unshared context needs to be included. For example:
+	//
+	//  message Foo {
+	//    message Bar {
+	//      enum Baz {
+	//        ZERO = 0;
+	//        ONE = 1;
+	//      }
+	//    }
+	//
+	//    // This field shares some context as the enum it references: they are
+	//    // both inside of the namespace Foo:
+	//    //    field is "Foo.my_baz"
+	//    //    enum is "Foo.Bar.Baz"
+	//    // So we only need to qualify the reference with the context that they
+	//    // do NOT have in common:
+	//    Bar.Baz my_baz = 1;
+	//  }
+	//
+	// When printing fully-qualified names, they will be preceded by a dot, to
+	// avoid any ambiguity that they might be relative vs. fully-qualified.
+	ForceFullyQualifiedNames bool
+}
+
+// CommentType identifies a kind of comment in a proto source file. Values can
+// be combined as a bitmask (e.g. in Printer.OmitComments).
+type CommentType int
+
+const (
+	// CommentsDetached refers to comments that are not "attached" to any
+	// source element. They are attributed to the subsequent element in the
+	// file as "detached" comments.
+	CommentsDetached CommentType = 1 << iota
+	// CommentsTrailing refers to a comment block immediately following an
+	// element in the source file. If another element immediately follows
+	// the trailing comment, it is instead considered a leading comment for
+	// that subsequent element.
+	CommentsTrailing
+	// CommentsLeading refers to a comment block immediately preceding an
+	// element in the source file. For high-level elements (those that have
+	// their own descriptor), these are used as doc comments for that element.
+	CommentsLeading
+	// CommentsTokens refers to any comments (leading, trailing, or detached)
+	// on low-level elements in the file. "High-level" elements have their own
+	// descriptors, e.g. messages, enums, fields, services, and methods. But
+	// comments can appear anywhere (such as around identifiers and keywords,
+	// sprinkled inside the declarations of a high-level element). This class
+	// of comments are for those extra comments sprinkled into the file.
+	CommentsTokens
+
+	// CommentsNonDoc refers to comments that are *not* doc comments. This is a
+	// bitwise union of everything other than CommentsLeading. If you configure
+	// a printer to omit this, only doc comments on descriptor elements will be
+	// included in the printed output.
+	CommentsNonDoc = CommentsDetached | CommentsTrailing | CommentsTokens
+	// CommentsAll indicates all kinds of comments. If you configure a printer
+	// to omit this, no comments will appear in the printed output, even if the
+	// input descriptors had source info and comments. (As -1, it has every bit
+	// set, so it also covers any comment kinds added in the future.)
+	CommentsAll = -1
+)
+
+// PrintProtoFiles prints all of the given file descriptors. The given open
+// function is given a file name and is responsible for creating the outputs and
+// returning the corresponding writer.
+func (p *Printer) PrintProtoFiles(fds []*desc.FileDescriptor, open func(name string) (io.WriteCloser, error)) error {
+	for _, fd := range fds {
+		out, err := open(fd.GetName())
+		if err != nil {
+			return fmt.Errorf("failed to open %s: %v", fd.GetName(), err)
+		}
+		if err := printAndClose(p, fd, out); err != nil {
+			return fmt.Errorf("failed to write %s: %v", fd.GetName(), err)
+		}
+	}
+	return nil
+}
+
+// printAndClose renders fd to out, closing out in all cases (including a
+// panic during printing), and returns any error from printing.
+func printAndClose(p *Printer, fd *desc.FileDescriptor, out io.WriteCloser) error {
+	defer out.Close()
+	return p.PrintProtoFile(fd, out)
+}
+
+// PrintProtosToFileSystem prints all of the given file descriptors to files in
+// the given directory. If file names in the given descriptors include path
+// information, they will be relative to the given root.
+func (p *Printer) PrintProtosToFileSystem(fds []*desc.FileDescriptor, rootDir string) error {
+	openFile := func(name string) (io.WriteCloser, error) {
+		// build the destination path and make sure its directory exists
+		target := filepath.Join(rootDir, name)
+		if err := os.MkdirAll(filepath.Dir(target), os.ModePerm); err != nil {
+			return nil, err
+		}
+		return os.OpenFile(target, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	}
+	return p.PrintProtoFiles(fds, openFile)
+}
+
+// pkg represents a package name; when it appears as a file element, it is
+// printed as a "package" statement
+type pkg string
+
+// imp represents an imported file name; when it appears as a file element, it
+// is printed as an "import" statement
+type imp string
+
+// ident represents an identifier
+type ident string
+
+// option represents a resolved descriptor option: its source name and its
+// resolved value
+type option struct {
+	name string
+	val  interface{}
+}
+
+// reservedRange represents a reserved range from a message or enum
+type reservedRange struct {
+	start, end int32
+}
+
+// PrintProtoFile prints the given single file descriptor to the given writer.
+// The output is complete proto source for the file.
+func (p *Printer) PrintProtoFile(fd *desc.FileDescriptor, out io.Writer) error {
+	return p.printProto(fd, out)
+}
+
+// PrintProtoToString prints the given descriptor and returns the resulting
+// string. This can be used to print proto files, but it can also be used to
+// get the proto "source form" for any kind of descriptor, which can be a more
+// user-friendly way to present descriptors that are intended for human
+// consumption.
+func (p *Printer) PrintProtoToString(dsc desc.Descriptor) (string, error) {
+	var rendered bytes.Buffer
+	err := p.printProto(dsc, &rendered)
+	if err != nil {
+		return "", err
+	}
+	return rendered.String(), nil
+}
+
+// printProto renders any kind of descriptor to out as proto source. As a side
+// effect it normalizes the printer's Indent and OmitComments settings.
+func (p *Printer) printProto(dsc desc.Descriptor, out io.Writer) error {
+	w := newWriter(out)
+
+	if p.Indent == "" {
+		// default indent to two spaces
+		p.Indent = "  "
+	} else {
+		// indent must be all spaces or tabs, so convert other chars to spaces
+		ind := make([]rune, 0, len(p.Indent))
+		for _, r := range p.Indent {
+			if r == '\t' {
+				ind = append(ind, r)
+			} else {
+				ind = append(ind, ' ')
+			}
+		}
+		p.Indent = string(ind)
+	}
+	// fold the deprecated flag into the OmitComments bitmask
+	if p.OmitDetachedComments {
+		p.OmitComments |= CommentsDetached
+	}
+
+	// build a message factory whose registry knows about all extensions in the
+	// file (recursively), so custom options can be interpreted
+	er := dynamic.ExtensionRegistry{}
+	er.AddExtensionsFromFileRecursively(dsc.GetFile())
+	mf := dynamic.NewMessageFactoryWithExtensionRegistry(&er)
+	fdp := dsc.GetFile().AsFileDescriptorProto()
+	sourceInfo := internal.CreateSourceInfoMap(fdp)
+	extendOptionLocations(sourceInfo)
+
+	// path locates dsc within its file's source info (nil for the file itself)
+	path := findElement(dsc)
+	switch d := dsc.(type) {
+	case *desc.FileDescriptor:
+		p.printFile(d, mf, w, sourceInfo)
+	case *desc.MessageDescriptor:
+		p.printMessage(d, mf, w, sourceInfo, path, 0)
+	case *desc.FieldDescriptor:
+		var scope string
+		if md, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			scope = md.GetFullyQualifiedName()
+		} else {
+			scope = d.GetFile().GetPackage()
+		}
+		if d.IsExtension() {
+			// extensions are wrapped in an "extend" block
+			fmt.Fprint(w, "extend ")
+			extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
+			p.printElementString(extNameSi, w, 0, p.qualifyName(d.GetFile().GetPackage(), scope, d.GetOwner().GetFullyQualifiedName()))
+			fmt.Fprintln(w, "{")
+
+			p.printField(d, mf, w, sourceInfo, path, scope, 1)
+
+			fmt.Fprintln(w, "}")
+		} else {
+			p.printField(d, mf, w, sourceInfo, path, scope, 0)
+		}
+	case *desc.OneOfDescriptor:
+		// a one-of is printed with all of its fields, which live on the parent
+		md := d.GetOwner()
+		elements := elementAddrs{dsc: md}
+		for i := range md.GetFields() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+		}
+		p.printOneOf(d, elements, 0, mf, w, sourceInfo, path[:len(path)-1], 0, path[len(path)-1])
+	case *desc.EnumDescriptor:
+		p.printEnum(d, mf, w, sourceInfo, path, 0)
+	case *desc.EnumValueDescriptor:
+		p.printEnumValue(d, mf, w, sourceInfo, path, 0)
+	case *desc.ServiceDescriptor:
+		p.printService(d, mf, w, sourceInfo, path, 0)
+	case *desc.MethodDescriptor:
+		p.printMethod(d, mf, w, sourceInfo, path, 0)
+	}
+
+	return w.err
+}
+
+// findElement computes the source-info location path (as used in
+// FileDescriptorProto.SourceCodeInfo) from the file root down to the given
+// descriptor. It returns nil for a file descriptor and panics for an
+// unrecognized descriptor type.
+func findElement(dsc desc.Descriptor) []int32 {
+	if dsc.GetParent() == nil {
+		// the file itself: empty path
+		return nil
+	}
+	// start from the parent's path and append this element's tag and index
+	path := findElement(dsc.GetParent())
+	switch d := dsc.(type) {
+	case *desc.MessageDescriptor:
+		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			return append(path, internal.Message_nestedMessagesTag, getMessageIndex(d, pm.GetNestedMessageTypes()))
+		}
+		return append(path, internal.File_messagesTag, getMessageIndex(d, d.GetFile().GetMessageTypes()))
+
+	case *desc.FieldDescriptor:
+		if d.IsExtension() {
+			// extensions can be declared at file level or nested in a message
+			if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+				return append(path, internal.Message_extensionsTag, getFieldIndex(d, pm.GetNestedExtensions()))
+			}
+			return append(path, internal.File_extensionsTag, getFieldIndex(d, d.GetFile().GetExtensions()))
+		}
+		return append(path, internal.Message_fieldsTag, getFieldIndex(d, d.GetOwner().GetFields()))
+
+	case *desc.OneOfDescriptor:
+		return append(path, internal.Message_oneOfsTag, getOneOfIndex(d, d.GetOwner().GetOneOfs()))
+
+	case *desc.EnumDescriptor:
+		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			return append(path, internal.Message_enumsTag, getEnumIndex(d, pm.GetNestedEnumTypes()))
+		}
+		return append(path, internal.File_enumsTag, getEnumIndex(d, d.GetFile().GetEnumTypes()))
+
+	case *desc.EnumValueDescriptor:
+		return append(path, internal.Enum_valuesTag, getEnumValueIndex(d, d.GetEnum().GetValues()))
+
+	case *desc.ServiceDescriptor:
+		return append(path, internal.File_servicesTag, getServiceIndex(d, d.GetFile().GetServices()))
+
+	case *desc.MethodDescriptor:
+		return append(path, internal.Service_methodsTag, getMethodIndex(d, d.GetService().GetMethods()))
+
+	default:
+		panic(fmt.Sprintf("unexpected descriptor type: %T", dsc))
+	}
+}
+
+// getMessageIndex returns md's position within list, panicking if absent.
+func getMessageIndex(md *desc.MessageDescriptor, list []*desc.MessageDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == md {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of message %s", md.GetFullyQualifiedName()))
+}
+
+// getFieldIndex returns fd's position within list, panicking if absent.
+func getFieldIndex(fd *desc.FieldDescriptor, list []*desc.FieldDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == fd {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of field %s", fd.GetFullyQualifiedName()))
+}
+
+// getOneOfIndex returns ood's position within list, panicking if absent.
+func getOneOfIndex(ood *desc.OneOfDescriptor, list []*desc.OneOfDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == ood {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of oneof %s", ood.GetFullyQualifiedName()))
+}
+
+// getEnumIndex returns ed's position within list, panicking if absent.
+func getEnumIndex(ed *desc.EnumDescriptor, list []*desc.EnumDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == ed {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of enum %s", ed.GetFullyQualifiedName()))
+}
+
+// getEnumValueIndex returns evd's position within list, panicking if absent.
+func getEnumValueIndex(evd *desc.EnumValueDescriptor, list []*desc.EnumValueDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == evd {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of enum value %s", evd.GetFullyQualifiedName()))
+}
+
+// getServiceIndex returns sd's position within list, panicking if absent.
+func getServiceIndex(sd *desc.ServiceDescriptor, list []*desc.ServiceDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == sd {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of service %s", sd.GetFullyQualifiedName()))
+}
+
+// getMethodIndex returns mtd's position within list, panicking if absent.
+func getMethodIndex(mtd *desc.MethodDescriptor, list []*desc.MethodDescriptor) int32 {
+	for i, candidate := range list {
+		if candidate == mtd {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of method %s", mtd.GetFullyQualifiedName()))
+}
+
+// newLine emits a blank separator line, unless the printer is in compact mode.
+func (p *Printer) newLine(w io.Writer) {
+	if p.Compact {
+		return
+	}
+	fmt.Fprintln(w)
+}
+
+// printFile prints the full source of the given file: syntax, package,
+// imports, options, and all top-level elements, grouping consecutive
+// extensions of the same extendee into one "extend" block.
+func (p *Printer) printFile(fd *desc.FileDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, opts ...interface{}) {
+	opts, err := p.extractOptions(fd, fd.GetOptions(), mf)
+	if err != nil {
+		return
+	}
+
+	fdp := fd.AsFileDescriptorProto()
+	path := make([]int32, 1)
+
+	// mirror the package element's location onto a child path so it can be
+	// looked up when the package element is printed below
+	path[0] = internal.File_packageTag
+	sourceInfo.PutIfAbsent(append(path, 0), sourceInfo.Get(path))
+
+	// syntax is always printed first and is not subject to sorting
+	path[0] = internal.File_syntaxTag
+	si := sourceInfo.Get(path)
+	p.printElement(false, si, w, 0, func(w *writer) {
+		syn := fdp.GetSyntax()
+		if syn == "" {
+			syn = "proto2"
+		}
+		fmt.Fprintf(w, "syntax = %q;", syn)
+	})
+	p.newLine(w)
+
+	// collect addresses of all remaining file elements; the negative order
+	// values keep package (-3) ahead of imports (-2) ahead of options (-1)
+	// when sorting canonically
+	elements := elementAddrs{dsc: fd, opts: opts}
+	if fdp.Package != nil {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_packageTag, elementIndex: 0, order: -3})
+	}
+	for i := range fd.AsFileDescriptorProto().GetDependency() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_dependencyTag, elementIndex: i, order: -2})
+	}
+	elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.File_optionsTag, -1, opts)...)
+	for i := range fd.GetMessageTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_messagesTag, elementIndex: i})
+	}
+	for i := range fd.GetEnumTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_enumsTag, elementIndex: i})
+	}
+	for i := range fd.GetServices() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_servicesTag, elementIndex: i})
+	}
+	for i := range fd.GetExtensions() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_extensionsTag, elementIndex: i})
+	}
+
+	p.sort(elements, sourceInfo, nil)
+
+	pkgName := fd.GetPackage()
+
+	// ext tracks the extendee of the currently open "extend" block, if any
+	var ext *desc.FieldDescriptor
+	for i, el := range elements.addrs {
+		d := elements.at(el)
+		path = []int32{el.elementType, int32(el.elementIndex)}
+		if el.elementType == internal.File_extensionsTag {
+			fld := d.(*desc.FieldDescriptor)
+			if ext == nil || ext.GetOwner() != fld.GetOwner() {
+				// need to open a new extend block
+				if ext != nil {
+					// close preceding extend block
+					fmt.Fprintln(w, "}")
+				}
+				if i > 0 {
+					p.newLine(w)
+				}
+
+				ext = fld
+				fmt.Fprint(w, "extend ")
+				extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
+				p.printElementString(extNameSi, w, 0, p.qualifyName(pkgName, pkgName, fld.GetOwner().GetFullyQualifiedName()))
+				fmt.Fprintln(w, "{")
+			} else {
+				p.newLine(w)
+			}
+			p.printField(fld, mf, w, sourceInfo, path, pkgName, 1)
+		} else {
+			if ext != nil {
+				// close preceding extend block
+				fmt.Fprintln(w, "}")
+				ext = nil
+			}
+
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			switch d := d.(type) {
+			case pkg:
+				si := sourceInfo.Get(path)
+				p.printElement(false, si, w, 0, func(w *writer) {
+					fmt.Fprintf(w, "package %s;", d)
+				})
+			case imp:
+				si := sourceInfo.Get(path)
+				p.printElement(false, si, w, 0, func(w *writer) {
+					fmt.Fprintf(w, "import %q;", d)
+				})
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, path, 0)
+			case *desc.MessageDescriptor:
+				p.printMessage(d, mf, w, sourceInfo, path, 0)
+			case *desc.EnumDescriptor:
+				p.printEnum(d, mf, w, sourceInfo, path, 0)
+			case *desc.ServiceDescriptor:
+				p.printService(d, mf, w, sourceInfo, path, 0)
+			}
+		}
+	}
+
+	if ext != nil {
+		// close trailing extend block
+		fmt.Fprintln(w, "}")
+	}
+}
+
+// sort orders the given elements, either canonically (when SortElements is
+// set) or in the order they appeared in the original source.
+func (p *Printer) sort(elements elementAddrs, sourceInfo internal.SourceInfoMap, path []int32) {
+	if !p.SortElements {
+		// use source order (per location information in SourceCodeInfo); or
+		// if that isn't present use declaration order, but grouped by type
+		sort.Stable(elementSrcOrder{
+			elementAddrs: elements,
+			sourceInfo:   sourceInfo,
+			prefix:       path,
+		})
+		return
+	}
+	// canonical sorted order
+	sort.Stable(elements)
+}
+
+// qualifyName renders fqn for printing from within the given scope. When
+// ForceFullyQualifiedNames is set, the result always carries a leading dot;
+// otherwise the longest shared context between scope and fqn (bounded by the
+// package) is stripped to produce the shortest unambiguous relative name.
+func (p *Printer) qualifyName(pkg, scope string, fqn string) string {
+	if p.ForceFullyQualifiedNames {
+		// always emit a leading dot so the name cannot be read as relative
+		if strings.HasPrefix(fqn, ".") {
+			return fqn
+		}
+		return "." + fqn
+	}
+
+	// compute relative name (so no leading dot)
+	fqn = strings.TrimPrefix(fqn, ".")
+	// normalize scope to end with a dot so prefix checks align on segment
+	// boundaries
+	if scope != "" && !strings.HasSuffix(scope, ".") {
+		scope += "."
+	}
+	// walk outward from the innermost scope, dropping one trailing segment at
+	// a time, stopping once we would leave the package
+	for scope != "" {
+		if strings.HasPrefix(fqn, scope) {
+			return strings.TrimPrefix(fqn, scope)
+		}
+		if scope == pkg+"." {
+			break
+		}
+		dot := strings.LastIndex(scope[:len(scope)-1], ".")
+		scope = scope[:dot+1]
+	}
+	return fqn
+}
+
+func (p *Printer) typeString(fld *desc.FieldDescriptor, scope string) string {
+ if fld.IsMap() {
+ return fmt.Sprintf("map<%s, %s>", p.typeString(fld.GetMapKeyType(), scope), p.typeString(fld.GetMapValueType(), scope))
+ }
+ switch fld.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_INT32:
+ return "int32"
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ return "int64"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ return "uint32"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ return "uint64"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ return "sint32"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return "sint64"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ return "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ return "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ return "sfixed32"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ return "sfixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ return "float"
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return "double"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return "bool"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return "string"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetEnumType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetMessageType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ return fld.GetMessageType().GetName()
+ }
+ panic(fmt.Sprintf("invalid type: %v", fld.GetType()))
+}
+
+// printMessage prints a message declaration at the given indent level,
+// delegating the contents between the braces to printMessageBody.
+func (p *Printer) printMessage(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		fmt.Fprint(w, "message ")
+		nameSi := sourceInfo.Get(append(path, internal.Message_nameTag))
+		p.printElementString(nameSi, w, indent, md.GetName())
+		fmt.Fprintln(w, "{")
+
+		p.printMessageBody(md, mf, w, sourceInfo, path, indent+1)
+		p.indent(w, indent)
+		fmt.Fprintln(w, "}")
+	})
+}
+
+// printMessageBody prints everything between a message's braces: options,
+// fields and one-ofs, nested types, extensions, extension ranges, and
+// reserved ranges/names. The enclosing braces are printed by the caller.
+func (p *Printer) printMessageBody(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	opts, err := p.extractOptions(md, md.GetOptions(), mf)
+	if err != nil {
+		if w.err == nil {
+			w.err = err
+		}
+		return
+	}
+
+	// elements already emitted as part of another element (one-of fields,
+	// collapsed ranges, map/group entry messages) are recorded here so the
+	// main loop below doesn't print them twice
+	skip := map[interface{}]bool{}
+
+	elements := elementAddrs{dsc: md, opts: opts}
+	elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Message_optionsTag, -1, opts)...)
+	for i := range md.AsDescriptorProto().GetReservedRange() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedRangeTag, elementIndex: i})
+	}
+	for i := range md.AsDescriptorProto().GetReservedName() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedNameTag, elementIndex: i})
+	}
+	for i := range md.AsDescriptorProto().GetExtensionRange() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionRangeTag, elementIndex: i})
+	}
+	for i, fld := range md.GetFields() {
+		if fld.IsMap() || fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP {
+			// we don't emit nested messages for map types or groups since
+			// they get special treatment
+			skip[fld.GetMessageType()] = true
+		}
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+	}
+	for i := range md.GetNestedMessageTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_nestedMessagesTag, elementIndex: i})
+	}
+	for i := range md.GetNestedEnumTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_enumsTag, elementIndex: i})
+	}
+	for i := range md.GetNestedExtensions() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionsTag, elementIndex: i})
+	}
+
+	p.sort(elements, sourceInfo, path)
+
+	pkg := md.GetFile().GetPackage()
+	scope := md.GetFullyQualifiedName()
+
+	// ext tracks the extendee of the currently open "extend" block, if any
+	var ext *desc.FieldDescriptor
+	for i, el := range elements.addrs {
+		d := elements.at(el)
+		// skip[d] will panic if d is a slice (which it could be for []option),
+		// so just ignore it since we don't try to skip options
+		if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+			// skip this element
+			continue
+		}
+
+		childPath := append(path, el.elementType, int32(el.elementIndex))
+		if el.elementType == internal.Message_extensionsTag {
+			// extension
+			fld := d.(*desc.FieldDescriptor)
+			if ext == nil || ext.GetOwner() != fld.GetOwner() {
+				// need to open a new extend block
+				if ext != nil {
+					// close preceding extend block
+					p.indent(w, indent)
+					fmt.Fprintln(w, "}")
+				}
+				if i > 0 {
+					p.newLine(w)
+				}
+
+				ext = fld
+				p.indent(w, indent)
+				fmt.Fprint(w, "extend ")
+				extNameSi := sourceInfo.Get(append(childPath, internal.Field_extendeeTag))
+				p.printElementString(extNameSi, w, indent, p.qualifyName(pkg, scope, fld.GetOwner().GetFullyQualifiedName()))
+				fmt.Fprintln(w, "{")
+			} else {
+				p.newLine(w)
+			}
+			p.printField(fld, mf, w, sourceInfo, childPath, scope, indent+1)
+		} else {
+			if ext != nil {
+				// close preceding extend block
+				p.indent(w, indent)
+				fmt.Fprintln(w, "}")
+				ext = nil
+			}
+
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			switch d := d.(type) {
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
+			case *desc.FieldDescriptor:
+				ood := d.GetOneOf()
+				if ood == nil {
+					p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
+				} else if !skip[ood] {
+					// print the one-of, including all of its fields
+					p.printOneOf(ood, elements, i, mf, w, sourceInfo, path, indent, d.AsFieldDescriptorProto().GetOneofIndex())
+					skip[ood] = true
+				}
+			case *desc.MessageDescriptor:
+				p.printMessage(d, mf, w, sourceInfo, childPath, indent)
+			case *desc.EnumDescriptor:
+				p.printEnum(d, mf, w, sourceInfo, childPath, indent)
+			case *descriptor.DescriptorProto_ExtensionRange:
+				// collapse ranges into a single "extensions" block
+				ranges := []*descriptor.DescriptorProto_ExtensionRange{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					extr := elements.at(elnext).(*descriptor.DescriptorProto_ExtensionRange)
+					// only ranges with identical options can share a block
+					if !areEqual(d.Options, extr.Options, mf) {
+						break
+					}
+					ranges = append(ranges, extr)
+					addrs = append(addrs, elnext)
+					skip[extr] = true
+				}
+				p.printExtensionRanges(md, ranges, addrs, mf, w, sourceInfo, path, indent)
+			case reservedRange:
+				// collapse reserved ranges into a single "reserved" block
+				ranges := []reservedRange{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rr := elements.at(elnext).(reservedRange)
+					ranges = append(ranges, rr)
+					addrs = append(addrs, elnext)
+					skip[rr] = true
+				}
+				p.printReservedRanges(ranges, false, addrs, w, sourceInfo, path, indent)
+			case string: // reserved name
+				// collapse reserved names into a single "reserved" block
+				names := []string{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rn := elements.at(elnext).(string)
+					names = append(names, rn)
+					addrs = append(addrs, elnext)
+					skip[rn] = true
+				}
+				p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
+			}
+		}
+	}
+
+	if ext != nil {
+		// close trailing extend block
+		p.indent(w, indent)
+		fmt.Fprintln(w, "}")
+	}
+}
+
+// areEqual compares two options messages. proto.Equal doesn't handle unknown
+// extensions very well :( so we convert to dynamic messages (which should know
+// about all extensions via extension registry) and then compare.
+func areEqual(a, b proto.Message, mf *dynamic.MessageFactory) bool {
+	da := asDynamicIfPossible(a, mf)
+	db := asDynamicIfPossible(b, mf)
+	return dynamic.MessagesEqual(da, db)
+}
+
+// asDynamicIfPossible converts msg to a dynamic message (so that comparisons
+// can see unknown extensions). If msg is already dynamic it is returned as-is;
+// if conversion is not possible, the original message is returned unchanged.
+func asDynamicIfPossible(msg proto.Message, mf *dynamic.MessageFactory) proto.Message {
+	// flattened control flow: return early instead of nesting in an else
+	// block after a terminating if (Go indent-error-flow idiom)
+	if dm, ok := msg.(*dynamic.Message); ok {
+		return dm
+	}
+	if md, err := desc.LoadMessageDescriptorForMessage(msg); err == nil {
+		dm := mf.NewDynamicMessage(md)
+		if dm.ConvertFrom(msg) == nil {
+			return dm
+		}
+	}
+	// best effort: fall back to the original message
+	return msg
+}
+
+// printField prints a single field, extension, or group declaration at the
+// given indent level. scope is the fully-qualified context used to relativize
+// type references. For groups, comments and position come from the associated
+// nested message type rather than the field itself.
+func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, scope string, indent int) {
+	var groupPath []int32
+	var si *descriptor.SourceCodeInfo_Location
+	if isGroup(fld) {
+		// compute path to group message type
+		groupPath = make([]int32, len(path)-2)
+		copy(groupPath, path)
+		var groupMsgIndex int32
+		md := fld.GetParent().(*desc.MessageDescriptor)
+		for i, nmd := range md.GetNestedMessageTypes() {
+			if nmd == fld.GetMessageType() {
+				// found it
+				groupMsgIndex = int32(i)
+				break
+			}
+		}
+		groupPath = append(groupPath, internal.Message_nestedMessagesTag, groupMsgIndex)
+
+		// the group message is where the field's comments and position are stored
+		si = sourceInfo.Get(groupPath)
+	} else {
+		si = sourceInfo.Get(path)
+	}
+
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+		if shouldEmitLabel(fld) {
+			locSi := sourceInfo.Get(append(path, internal.Field_labelTag))
+			p.printElementString(locSi, w, indent, labelString(fld.GetLabel()))
+		}
+
+		if isGroup(fld) {
+			// groups print as "group Name = tag { ...body... }"
+			fmt.Fprint(w, "group ")
+
+			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
+			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))
+			fmt.Fprint(w, "= ")
+
+			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
+			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))
+
+			fmt.Fprintln(w, "{")
+			p.printMessageBody(fld.GetMessageType(), mf, w, sourceInfo, groupPath, indent+1)
+
+			p.indent(w, indent)
+			fmt.Fprintln(w, "}")
+		} else {
+			// normal fields print as "type name = tag [options];"
+			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
+			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))
+
+			nameSi := sourceInfo.Get(append(path, internal.Field_nameTag))
+			p.printElementString(nameSi, w, indent, fld.GetName())
+			fmt.Fprint(w, "= ")
+
+			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
+			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))
+
+			opts, err := p.extractOptions(fld, fld.GetOptions(), mf)
+			if err != nil {
+				if w.err == nil {
+					w.err = err
+				}
+				return
+			}
+
+			// we use negative values for "extras" keys so they can't collide
+			// with legit option tags
+
+			if !fld.GetFile().IsProto3() && fld.AsFieldDescriptorProto().DefaultValue != nil {
+				defVal := fld.GetDefaultValue()
+				if fld.GetEnumType() != nil {
+					defVal = fld.GetEnumType().FindValueByNumber(defVal.(int32))
+				}
+				opts[-internal.Field_defaultTag] = []option{{name: "default", val: defVal}}
+			}
+
+			// only print json_name when it differs from the derived default
+			jsn := fld.AsFieldDescriptorProto().GetJsonName()
+			if jsn != "" && jsn != internal.JsonName(fld.GetName()) {
+				opts[-internal.Field_jsonNameTag] = []option{{name: "json_name", val: jsn}}
+			}
+
+			elements := elementAddrs{dsc: fld, opts: opts}
+			elements.addrs = optionsAsElementAddrs(internal.Field_optionsTag, 0, opts)
+			p.sort(elements, sourceInfo, path)
+			p.printOptionElementsShort(elements, w, sourceInfo, path, indent)
+
+			fmt.Fprint(w, ";")
+		}
+	})
+}
+
+// shouldEmitLabel reports whether the field's label keyword (optional,
+// required, repeated) must appear in the printed declaration.
+func shouldEmitLabel(fld *desc.FieldDescriptor) bool {
+	if fld.IsMap() || fld.GetOneOf() != nil {
+		return false
+	}
+	if fld.GetLabel() != descriptor.FieldDescriptorProto_LABEL_OPTIONAL {
+		return true
+	}
+	return !fld.GetFile().IsProto3()
+}
+
+func labelString(lbl descriptor.FieldDescriptorProto_Label) string {
+ switch lbl {
+ case descriptor.FieldDescriptorProto_LABEL_OPTIONAL:
+ return "optional"
+ case descriptor.FieldDescriptorProto_LABEL_REQUIRED:
+ return "required"
+ case descriptor.FieldDescriptorProto_LABEL_REPEATED:
+ return "repeated"
+ }
+ panic(fmt.Sprintf("invalid label: %v", lbl))
+}
+
+func isGroup(fld *desc.FieldDescriptor) bool {
+ return fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+}
+
// printOneOf prints a oneof block, including its options and member fields.
// Member fields belong to the parent message's field list, so they are
// located by scanning parentElements starting at startFieldIndex; ooIndex is
// this oneof's index within the parent message.
func (p *Printer) printOneOf(ood *desc.OneOfDescriptor, parentElements elementAddrs, startFieldIndex int, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, ooIndex int32) {
	oopath := append(parentPath, internal.Message_oneOfsTag, ooIndex)
	oosi := sourceInfo.Get(oopath)
	p.printElement(true, oosi, w, indent, func(w *writer) {
		p.indent(w, indent)
		fmt.Fprint(w, "oneof ")
		extNameSi := sourceInfo.Get(append(oopath, internal.OneOf_nameTag))
		p.printElementString(extNameSi, w, indent, ood.GetName())
		fmt.Fprintln(w, "{")

		indent++
		opts, err := p.extractOptions(ood, ood.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and bail
			if w.err == nil {
				w.err = err
			}
			return
		}

		elements := elementAddrs{dsc: ood, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.OneOf_optionsTag, -1, opts)...)

		// gather this oneof's member fields from the parent's element list;
		// stop early once all choices have been found
		count := len(ood.GetChoices())
		for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
			el := parentElements.addrs[idx]
			if el.elementType != internal.Message_fieldsTag {
				continue
			}
			if parentElements.at(el).(*desc.FieldDescriptor).GetOneOf() == ood {
				// negative tag indicates that this element is actually a sibling, not a child
				elements.addrs = append(elements.addrs, elementAddr{elementType: -internal.Message_fieldsTag, elementIndex: el.elementIndex})
				count--
			}
		}

		p.sort(elements, sourceInfo, oopath)

		scope := ood.GetOwner().GetFullyQualifiedName()

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			switch d := elements.at(el).(type) {
			case []option:
				childPath := append(oopath, el.elementType, int32(el.elementIndex))
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.FieldDescriptor:
				// member fields are addressed relative to the parent message
				// (negated tag), so their path is built from parentPath
				childPath := append(parentPath, -el.elementType, int32(el.elementIndex))
				p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
// printExtensionRanges prints a single "extensions N, M to K" statement
// covering all of the given ranges (which the caller has already grouped
// together). Descriptor range ends are exclusive, so 1 is subtracted when
// rendering.
func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges []*descriptor.DescriptorProto_ExtensionRange, addrs []elementAddr, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
	p.indent(w, indent)
	fmt.Fprint(w, "extensions ")

	// NOTE(review): opts and elPath retain the values from the *last* range
	// in the loop, and options are printed once for the whole statement —
	// presumably grouped ranges share the same options; verify against the
	// caller's grouping logic.
	var opts *descriptor.ExtensionRangeOptions
	var elPath []int32
	first := true
	for i, extr := range ranges {
		if first {
			first = false
		} else {
			fmt.Fprint(w, ", ")
		}
		opts = extr.Options
		el := addrs[i]
		elPath = append(parentPath, el.elementType, int32(el.elementIndex))
		si := sourceInfo.Get(elPath)
		p.printElement(true, si, w, inline(indent), func(w *writer) {
			if extr.GetStart() == extr.GetEnd()-1 {
				// single-tag range
				fmt.Fprintf(w, "%d ", extr.GetStart())
			} else if extr.GetEnd()-1 == internal.MaxTag {
				fmt.Fprintf(w, "%d to max ", extr.GetStart())
			} else {
				fmt.Fprintf(w, "%d to %d ", extr.GetStart(), extr.GetEnd()-1)
			}
		})
	}
	dsc := extensionRange{owner: parent, extRange: ranges[0]}
	p.printOptionsShort(dsc, opts, mf, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent)

	fmt.Fprintln(w, ";")
}
+
// printReservedRanges prints a single "reserved" statement covering all of
// the given ranges, which the caller has grouped together. Range ends here
// are inclusive; the "max" sentinel is internal.MaxTag for messages and
// math.MaxInt32 for enums.
func (p *Printer) printReservedRanges(ranges []reservedRange, isEnum bool, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
	p.indent(w, indent)
	fmt.Fprint(w, "reserved ")

	first := true
	for i, rr := range ranges {
		if first {
			first = false
		} else {
			fmt.Fprint(w, ", ")
		}
		el := addrs[i]
		si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
		p.printElement(false, si, w, inline(indent), func(w *writer) {
			if rr.start == rr.end {
				// single reserved tag
				fmt.Fprintf(w, "%d ", rr.start)
			} else if (rr.end == internal.MaxTag && !isEnum) ||
				(rr.end == math.MaxInt32 && isEnum) {
				fmt.Fprintf(w, "%d to max ", rr.start)
			} else {
				fmt.Fprintf(w, "%d to %d ", rr.start, rr.end)
			}
		})
	}

	fmt.Fprintln(w, ";")
}
+
+func (p *Printer) printReservedNames(names []string, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+ p.indent(w, indent)
+ fmt.Fprint(w, "reserved ")
+
+ first := true
+ for i, name := range names {
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(w, ", ")
+ }
+ el := addrs[i]
+ si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
+ p.printElementString(si, w, indent, quotedString(name))
+ }
+
+ fmt.Fprintln(w, ";")
+}
+
// printEnum prints a complete enum definition: options, values, reserved
// ranges, and reserved names. After sorting, consecutive reserved ranges
// and consecutive reserved names are collapsed into single "reserved"
// statements.
func (p *Printer) printEnum(ed *desc.EnumDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "enum ")
		nameSi := sourceInfo.Get(append(path, internal.Enum_nameTag))
		p.printElementString(nameSi, w, indent, ed.GetName())
		fmt.Fprintln(w, "{")

		indent++
		opts, err := p.extractOptions(ed, ed.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and bail
			if w.err == nil {
				w.err = err
			}
			return
		}

		// elements already emitted as part of a collapsed "reserved" statement
		skip := map[interface{}]bool{}

		elements := elementAddrs{dsc: ed, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Enum_optionsTag, -1, opts)...)
		for i := range ed.GetValues() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_valuesTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedRange() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedRangeTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedName() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedNameTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			d := elements.at(el)

			// skip[d] will panic if d is a slice (which it could be for []option),
			// so just ignore it since we don't try to skip options
			if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
				// skip this element
				continue
			}

			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := d.(type) {
			case []option:
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.EnumValueDescriptor:
				p.printEnumValue(d, mf, w, sourceInfo, childPath, indent)
			case reservedRange:
				// collapse reserved ranges into a single "reserved" block
				ranges := []reservedRange{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rr := elements.at(elnext).(reservedRange)
					ranges = append(ranges, rr)
					addrs = append(addrs, elnext)
					skip[rr] = true
				}
				p.printReservedRanges(ranges, true, addrs, w, sourceInfo, path, indent)
			case string: // reserved name
				// collapse reserved names into a single "reserved" block
				names := []string{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rn := elements.at(elnext).(string)
					names = append(names, rn)
					addrs = append(addrs, elnext)
					skip[rn] = true
				}
				p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
+func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+ si := sourceInfo.Get(path)
+ p.printElement(true, si, w, indent, func(w *writer) {
+ p.indent(w, indent)
+
+ nameSi := sourceInfo.Get(append(path, internal.EnumVal_nameTag))
+ p.printElementString(nameSi, w, indent, evd.GetName())
+ fmt.Fprint(w, "= ")
+
+ numSi := sourceInfo.Get(append(path, internal.EnumVal_numberTag))
+ p.printElementString(numSi, w, indent, fmt.Sprintf("%d", evd.GetNumber()))
+
+ p.printOptionsShort(evd, evd.GetOptions(), mf, internal.EnumVal_optionsTag, w, sourceInfo, path, indent)
+
+ fmt.Fprint(w, ";")
+ })
+}
+
// printService prints a service definition: its options followed by its
// methods, ordered according to the printer's configured sort.
func (p *Printer) printService(sd *desc.ServiceDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "service ")
		nameSi := sourceInfo.Get(append(path, internal.Service_nameTag))
		p.printElementString(nameSi, w, indent, sd.GetName())
		fmt.Fprintln(w, "{")

		indent++

		opts, err := p.extractOptions(sd, sd.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and bail
			if w.err == nil {
				w.err = err
			}
			return
		}

		elements := elementAddrs{dsc: sd, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Service_optionsTag, -1, opts)...)
		for i := range sd.GetMethods() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Service_methodsTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := elements.at(el).(type) {
			case []option:
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.MethodDescriptor:
				p.printMethod(d, mf, w, sourceInfo, childPath, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
// printMethod prints an rpc definition, including "stream" modifiers on the
// request/response types. If the method has options they are printed in a
// braced body of long-form option statements; otherwise the rpc ends with a
// semicolon.
func (p *Printer) printMethod(mtd *desc.MethodDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	pkg := mtd.GetFile().GetPackage()
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "rpc ")
		nameSi := sourceInfo.Get(append(path, internal.Method_nameTag))
		p.printElementString(nameSi, w, indent, mtd.GetName())

		fmt.Fprint(w, "( ")
		inSi := sourceInfo.Get(append(path, internal.Method_inputTag))
		inName := p.qualifyName(pkg, pkg, mtd.GetInputType().GetFullyQualifiedName())
		if mtd.IsClientStreaming() {
			inName = "stream " + inName
		}
		p.printElementString(inSi, w, indent, inName)

		fmt.Fprint(w, ") returns ( ")

		outSi := sourceInfo.Get(append(path, internal.Method_outputTag))
		outName := p.qualifyName(pkg, pkg, mtd.GetOutputType().GetFullyQualifiedName())
		if mtd.IsServerStreaming() {
			outName = "stream " + outName
		}
		p.printElementString(outSi, w, indent, outName)
		fmt.Fprint(w, ") ")

		opts, err := p.extractOptions(mtd, mtd.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and bail
			if w.err == nil {
				w.err = err
			}
			return
		}

		if len(opts) > 0 {
			fmt.Fprintln(w, "{")
			indent++

			elements := elementAddrs{dsc: mtd, opts: opts}
			elements.addrs = optionsAsElementAddrs(internal.Method_optionsTag, 0, opts)
			p.sort(elements, sourceInfo, path)
			// all children below are options, so extend the path once
			path = append(path, internal.Method_optionsTag)

			for i, addr := range elements.addrs {
				if i > 0 {
					p.newLine(w)
				}
				o := elements.at(addr).([]option)
				p.printOptionsLong(o, w, sourceInfo, path, indent)
			}

			p.indent(w, indent-1)
			fmt.Fprintln(w, "}")
		} else {
			fmt.Fprint(w, ";")
		}
	})
}
+
+func (p *Printer) printOptionsLong(opts []option, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+ p.printOptions(opts, w, indent,
+ func(i int32) *descriptor.SourceCodeInfo_Location {
+ return sourceInfo.Get(append(path, i))
+ },
+ func(w *writer, indent int, opt option) {
+ p.indent(w, indent)
+ fmt.Fprint(w, "option ")
+ p.printOption(opt.name, opt.val, w, indent)
+ fmt.Fprint(w, ";")
+ })
+}
+
+func (p *Printer) printOptionsShort(dsc interface{}, optsMsg proto.Message, mf *dynamic.MessageFactory, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+ d, ok := dsc.(desc.Descriptor)
+ if !ok {
+ d = dsc.(extensionRange).owner
+ }
+ opts, err := p.extractOptions(d, optsMsg, mf)
+ if err != nil {
+ if w.err == nil {
+ w.err = err
+ }
+ return
+ }
+
+ elements := elementAddrs{dsc: dsc, opts: opts}
+ elements.addrs = optionsAsElementAddrs(optsTag, 0, opts)
+ p.sort(elements, sourceInfo, path)
+ p.printOptionElementsShort(elements, w, sourceInfo, path, indent)
+}
+
// printOptionElementsShort prints the given option elements as a single
// bracketed list, e.g. [opt1 = v1, opt2 = v2]. A negative element index
// marks a pseudo-option (e.g. "default", "json_name") whose source info
// path is just the negated index appended to path.
func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	if len(addrs.addrs) == 0 {
		return
	}
	first := true
	fmt.Fprint(w, "[")
	for _, addr := range addrs.addrs {
		opts := addrs.at(addr).([]option)
		var childPath []int32
		if addr.elementIndex < 0 {
			// pseudo-option
			childPath = append(path, int32(-addr.elementIndex))
		} else {
			childPath = append(path, addr.elementType, int32(addr.elementIndex))
		}
		p.printOptions(opts, w, inline(indent),
			func(i int32) *descriptor.SourceCodeInfo_Location {
				// pseudo-options already have their full path; real options
				// also need the option's index appended
				p := childPath
				if addr.elementIndex >= 0 {
					p = append(p, i)
				}
				return sourceInfo.Get(p)
			},
			func(w *writer, indent int, opt option) {
				if first {
					first = false
				} else {
					fmt.Fprint(w, ", ")
				}
				p.printOption(opt.name, opt.val, w, indent)
				fmt.Fprint(w, " ") // trailing space
			})
	}
	fmt.Fprint(w, "]")
}
+
+func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptor.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option)) {
+ for i, opt := range opts {
+ si := siFetch(int32(i))
+ p.printElement(false, si, w, indent, func(w *writer) {
+ fn(w, indent, opt)
+ })
+ }
+}
+
// inline converts an indent level into the negative form that signals
// "print on the current line". Already-inline (negative) values are
// returned unchanged; otherwise the result is -indent-2, the extra 2
// indenting any wrapped continuation further than the element itself.
func inline(indent int) int {
	if indent >= 0 {
		return -indent - 2
	}
	// already inlined
	return indent
}
+
// sortKeys returns the keys of m in deterministic (sorted) order. Protobuf
// map keys are always scalar, so only scalar key types are supported;
// anything else panics.
func sortKeys(m map[interface{}]interface{}) []interface{} {
	keys := make(sortedKeys, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Sort(keys)
	return []interface{}(keys)
}

// sortedKeys implements sort.Interface over a slice of map keys, all of
// which must share the same dynamic type.
type sortedKeys []interface{}

// Len implements sort.Interface.
func (k sortedKeys) Len() int { return len(k) }

// Swap implements sort.Interface.
func (k sortedKeys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }

// Less implements sort.Interface, ordering keys by their natural order;
// for bools, false sorts before true.
func (k sortedKeys) Less(i, j int) bool {
	switch ki := k[i].(type) {
	case int32:
		return ki < k[j].(int32)
	case uint32:
		return ki < k[j].(uint32)
	case int64:
		return ki < k[j].(int64)
	case uint64:
		return ki < k[j].(uint64)
	case string:
		return ki < k[j].(string)
	case bool:
		return !ki && k[j].(bool)
	default:
		panic(fmt.Sprintf("invalid type for map key: %T", ki))
	}
}
+
// printOption prints a single "name = value" pair, formatting the value
// according to its Go type: numbers as-is, strings/bytes quoted, booleans,
// bare identifiers, enum values by name, and message values in compact
// text format inside braces.
func (p *Printer) printOption(name string, optVal interface{}, w *writer, indent int) {
	fmt.Fprintf(w, "%s = ", name)

	switch optVal := optVal.(type) {
	case int32, uint32, int64, uint64:
		fmt.Fprintf(w, "%d", optVal)
	case float32, float64:
		fmt.Fprintf(w, "%f", optVal)
	case string:
		fmt.Fprintf(w, "%s", quotedString(optVal))
	case []byte:
		fmt.Fprintf(w, "%s", quotedString(string(optVal)))
	case bool:
		fmt.Fprintf(w, "%v", optVal)
	case ident:
		fmt.Fprintf(w, "%s", optVal)
	case *desc.EnumValueDescriptor:
		fmt.Fprintf(w, "%s", optVal.GetName())
	case proto.Message:
		// TODO: if value is too long, marshal to text format with indentation to
		// make output prettier (also requires correctly indenting subsequent lines)

		// TODO: alternate approach so we can apply p.ForceFullyQualifiedNames
		// inside the resulting value?

		fmt.Fprintf(w, "{ %s }", proto.CompactTextString(optVal))
	default:
		panic(fmt.Sprintf("unknown type of value %T for field %s", optVal, name))
	}
}
+
// edgeKind identifies the kind of element reached while walking a source
// info path; it is a state in the matching machine used by
// extendOptionLocations.
type edgeKind int

const (
	edgeKindOption edgeKind = iota
	edgeKindFile
	edgeKindMessage
	edgeKindField
	edgeKindOneOf
	edgeKindExtensionRange
	edgeKindEnum
	edgeKindEnumVal
	edgeKindService
	edgeKindMethod
)

// edges in simple state machine for matching options paths
// whose prefix should be included in source info to handle
// the way options are printed (which cannot always include
// the full path from original source)
var edges = map[edgeKind]map[int32]edgeKind{
	edgeKindFile: {
		internal.File_optionsTag: edgeKindOption,
		internal.File_messagesTag: edgeKindMessage,
		internal.File_enumsTag: edgeKindEnum,
		internal.File_extensionsTag: edgeKindField,
		internal.File_servicesTag: edgeKindService,
	},
	edgeKindMessage: {
		internal.Message_optionsTag: edgeKindOption,
		internal.Message_fieldsTag: edgeKindField,
		internal.Message_oneOfsTag: edgeKindOneOf,
		internal.Message_nestedMessagesTag: edgeKindMessage,
		internal.Message_enumsTag: edgeKindEnum,
		internal.Message_extensionsTag: edgeKindField,
		internal.Message_extensionRangeTag: edgeKindExtensionRange,
		// TODO: reserved range tag
	},
	edgeKindField: {
		internal.Field_optionsTag: edgeKindOption,
	},
	edgeKindOneOf: {
		internal.OneOf_optionsTag: edgeKindOption,
	},
	edgeKindExtensionRange: {
		internal.ExtensionRange_optionsTag: edgeKindOption,
	},
	edgeKindEnum: {
		internal.Enum_optionsTag: edgeKindOption,
		internal.Enum_valuesTag: edgeKindEnumVal,
	},
	edgeKindEnumVal: {
		internal.EnumVal_optionsTag: edgeKindOption,
	},
	edgeKindService: {
		internal.Service_optionsTag: edgeKindOption,
		internal.Service_methodsTag: edgeKindMethod,
	},
	edgeKindMethod: {
		internal.Method_optionsTag: edgeKindOption,
	},
}
+
// extendOptionLocations adds extra entries to the source info map for option
// paths. Options are not always printed at the same path depth they had in
// the original source, so for each location whose path reaches an options
// field (per the edges state machine above), truncated variants of the path
// are registered: path-so-far plus option tag and index, the same with an
// explicit zero index, and path-so-far plus just the option tag.
func extendOptionLocations(sc internal.SourceInfoMap) {
	for _, loc := range sc {
		allowed := edges[edgeKindFile]
		// walk the path two elements (tag, index) at a time
		for i := 0; i+1 < len(loc.Path); i += 2 {
			nextKind, ok := allowed[loc.Path[i]]
			if !ok {
				break
			}
			if nextKind == edgeKindOption {
				// We've found an option entry. This could be arbitrarily
				// deep (for options that nested messages) or it could end
				// abruptly (for non-repeated fields). But we need a path
				// that is exactly the path-so-far plus two: the option tag
				// and an optional index for repeated option fields (zero
				// for non-repeated option fields). This is used for
				// querying source info when printing options.
				// for sorting elements
				newPath := make([]int32, i+3)
				copy(newPath, loc.Path)
				sc.PutIfAbsent(newPath, loc)
				// we do another path of path-so-far plus two, but with
				// explicit zero index -- just in case this actual path has
				// an extra path element, but it's not an index (e.g the
				// option field is not repeated, but the source info we are
				// looking at indicates a tag of a nested field)
				newPath[len(newPath)-1] = 0
				sc.PutIfAbsent(newPath, loc)
				// finally, we need the path-so-far plus one, just the option
				// tag, for sorting option groups
				newPath = newPath[:len(newPath)-1]
				sc.PutIfAbsent(newPath, loc)

				break
			} else {
				allowed = edges[nextKind]
			}
		}
	}
}
+
+func (p *Printer) extractOptions(dsc desc.Descriptor, opts proto.Message, mf *dynamic.MessageFactory) (map[int32][]option, error) {
+ md, err := desc.LoadMessageDescriptorForMessage(opts)
+ if err != nil {
+ return nil, err
+ }
+ dm := mf.NewDynamicMessage(md)
+ if err = dm.ConvertFrom(opts); err != nil {
+ return nil, fmt.Errorf("failed convert %s to dynamic message: %v", md.GetFullyQualifiedName(), err)
+ }
+
+ pkg := dsc.GetFile().GetPackage()
+ var scope string
+ if _, ok := dsc.(*desc.FileDescriptor); ok {
+ scope = pkg
+ } else {
+ scope = dsc.GetFullyQualifiedName()
+ }
+
+ options := map[int32][]option{}
+ var uninterpreted []interface{}
+ for _, fldset := range [][]*desc.FieldDescriptor{md.GetFields(), mf.GetExtensionRegistry().AllExtensionsForType(md.GetFullyQualifiedName())} {
+ for _, fld := range fldset {
+ if dm.HasField(fld) {
+ val := dm.GetField(fld)
+ var opts []option
+ var name string
+ if fld.IsExtension() {
+ name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+ } else {
+ name = fld.GetName()
+ }
+ switch val := val.(type) {
+ case []interface{}:
+ if fld.GetNumber() == internal.UninterpretedOptionsTag {
+ // we handle uninterpreted options differently
+ uninterpreted = val
+ continue
+ }
+
+ for _, e := range val {
+ if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := fld.GetEnumType().FindValueByNumber(e.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ e = ev
+ }
+ var name string
+ if fld.IsExtension() {
+ name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+ } else {
+ name = fld.GetName()
+ }
+ opts = append(opts, option{name: name, val: e})
+ }
+ case map[interface{}]interface{}:
+ for k := range sortKeys(val) {
+ v := val[k]
+ vf := fld.GetMapValueType()
+ if vf.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := vf.GetEnumType().FindValueByNumber(v.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ v = ev
+ }
+ entry := mf.NewDynamicMessage(fld.GetMessageType())
+ entry.SetFieldByNumber(1, k)
+ entry.SetFieldByNumber(2, v)
+ opts = append(opts, option{name: name, val: entry})
+ }
+ default:
+ if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := fld.GetEnumType().FindValueByNumber(val.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ val = ev
+ }
+ opts = append(opts, option{name: name, val: val})
+ }
+ if len(opts) > 0 {
+ options[fld.GetNumber()] = opts
+ }
+ }
+ }
+ }
+
+ // if there are uninterpreted options, add those too
+ if len(uninterpreted) > 0 {
+ opts := make([]option, len(uninterpreted))
+ for i, u := range uninterpreted {
+ var unint *descriptor.UninterpretedOption
+ if un, ok := u.(*descriptor.UninterpretedOption); ok {
+ unint = un
+ } else {
+ dm := u.(*dynamic.Message)
+ unint = &descriptor.UninterpretedOption{}
+ if err := dm.ConvertTo(unint); err != nil {
+ return nil, err
+ }
+ }
+
+ var buf bytes.Buffer
+ for ni, n := range unint.Name {
+ if ni > 0 {
+ buf.WriteByte('.')
+ }
+ if n.GetIsExtension() {
+ fmt.Fprintf(&buf, "(%s)", n.GetNamePart())
+ } else {
+ buf.WriteString(n.GetNamePart())
+ }
+ }
+
+ var v interface{}
+ switch {
+ case unint.IdentifierValue != nil:
+ v = ident(unint.GetIdentifierValue())
+ case unint.StringValue != nil:
+ v = string(unint.GetStringValue())
+ case unint.DoubleValue != nil:
+ v = unint.GetDoubleValue()
+ case unint.PositiveIntValue != nil:
+ v = unint.GetPositiveIntValue()
+ case unint.NegativeIntValue != nil:
+ v = unint.GetNegativeIntValue()
+ case unint.AggregateValue != nil:
+ v = ident(unint.GetAggregateValue())
+ }
+
+ opts[i] = option{name: buf.String(), val: v}
+ }
+ options[internal.UninterpretedOptionsTag] = opts
+ }
+
+ return options, nil
+}
+
+func optionsAsElementAddrs(optionsTag int32, order int, opts map[int32][]option) []elementAddr {
+ var optAddrs []elementAddr
+ for tag := range opts {
+ optAddrs = append(optAddrs, elementAddr{elementType: optionsTag, elementIndex: int(tag), order: order})
+ }
+ sort.Sort(optionsByName{addrs: optAddrs, opts: opts})
+ return optAddrs
+}
+
// quotedString implements the text format for string literals for protocol
// buffers. This form is also acceptable for string literals in option values
// by the protocol buffer compiler, protoc.
func quotedString(s string) string {
	var b bytes.Buffer
	b.WriteByte('"')
	// Loop over the bytes, not the runes.
	for i := 0; i < len(s); i++ {
		// Divergence from C++: we don't escape apostrophes.
		// There's no need to escape them, and the C++ parser
		// copes with a naked apostrophe.
		switch c := s[i]; c {
		case '\n':
			b.WriteString("\\n")
		case '\r':
			b.WriteString("\\r")
		case '\t':
			b.WriteString("\\t")
		case '"':
			// BUG FIX: emit backslash AND the quote character. Previously
			// only "\\" was written, which dropped the quote entirely and
			// produced an incorrect escape (e.g. `a"b` became `"a\b"`,
			// which re-parses as a backspace escape).
			b.WriteString("\\\"")
		case '\\':
			b.WriteString("\\\\")
		default:
			if c >= 0x20 && c < 0x7f {
				// printable ASCII passes through unchanged
				b.WriteByte(c)
			} else {
				// everything else becomes a 3-digit octal escape
				fmt.Fprintf(&b, "\\%03o", c)
			}
		}
	}
	b.WriteByte('"')

	return b.String()
}
+
// elementAddr identifies one element of a descriptor: the tag of the field
// within the parent descriptor proto that holds the element (elementType)
// and the element's index within that field. A non-default order forces an
// explicit relative position during sorting.
type elementAddr struct {
	elementType int32
	elementIndex int
	order int
}

// elementAddrs is a sortable collection of element addresses belonging to a
// single descriptor (dsc), along with that descriptor's extracted options.
type elementAddrs struct {
	addrs []elementAddr
	dsc interface{}
	opts map[int32][]option
}

// Len implements sort.Interface.
func (a elementAddrs) Len() int {
	return len(a.addrs)
}

// Less implements sort.Interface: explicit order first, then element type,
// then a type-specific rule for elements of the same type.
func (a elementAddrs) Less(i, j int) bool {
	// explicit order is considered first
	if a.addrs[i].order < a.addrs[j].order {
		return true
	} else if a.addrs[i].order > a.addrs[j].order {
		return false
	}
	// if order is equal, sort by element type
	if a.addrs[i].elementType < a.addrs[j].elementType {
		return true
	} else if a.addrs[i].elementType > a.addrs[j].elementType {
		return false
	}

	di := a.at(a.addrs[i])
	dj := a.at(a.addrs[j])

	switch vi := di.(type) {
	case *desc.FieldDescriptor:
		// fields are ordered by tag number
		vj := dj.(*desc.FieldDescriptor)
		// regular fields before extensions; extensions grouped by extendee
		if !vi.IsExtension() && vj.IsExtension() {
			return true
		} else if vi.IsExtension() && !vj.IsExtension() {
			return false
		} else if vi.IsExtension() && vj.IsExtension() {
			if vi.GetOwner() != vj.GetOwner() {
				return vi.GetOwner().GetFullyQualifiedName() < vj.GetOwner().GetFullyQualifiedName()
			}
		}
		return vi.GetNumber() < vj.GetNumber()

	case *desc.EnumValueDescriptor:
		// enum values ordered by number then name
		vj := dj.(*desc.EnumValueDescriptor)
		if vi.GetNumber() == vj.GetNumber() {
			return vi.GetName() < vj.GetName()
		}
		return vi.GetNumber() < vj.GetNumber()

	case *descriptor.DescriptorProto_ExtensionRange:
		// extension ranges ordered by tag
		return vi.GetStart() < dj.(*descriptor.DescriptorProto_ExtensionRange).GetStart()

	case reservedRange:
		// reserved ranges ordered by tag, too
		return vi.start < dj.(reservedRange).start

	case string:
		// reserved names lexically sorted
		return vi < dj.(string)

	case pkg:
		// package names lexically sorted
		return vi < dj.(pkg)

	case imp:
		// imports lexically sorted
		return vi < dj.(imp)

	case []option:
		// options sorted by name, extensions last
		return optionLess(vi, dj.([]option))

	default:
		// all other descriptors ordered by name
		return di.(desc.Descriptor).GetName() < dj.(desc.Descriptor).GetName()
	}
}

// Swap implements sort.Interface.
func (a elementAddrs) Swap(i, j int) {
	a.addrs[i], a.addrs[j] = a.addrs[j], a.addrs[i]
}
+
// at resolves an element address to the concrete element it refers to,
// dispatching on the type of the owning descriptor: an option group, a
// child descriptor, a reserved range/name, a package name, or an import.
// It panics if the address does not correspond to the descriptor type.
func (a elementAddrs) at(addr elementAddr) interface{} {
	switch dsc := a.dsc.(type) {
	case *desc.FileDescriptor:
		switch addr.elementType {
		case internal.File_packageTag:
			return pkg(dsc.GetPackage())
		case internal.File_dependencyTag:
			return imp(dsc.AsFileDescriptorProto().GetDependency()[addr.elementIndex])
		case internal.File_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.File_messagesTag:
			return dsc.GetMessageTypes()[addr.elementIndex]
		case internal.File_enumsTag:
			return dsc.GetEnumTypes()[addr.elementIndex]
		case internal.File_servicesTag:
			return dsc.GetServices()[addr.elementIndex]
		case internal.File_extensionsTag:
			return dsc.GetExtensions()[addr.elementIndex]
		}
	case *desc.MessageDescriptor:
		switch addr.elementType {
		case internal.Message_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Message_fieldsTag:
			return dsc.GetFields()[addr.elementIndex]
		case internal.Message_nestedMessagesTag:
			return dsc.GetNestedMessageTypes()[addr.elementIndex]
		case internal.Message_enumsTag:
			return dsc.GetNestedEnumTypes()[addr.elementIndex]
		case internal.Message_extensionsTag:
			return dsc.GetNestedExtensions()[addr.elementIndex]
		case internal.Message_extensionRangeTag:
			return dsc.AsDescriptorProto().GetExtensionRange()[addr.elementIndex]
		case internal.Message_reservedRangeTag:
			// message reserved range ends are exclusive; store as inclusive
			rng := dsc.AsDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd() - 1}
		case internal.Message_reservedNameTag:
			return dsc.AsDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.FieldDescriptor:
		if addr.elementType == internal.Field_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.OneOfDescriptor:
		switch addr.elementType {
		case internal.OneOf_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case -internal.Message_fieldsTag:
			// negated tag: a member field, stored on the owning message
			return dsc.GetOwner().GetFields()[addr.elementIndex]
		}
	case *desc.EnumDescriptor:
		switch addr.elementType {
		case internal.Enum_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Enum_valuesTag:
			return dsc.GetValues()[addr.elementIndex]
		case internal.Enum_reservedRangeTag:
			// enum reserved range ends are already inclusive
			rng := dsc.AsEnumDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd()}
		case internal.Enum_reservedNameTag:
			return dsc.AsEnumDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.EnumValueDescriptor:
		if addr.elementType == internal.EnumVal_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.ServiceDescriptor:
		switch addr.elementType {
		case internal.Service_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Service_methodsTag:
			return dsc.GetMethods()[addr.elementIndex]
		}
	case *desc.MethodDescriptor:
		if addr.elementType == internal.Method_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case extensionRange:
		if addr.elementType == internal.ExtensionRange_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	}

	panic(fmt.Sprintf("location for unknown field %d of %T", addr.elementType, a.dsc))
}
+
// extensionRange is a pseudo-descriptor pairing a message's extension range
// with its owning message, since ranges have no descriptor type of their own.
type extensionRange struct {
	owner *desc.MessageDescriptor
	extRange *descriptor.DescriptorProto_ExtensionRange
}

// elementSrcOrder sorts element addresses by their position in the original
// source file, using the spans recorded in sourceInfo.
type elementSrcOrder struct {
	elementAddrs
	sourceInfo internal.SourceInfoMap
	prefix []int32
}

// Less implements sort.Interface using source code spans. A negative index
// addresses a pseudo-element (path is prefix plus the negated index); a
// negative type addresses a sibling of the parent (path drops the last two
// prefix elements). Elements without source info generally sort after those
// with it, except package and option elements, which sort first.
func (a elementSrcOrder) Less(i, j int) bool {
	ti := a.addrs[i].elementType
	ei := a.addrs[i].elementIndex

	tj := a.addrs[j].elementType
	ej := a.addrs[j].elementIndex

	var si, sj *descriptor.SourceCodeInfo_Location
	if ei < 0 {
		si = a.sourceInfo.Get(append(a.prefix, -int32(ei)))
	} else if ti < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		si = a.sourceInfo.Get(append(p, ti, int32(ei)))
	} else {
		si = a.sourceInfo.Get(append(a.prefix, ti, int32(ei)))
	}
	if ej < 0 {
		sj = a.sourceInfo.Get(append(a.prefix, -int32(ej)))
	} else if tj < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		sj = a.sourceInfo.Get(append(p, tj, int32(ej)))
	} else {
		sj = a.sourceInfo.Get(append(a.prefix, tj, int32(ej)))
	}

	if (si == nil) != (sj == nil) {
		// generally, we put unknown elements after known ones;
		// except package and option elements go first

		// i will be unknown and j will be known
		swapped := false
		if si != nil {
			si, sj = sj, si
			// no need to swap ti and tj because we don't use tj anywhere below
			ti = tj
			swapped = true
		}
		switch a.dsc.(type) {
		case *desc.FileDescriptor:
			if ti == internal.File_packageTag || ti == internal.File_optionsTag {
				return !swapped
			}
		case *desc.MessageDescriptor:
			if ti == internal.Message_optionsTag {
				return !swapped
			}
		case *desc.EnumDescriptor:
			if ti == internal.Enum_optionsTag {
				return !swapped
			}
		case *desc.ServiceDescriptor:
			if ti == internal.Service_optionsTag {
				return !swapped
			}
		}
		return swapped

	} else if si == nil || sj == nil {
		// let stable sort keep unknown elements in same relative order
		return false
	}

	// compare spans lexicographically; a shorter span that is a prefix of
	// the other sorts first
	for idx := 0; idx < len(sj.Span); idx++ {
		if idx >= len(si.Span) {
			return true
		}
		if si.Span[idx] < sj.Span[idx] {
			return true
		}
		if si.Span[idx] > sj.Span[idx] {
			return false
		}
	}
	return false
}
+
// optionsByName sorts option element addresses by the name of their first
// option, via optionLess: non-extension options before extensions, then
// lexically.
type optionsByName struct {
	addrs []elementAddr
	opts map[int32][]option
}

// Len implements sort.Interface.
func (o optionsByName) Len() int {
	return len(o.addrs)
}

// Less implements sort.Interface; the element index is the option tag used
// to look up each option group.
func (o optionsByName) Less(i, j int) bool {
	oi := o.opts[int32(o.addrs[i].elementIndex)]
	oj := o.opts[int32(o.addrs[j].elementIndex)]
	return optionLess(oi, oj)
}
+
// optionLess compares two option lists by the name of their first option.
// Custom options (whose names are parenthesized, e.g. "(foo.bar)") sort
// after non-custom options; within each group, names compare lexically.
// Both lists are assumed non-empty with non-empty names.
func optionLess(i, j []option) bool {
	ni := i[0].name
	nj := j[0].name
	if ni[0] != '(' && nj[0] == '(' {
		return true
	} else if ni[0] == '(' && nj[0] != '(' {
		return false
	}
	return ni < nj
}
+
+func (o optionsByName) Swap(i, j int) {
+ o.addrs[i], o.addrs[j] = o.addrs[j], o.addrs[i]
+}
+
+func (p *Printer) printElement(isDecriptor bool, si *descriptor.SourceCodeInfo_Location, w *writer, indent int, el func(*writer)) {
+ includeComments := isDecriptor || p.includeCommentType(CommentsTokens)
+
+ if includeComments && si != nil {
+ p.printLeadingComments(si, w, indent)
+ }
+ el(w)
+ if includeComments && si != nil {
+ p.printTrailingComments(si, w, indent)
+ }
+ if indent >= 0 && !w.newline {
+ // if we're not printing inline but element did not have trailing newline, add one now
+ fmt.Fprintln(w)
+ }
+}
+
+func (p *Printer) printElementString(si *descriptor.SourceCodeInfo_Location, w *writer, indent int, str string) {
+ p.printElement(false, si, w, inline(indent), func(w *writer) {
+ fmt.Fprintf(w, "%s ", str)
+ })
+}
+
+func (p *Printer) includeCommentType(c CommentType) bool {
+ return (p.OmitComments & c) == 0
+}
+
// printLeadingComments emits the detached comments and the leading comment
// attached to the given source location, subject to the printer's comment
// filters. It reports whether the printed output ends in a newline, so the
// caller knows whether the next element starts on a fresh line. A negative
// indent means the surrounding element is being printed inline.
func (p *Printer) printLeadingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) bool {
	endsInNewLine := false

	if p.includeCommentType(CommentsDetached) {
		for _, c := range si.GetLeadingDetachedComments() {
			if p.printComment(c, w, indent, true) {
				// if comment ended in newline, add another newline to separate
				// this comment from the next
				p.newLine(w)
				endsInNewLine = true
			} else if indent < 0 {
				// comment did not end in newline and we are trying to inline?
				// just add a space to separate this comment from what follows
				fmt.Fprint(w, " ")
				endsInNewLine = false
			} else {
				// comment did not end in newline and we are *not* trying to inline?
				// add newline to end of comment and add another to separate this
				// comment from what follows
				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				p.newLine(w)
				endsInNewLine = true
			}
		}
	}

	if p.includeCommentType(CommentsLeading) && si.GetLeadingComments() != "" {
		endsInNewLine = p.printComment(si.GetLeadingComments(), w, indent, true)
		if !endsInNewLine {
			if indent >= 0 {
				// leading comment didn't end with newline but needs one
				// (because we're *not* inlining)
				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				endsInNewLine = true
			} else {
				// space between comment and following element when inlined
				fmt.Fprint(w, " ")
			}
		}
	}

	return endsInNewLine
}
+
// printTrailingComments emits the trailing comment attached to the given
// source location, if trailing comments are enabled. When not inlining
// (indent >= 0) it ensures the comment ends with a newline; when inlining
// it appends a separating space instead.
func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) {
	if p.includeCommentType(CommentsTrailing) && si.GetTrailingComments() != "" {
		if !p.printComment(si.GetTrailingComments(), w, indent, p.TrailingCommentsOnSeparateLine) && indent >= 0 {
			// trailing comment didn't end with newline but needs one
			// (because we're *not* inlining)
			fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
		} else if indent < 0 {
			fmt.Fprint(w, " ")
		}
	}
}
+
// printComment renders the given comment text (one entry per original
// source line, newline-separated) to w at the given indent. It uses either
// "//" line comments or "/* ... */" multi-line style per printer settings;
// inlined comments (negative indent) always use multi-line style so they
// don't swallow the rest of the line. forceNextLine forces the comment onto
// its own line when prior output did not end a line. It reports whether the
// printed comment ended with a newline.
func (p *Printer) printComment(comments string, w *writer, indent int, forceNextLine bool) bool {
	if comments == "" {
		return false
	}

	var multiLine bool
	if indent < 0 {
		// use multi-line style when inlining
		multiLine = true
	} else {
		multiLine = p.PreferMultiLineStyleComments
	}
	if multiLine && strings.Contains(comments, "*/") {
		// can't emit '*/' in a multi-line style comment
		multiLine = false
	}

	lines := strings.Split(comments, "\n")

	// first, remove leading and trailing blank lines
	if lines[0] == "" {
		lines = lines[1:]
	}
	if lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	if len(lines) == 0 {
		return false
	}

	if indent >= 0 && !w.newline {
		// last element did not have trailing newline, so we
		// either need to tack on newline or, if comment is
		// just one line, inline it on the end
		if forceNextLine || len(lines) > 1 {
			fmt.Fprintln(w)
		} else {
			if !w.space {
				fmt.Fprint(w, " ")
			}
			indent = inline(indent)
		}
	}

	if len(lines) == 1 && multiLine {
		p.indent(w, indent)
		line := lines[0]
		if line[0] == ' ' && line[len(line)-1] != ' ' {
			// add trailing space for symmetry
			line += " "
		}
		fmt.Fprintf(w, "/*%s*/", line)
		if indent >= 0 {
			fmt.Fprintln(w)
			return true
		}
		return false
	}

	if multiLine {
		// multi-line style comments that actually span multiple lines
		// get a blank line before and after so that comment renders nicely
		lines = append(lines, "", "")
		copy(lines[1:], lines)
		lines[0] = ""
	}

	for i, l := range lines {
		p.maybeIndent(w, indent, i > 0)
		if multiLine {
			if i == 0 {
				// first line
				fmt.Fprintf(w, "/*%s\n", strings.TrimRight(l, " \t"))
			} else if i == len(lines)-1 {
				// last line
				if l == "" {
					fmt.Fprint(w, " */")
				} else {
					fmt.Fprintf(w, " *%s*/", l)
				}
				if indent >= 0 {
					fmt.Fprintln(w)
				}
			} else {
				fmt.Fprintf(w, " *%s\n", strings.TrimRight(l, " \t"))
			}
		} else {
			fmt.Fprintf(w, "//%s\n", strings.TrimRight(l, " \t"))
		}
	}

	// single-line comments always end in newline; multi-line comments only
	// end in newline for non-negative (e.g. non-inlined) indentation
	return !multiLine || indent >= 0
}
+
+func (p *Printer) indent(w io.Writer, indent int) {
+ for i := 0; i < indent; i++ {
+ fmt.Fprint(w, p.Indent)
+ }
+}
+
+func (p *Printer) maybeIndent(w io.Writer, indent int, requireIndent bool) {
+ if indent < 0 && requireIndent {
+ p.indent(w, -indent)
+ } else {
+ p.indent(w, indent)
+ }
+}
+
// writer wraps an io.Writer and tracks trailing whitespace so the printer
// can drop a pending space before certain punctuation and detect whether
// output currently sits at the start of a line.
type writer struct {
	io.Writer
	err     error // first error reported by the underlying writer, if any
	space   bool  // a trailing space is pending (deferred, not yet written)
	newline bool  // last byte written was a newline
}
+
+func newWriter(w io.Writer) *writer {
+ return &writer{Writer: w, newline: true}
+}
+
// Write implements io.Writer. A single trailing space in p is not written
// immediately but deferred until the next call, so it can be dropped when
// the following output begins with ';', ',', or ']'. The returned count
// pretends any deferred space was written, so callers still see len(p) on
// success. The first underlying write error is also recorded in w.err.
func (w *writer) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	w.newline = false

	if w.space {
		// skip any trailing space if the following
		// character is semicolon, comma, or close bracket
		if p[0] != ';' && p[0] != ',' && p[0] != ']' {
			_, err := w.Writer.Write([]byte{' '})
			if err != nil {
				w.err = err
				return 0, err
			}
		}
		w.space = false
	}

	if p[len(p)-1] == ' ' {
		// defer the trailing space until the next write
		w.space = true
		p = p[:len(p)-1]
	}
	if len(p) > 0 && p[len(p)-1] == '\n' {
		w.newline = true
	}

	num, err := w.Writer.Write(p)
	if err != nil {
		w.err = err
	} else if w.space {
		// pretend space was written
		num++
	}
	return num, err
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..b1fbe7c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,714 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// defaultDeterminism, if true, causes Marshal to produce deterministic
// output. It exists so that the output of proto.Marshal(...) can be made
// deterministic (that API has no way to convey determinism intent).
// **This is only used from tests.**
var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+ var b codedBuffer
+ if err := m.marshal(&b, defaultDeterminism); err != nil {
+ return nil, err
+ }
+ return b.buf, nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+ codedBuf := codedBuffer{buf: b}
+ if err := m.marshal(&codedBuf, defaultDeterminism); err != nil {
+ return nil, err
+ }
+ return codedBuf.buf, nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+ var b codedBuffer
+ if err := m.marshal(&b, true); err != nil {
+ return nil, err
+ }
+ return b.buf, nil
+}
+
+func (m *Message) marshal(b *codedBuffer, deterministic bool) error {
+ if err := m.marshalKnownFields(b, deterministic); err != nil {
+ return err
+ }
+ return m.marshalUnknownFields(b)
+}
+
// marshalKnownFields serializes every field that has a descriptor, in tag
// order, to b. It panics if a stored tag has no corresponding field
// descriptor, since that indicates a broken internal invariant.
func (m *Message) marshalKnownFields(b *codedBuffer, deterministic bool) error {
	for _, tag := range m.knownFieldTags() {
		itag := int32(tag)
		val := m.values[itag]
		fd := m.FindFieldDescriptor(itag)
		if fd == nil {
			panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
		}
		if err := marshalField(itag, fd, val, b, deterministic); err != nil {
			return err
		}
	}
	return nil
}
+
// marshalUnknownFields re-serializes any unrecognized fields retained from
// a previous unmarshal, preserving their original wire types. Group
// contents are appended verbatim, followed by a synthesized end-group tag.
func (m *Message) marshalUnknownFields(b *codedBuffer) error {
	for _, tag := range m.unknownFieldTags() {
		itag := int32(tag)
		sl := m.unknownFields[itag]
		for _, u := range sl {
			if err := b.encodeTagAndWireType(itag, u.Encoding); err != nil {
				return err
			}
			switch u.Encoding {
			case proto.WireBytes:
				if err := b.encodeRawBytes(u.Contents); err != nil {
					return err
				}
			case proto.WireStartGroup:
				// raw group contents were captured without the surrounding
				// group tags, so add the end-group tag back here
				b.buf = append(b.buf, u.Contents...)
				if err := b.encodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
					return err
				}
			case proto.WireFixed32:
				if err := b.encodeFixed32(u.Value); err != nil {
					return err
				}
			case proto.WireFixed64:
				if err := b.encodeFixed64(u.Value); err != nil {
					return err
				}
			case proto.WireVarint:
				if err := b.encodeVarint(u.Value); err != nil {
					return err
				}
			default:
				return proto.ErrInternalBadWireType
			}
		}
	}
	return nil
}
+
+func marshalField(tag int32, fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
+ if fd.IsMap() {
+ mp := val.(map[interface{}]interface{})
+ entryType := fd.GetMessageType()
+ keyType := entryType.FindFieldByNumber(1)
+ valType := entryType.FindFieldByNumber(2)
+ var entryBuffer codedBuffer
+ if deterministic {
+ keys := make([]interface{}, 0, len(mp))
+ for k := range mp {
+ keys = append(keys, k)
+ }
+ sort.Sort(sortable(keys))
+ for _, k := range keys {
+ v := mp[k]
+ entryBuffer.reset()
+ if err := marshalFieldElement(1, keyType, k, &entryBuffer, deterministic); err != nil {
+ return err
+ }
+ if err := marshalFieldElement(2, valType, v, &entryBuffer, deterministic); err != nil {
+ return err
+ }
+ if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+ return err
+ }
+ if err := b.encodeRawBytes(entryBuffer.buf); err != nil {
+ return err
+ }
+ }
+ } else {
+ for k, v := range mp {
+ entryBuffer.reset()
+ if err := marshalFieldElement(1, keyType, k, &entryBuffer, deterministic); err != nil {
+ return err
+ }
+ if err := marshalFieldElement(2, valType, v, &entryBuffer, deterministic); err != nil {
+ return err
+ }
+ if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+ return err
+ }
+ if err := b.encodeRawBytes(entryBuffer.buf); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ } else if fd.IsRepeated() {
+ sl := val.([]interface{})
+ wt, err := getWireType(fd.GetType())
+ if err != nil {
+ return err
+ }
+ if isPacked(fd) && len(sl) > 1 &&
+ (wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
+ // packed repeated field
+ var packedBuffer codedBuffer
+ for _, v := range sl {
+ if err := marshalFieldValue(fd, v, &packedBuffer, deterministic); err != nil {
+ return err
+ }
+ }
+ if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+ return err
+ }
+ return b.encodeRawBytes(packedBuffer.buf)
+ } else {
+ // non-packed repeated field
+ for _, v := range sl {
+ if err := marshalFieldElement(tag, fd, v, b, deterministic); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ } else {
+ return marshalFieldElement(tag, fd, val, b, deterministic)
+ }
+}
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+ opts := fd.AsFieldDescriptorProto().GetOptions()
+ // if set, use that value
+ if opts != nil && opts.Packed != nil {
+ return opts.GetPacked()
+ }
+ // if unset: proto2 defaults to false, proto3 to true
+ return fd.GetFile().IsProto3()
+}
+
// sortable is used to sort map keys. Values will be integers (int32, int64,
// uint32, and uint64), bools, or strings. It implements sort.Interface
// together with the Less and Swap methods below.
type sortable []interface{}

// Len returns the number of keys to sort.
func (s sortable) Len() int {
	return len(s)
}
+
// Less orders two keys, which must share the same dynamic type: numerically
// for integer kinds, lexically for strings, and false-before-true for
// bools. It panics for any other key type, which would indicate an invalid
// protobuf map key.
func (s sortable) Less(i, j int) bool {
	vi := s[i]
	vj := s[j]
	switch reflect.TypeOf(vi).Kind() {
	case reflect.Int32:
		return vi.(int32) < vj.(int32)
	case reflect.Int64:
		return vi.(int64) < vj.(int64)
	case reflect.Uint32:
		return vi.(uint32) < vj.(uint32)
	case reflect.Uint64:
		return vi.(uint64) < vj.(uint64)
	case reflect.String:
		return vi.(string) < vj.(string)
	case reflect.Bool:
		// false sorts before true
		return vi.(bool) && !vj.(bool)
	default:
		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
	}
}
+
+func (s sortable) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
// marshalFieldElement writes one tagged value: the field key (tag and wire
// type) followed by the encoded value. For group-typed fields it also
// writes the trailing end-group key, since marshalFieldValue emits only the
// group's contents.
func marshalFieldElement(tag int32, fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
	wt, err := getWireType(fd.GetType())
	if err != nil {
		return err
	}
	if err := b.encodeTagAndWireType(tag, wt); err != nil {
		return err
	}
	if err := marshalFieldValue(fd, val, b, deterministic); err != nil {
		return err
	}
	if wt == proto.WireStartGroup {
		return b.encodeTagAndWireType(tag, proto.WireEndGroup)
	}
	return nil
}
+
// marshalFieldValue writes a single value's encoding — without the field
// key, which the caller has already written — according to the field's
// declared type. The val argument must already hold the Go representation
// matching the field type (the type assertions panic otherwise). For
// groups, only the nested content is written; the caller is responsible for
// both the start-group and end-group tags.
func marshalFieldValue(fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
	switch fd.GetType() {
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		v := val.(bool)
		if v {
			return b.encodeVarint(1)
		} else {
			return b.encodeVarint(0)
		}

	case descriptor.FieldDescriptorProto_TYPE_ENUM,
		descriptor.FieldDescriptorProto_TYPE_INT32:
		v := val.(int32)
		return b.encodeVarint(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
		v := val.(int32)
		return b.encodeFixed32(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_SINT32:
		v := val.(int32)
		return b.encodeVarint(encodeZigZag32(v))

	case descriptor.FieldDescriptorProto_TYPE_UINT32:
		v := val.(uint32)
		return b.encodeVarint(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
		v := val.(uint32)
		return b.encodeFixed32(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_INT64:
		v := val.(int64)
		return b.encodeVarint(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
		v := val.(int64)
		return b.encodeFixed64(uint64(v))

	case descriptor.FieldDescriptorProto_TYPE_SINT64:
		v := val.(int64)
		return b.encodeVarint(encodeZigZag64(v))

	case descriptor.FieldDescriptorProto_TYPE_UINT64:
		v := val.(uint64)
		return b.encodeVarint(v)

	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
		v := val.(uint64)
		return b.encodeFixed64(v)

	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		v := val.(float64)
		return b.encodeFixed64(math.Float64bits(v))

	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		v := val.(float32)
		return b.encodeFixed32(uint64(math.Float32bits(v)))

	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		v := val.([]byte)
		return b.encodeRawBytes(v)

	case descriptor.FieldDescriptorProto_TYPE_STRING:
		v := val.(string)
		return b.encodeRawBytes(([]byte)(v))

	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		m := val.(proto.Message)
		if bytes, err := proto.Marshal(m); err != nil {
			return err
		} else {
			return b.encodeRawBytes(bytes)
		}

	case descriptor.FieldDescriptorProto_TYPE_GROUP:
		// just append the nested message to this buffer
		dm, ok := val.(*Message)
		if ok {
			return dm.marshal(b, deterministic)
		} else {
			m := val.(proto.Message)
			return b.encodeMessage(m)
		}
		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag

	default:
		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
	}
}
+
// getWireType maps a declared field type to the protobuf wire type used to
// encode it. Unrecognized types yield proto.ErrInternalBadWireType.
func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
	switch t {
	case descriptor.FieldDescriptorProto_TYPE_ENUM,
		descriptor.FieldDescriptorProto_TYPE_BOOL,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_SINT32,
		descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_SINT64,
		descriptor.FieldDescriptorProto_TYPE_UINT64:
		return proto.WireVarint, nil

	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
		descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return proto.WireFixed32, nil

	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return proto.WireFixed64, nil

	case descriptor.FieldDescriptorProto_TYPE_BYTES,
		descriptor.FieldDescriptorProto_TYPE_STRING,
		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
		return proto.WireBytes, nil

	case descriptor.FieldDescriptorProto_TYPE_GROUP:
		return proto.WireStartGroup, nil

	default:
		return 0, proto.ErrInternalBadWireType
	}
}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMerge(b); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+ return m.unmarshal(newCodedBuffer(b), false)
+}
+
// unmarshal reads tag/value pairs from buf until EOF or, when isGroup is
// true, until the matching end-group marker. Recognized fields are merged
// into the message; unrecognized ones are retained as unknown fields. An
// end-group marker outside a group, or EOF inside one, is an error.
func (m *Message) unmarshal(buf *codedBuffer, isGroup bool) error {
	for !buf.eof() {
		tagNumber, wireType, err := buf.decodeTagAndWireType()
		if err != nil {
			return err
		}
		if wireType == proto.WireEndGroup {
			if isGroup {
				// finished parsing group
				return nil
			} else {
				return proto.ErrInternalBadWireType
			}
		}
		fd := m.FindFieldDescriptor(tagNumber)
		if fd == nil {
			err := m.unmarshalUnknownField(tagNumber, wireType, buf)
			if err != nil {
				return err
			}
		} else {
			err := m.unmarshalKnownField(fd, wireType, buf)
			if err != nil {
				return err
			}
		}
	}
	if isGroup {
		// ran out of input without seeing the end-group marker
		return io.ErrUnexpectedEOF
	}
	return nil
}
+
// unmarshalSimpleField converts a raw numeric wire value v into the Go
// representation dictated by the field's declared type, range-checking the
// 32-bit types (NumericOverflowError on out-of-range values).
// Length-delimited types (bytes, string, message, group) cannot be
// represented as a simple numeric value and yield an error.
func unmarshalSimpleField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
	switch fd.GetType() {
	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return v != 0, nil
	case descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_FIXED32:
		if v > math.MaxUint32 {
			return nil, NumericOverflowError
		}
		return uint32(v), nil

	case descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_ENUM:
		s := int64(v)
		if s > math.MaxInt32 || s < math.MinInt32 {
			return nil, NumericOverflowError
		}
		return int32(s), nil

	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
		if v > math.MaxUint32 {
			return nil, NumericOverflowError
		}
		return int32(v), nil

	case descriptor.FieldDescriptorProto_TYPE_SINT32:
		if v > math.MaxUint32 {
			return nil, NumericOverflowError
		}
		return decodeZigZag32(v), nil

	case descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_FIXED64:
		return v, nil

	case descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
		return int64(v), nil

	case descriptor.FieldDescriptorProto_TYPE_SINT64:
		return decodeZigZag64(v), nil

	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		if v > math.MaxUint32 {
			return nil, NumericOverflowError
		}
		return math.Float32frombits(uint32(v)), nil

	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return math.Float64frombits(v), nil

	default:
		// bytes, string, message, and group cannot be represented as a simple numeric value
		return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
	}
}
+
// unmarshalLengthDelimitedField interprets a length-delimited payload for
// the given field: raw bytes, a string, or a nested message built via mf.
// For numeric types it parses the payload as a packed repeated value —
// even when the field is not declared repeated or packed — to remain
// compatible with serializers that used those encodings.
func unmarshalLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf *MessageFactory) (interface{}, error) {
	switch {
	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
		return bytes, nil

	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
		return string(bytes), nil

	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
		msg := mf.NewMessage(fd.GetMessageType())
		err := proto.Unmarshal(bytes, msg)
		if err != nil {
			return nil, err
		} else {
			return msg, nil
		}

	default:
		// even if the field is not repeated or not packed, we still parse it as such for
		// backwards compatibility (e.g. message we are de-serializing could have been both
		// repeated and packed at the time of serialization)
		packedBuf := newCodedBuffer(bytes)
		var slice []interface{}
		var val interface{}
		for !packedBuf.eof() {
			var v uint64
			var err error
			if varintTypes[fd.GetType()] {
				v, err = packedBuf.decodeVarint()
			} else if fixed32Types[fd.GetType()] {
				v, err = packedBuf.decodeFixed32()
			} else if fixed64Types[fd.GetType()] {
				v, err = packedBuf.decodeFixed64()
			} else {
				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
			}
			if err != nil {
				return nil, err
			}
			val, err = unmarshalSimpleField(fd, v)
			if err != nil {
				return nil, err
			}
			if fd.IsRepeated() {
				slice = append(slice, val)
			}
		}
		if fd.IsRepeated() {
			return slice, nil
		} else {
			// if not a repeated field, last value wins
			return val, nil
		}
	}
}
+
// unmarshalKnownField decodes one occurrence of a recognized field from b,
// using the wire type actually seen on the wire, and merges the resulting
// value into the message. Group-encoded values are parsed recursively when
// the nested message is dynamic; otherwise the group's extent is located
// first (via skipGroup) and its contents handed to proto.Unmarshal.
func (m *Message) unmarshalKnownField(fd *desc.FieldDescriptor, encoding int8, b *codedBuffer) error {
	var val interface{}
	var err error
	switch encoding {
	case proto.WireFixed32:
		var num uint64
		num, err = b.decodeFixed32()
		if err == nil {
			val, err = unmarshalSimpleField(fd, num)
		}
	case proto.WireFixed64:
		var num uint64
		num, err = b.decodeFixed64()
		if err == nil {
			val, err = unmarshalSimpleField(fd, num)
		}
	case proto.WireVarint:
		var num uint64
		num, err = b.decodeVarint()
		if err == nil {
			val, err = unmarshalSimpleField(fd, num)
		}

	case proto.WireBytes:
		if fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES {
			val, err = b.decodeRawBytes(true) // defensive copy
		} else if fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING {
			var raw []byte
			raw, err = b.decodeRawBytes(true) // defensive copy
			if err == nil {
				val = string(raw)
			}
		} else {
			// other types may alias the input, since they are parsed immediately
			var raw []byte
			raw, err = b.decodeRawBytes(false)
			if err == nil {
				val, err = unmarshalLengthDelimitedField(fd, raw, m.mf)
			}
		}

	case proto.WireStartGroup:
		if fd.GetMessageType() == nil {
			return fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
		}
		msg := m.mf.NewMessage(fd.GetMessageType())
		if dm, ok := msg.(*Message); ok {
			err = dm.unmarshal(b, true)
			if err == nil {
				val = dm
			}
		} else {
			var groupEnd, dataEnd int
			groupEnd, dataEnd, err = skipGroup(b)
			if err == nil {
				err = proto.Unmarshal(b.buf[b.index:dataEnd], msg)
				if err == nil {
					val = msg
				}
				// advance past the end-group tag either way
				b.index = groupEnd
			}
		}

	default:
		return proto.ErrInternalBadWireType
	}
	if err != nil {
		return err
	}

	return mergeField(m, fd, val)
}
+
// unmarshalUnknownField consumes a field with no descriptor from b and
// stores its raw value, keyed by tag number, so it can be round-tripped
// when the message is re-serialized. Group contents are captured verbatim
// without the enclosing group tags (marshalUnknownFields adds them back).
func (m *Message) unmarshalUnknownField(tagNumber int32, encoding int8, b *codedBuffer) error {
	u := UnknownField{Encoding: encoding}
	var err error
	switch encoding {
	case proto.WireFixed32:
		u.Value, err = b.decodeFixed32()
	case proto.WireFixed64:
		u.Value, err = b.decodeFixed64()
	case proto.WireVarint:
		u.Value, err = b.decodeVarint()
	case proto.WireBytes:
		u.Contents, err = b.decodeRawBytes(true)
	case proto.WireStartGroup:
		var groupEnd, dataEnd int
		groupEnd, dataEnd, err = skipGroup(b)
		if err == nil {
			// copy the group body (excluding the end-group tag), then skip
			// past the end-group tag
			u.Contents = make([]byte, dataEnd-b.index)
			copy(u.Contents, b.buf[b.index:])
			b.index = groupEnd
		}
	default:
		err = proto.ErrInternalBadWireType
	}
	if err != nil {
		return err
	}
	if m.unknownFields == nil {
		m.unknownFields = map[int32][]UnknownField{}
	}
	m.unknownFields[tagNumber] = append(m.unknownFields[tagNumber], u)
	return nil
}
+
// skipGroup scans b from the current read index to find the end of a group
// whose start-group tag has already been consumed. It returns the index
// just past the matching end-group tag and the index where that end-group
// tag begins (i.e. the end of the group's data). Nested groups are skipped
// recursively. The deferred function restores b.index, so the buffer's read
// position is left unchanged on return.
func skipGroup(b *codedBuffer) (int, int, error) {
	bs := b.buf
	start := b.index
	defer func() {
		b.index = start
	}()
	for {
		fieldStart := b.index
		// read a field tag
		_, wireType, err := b.decodeTagAndWireType()
		if err != nil {
			return 0, 0, err
		}
		// skip past the field's data
		switch wireType {
		case proto.WireFixed32:
			if !b.skip(4) {
				return 0, 0, io.ErrUnexpectedEOF
			}
		case proto.WireFixed64:
			if !b.skip(8) {
				return 0, 0, io.ErrUnexpectedEOF
			}
		case proto.WireVarint:
			// skip varint by finding last byte (has high bit unset)
			i := b.index
			for {
				if i >= len(bs) {
					return 0, 0, io.ErrUnexpectedEOF
				}
				if bs[i]&0x80 == 0 {
					break
				}
				i++
			}
			b.index = i + 1
		case proto.WireBytes:
			l, err := b.decodeVarint()
			if err != nil {
				return 0, 0, err
			}
			if !b.skip(int(l)) {
				return 0, 0, io.ErrUnexpectedEOF
			}
		case proto.WireStartGroup:
			endIndex, _, err := skipGroup(b)
			if err != nil {
				return 0, 0, err
			}
			b.index = endIndex
		case proto.WireEndGroup:
			return b.index, fieldStart, nil
		default:
			return 0, 0, proto.ErrInternalBadWireType
		}
	}
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/codec.go b/vendor/github.com/jhump/protoreflect/dynamic/codec.go
new file mode 100644
index 0000000..9d70ab7
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/codec.go
@@ -0,0 +1,350 @@
+package dynamic
+
+// A reader/writer type that assists with encoding and decoding protobuf's binary representation.
+// This code is largely a fork of proto.Buffer, which cannot be used because it has no exported
+// field or method that provides access to its underlying reader index.
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+)
+
// ErrOverflow is returned when a varint is too large to be represented in a
// 64-bit value.
var ErrOverflow = errors.New("proto: integer overflow")
+
// codedBuffer is a reader/writer for protobuf's binary wire format. It is
// largely a fork of proto.Buffer, which could not be used here because it
// does not export its underlying read index.
type codedBuffer struct {
	buf   []byte // encoded data; also the append target when encoding
	index int    // current read position within buf
}
+
+func newCodedBuffer(buf []byte) *codedBuffer {
+ return &codedBuffer{buf: buf}
+}
+
+func (cb *codedBuffer) reset() {
+ cb.buf = []byte(nil)
+ cb.index = 0
+}
+
+func (cb *codedBuffer) eof() bool {
+ return cb.index >= len(cb.buf)
+}
+
+func (cb *codedBuffer) skip(count int) bool {
+ newIndex := cb.index + count
+ if newIndex > len(cb.buf) {
+ return false
+ }
+ cb.index = newIndex
+ return true
+}
+
// decodeVarintSlow is the fallback varint decoder used when fewer than 10
// bytes remain in the buffer; it checks for truncation at every byte. A
// value needing more than 64 bits yields ErrOverflow. The read index is
// only advanced on success.
func (cb *codedBuffer) decodeVarintSlow() (x uint64, err error) {
	i := cb.index
	l := len(cb.buf)

	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		b := cb.buf[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			cb.index = i
			return
		}
	}

	// The number is too large to represent in a 64-bit value.
	err = ErrOverflow
	return
}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *codedBuffer) decodeVarint() (uint64, error) {
+ i := cb.index
+ buf := cb.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ cb.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return cb.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x := uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, ErrOverflow
+
+done:
+ cb.index = i
+ return x, nil
+}
+
// decodeTagAndWireType reads one varint-encoded field key and splits it
// into its tag number and wire type. Tags that do not fit in an int32 are
// rejected.
func (cb *codedBuffer) decodeTagAndWireType() (tag int32, wireType int8, err error) {
	var v uint64
	v, err = cb.decodeVarint()
	if err != nil {
		return
	}
	// low 3 bits hold the wire type (the original comment said 7 bits, but
	// the mask below is & 7)
	wireType = int8(v & 7)
	// the remaining bits are the int32 tag number
	v = v >> 3
	if v > math.MaxInt32 {
		err = fmt.Errorf("tag number out of range: %d", v)
		return
	}
	tag = int32(v)
	return
}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *codedBuffer) decodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 8
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-8])
+ x |= uint64(cb.buf[i-7]) << 8
+ x |= uint64(cb.buf[i-6]) << 16
+ x |= uint64(cb.buf[i-5]) << 24
+ x |= uint64(cb.buf[i-4]) << 32
+ x |= uint64(cb.buf[i-3]) << 40
+ x |= uint64(cb.buf[i-2]) << 48
+ x |= uint64(cb.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *codedBuffer) decodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 4
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-4])
+ x |= uint64(cb.buf[i-3]) << 8
+ x |= uint64(cb.buf[i-2]) << 16
+ x |= uint64(cb.buf[i-1]) << 24
+ return
+}
+
// decodeZigZag32 converts a zig-zag-encoded value back to a signed 32-bit
// integer (0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...). Only the low 32 bits of
// v are considered.
func decodeZigZag32(v uint64) int32 {
	u := uint32(v)
	return int32(u>>1) ^ -int32(u&1)
}
+
// decodeZigZag64 converts a zig-zag-encoded value back to a signed 64-bit
// integer (0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...).
func decodeZigZag64(v uint64) int64 {
	return int64(v>>1) ^ -int64(v&1)
}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
// decodeRawBytes reads a length-prefixed byte slice from the buffer: the
// wire format for bytes and string fields and for embedded messages. When
// alloc is true the result is a fresh copy; otherwise it aliases the
// buffer's backing array and is only valid while that array is unchanged.
func (cb *codedBuffer) decodeRawBytes(alloc bool) (buf []byte, err error) {
	n, err := cb.decodeVarint()
	if err != nil {
		return nil, err
	}

	nb := int(n)
	if nb < 0 {
		// int(n) went negative: length does not fit in an int
		return nil, fmt.Errorf("proto: bad byte length %d", nb)
	}
	end := cb.index + nb
	if end < cb.index || end > len(cb.buf) {
		// end < cb.index guards against integer overflow
		return nil, io.ErrUnexpectedEOF
	}

	if !alloc {
		buf = cb.buf[cb.index:end]
		cb.index += nb
		return
	}

	buf = make([]byte, nb)
	copy(buf, cb.buf[cb.index:])
	cb.index += nb
	return
}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *codedBuffer) encodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ cb.buf = append(cb.buf, uint8(x))
+ return nil
+}
+
+func (cb *codedBuffer) encodeTagAndWireType(tag int32, wireType int8) error {
+ v := uint64((int64(tag) << 3) | int64(wireType))
+ return cb.encodeVarint(v)
+}
+
+// TODO: decodeTagAndWireType
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *codedBuffer) encodeFixed64(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *codedBuffer) encodeFixed32(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
// encodeZigZag64 zig-zag-encodes v so that values of small magnitude,
// positive or negative, become small unsigned values (0 -> 0, -1 -> 1,
// 1 -> 2, -2 -> 3, ...).
func encodeZigZag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}
+
// encodeZigZag32 zig-zag-encodes v so that values of small magnitude,
// positive or negative, become small unsigned values (0 -> 0, -1 -> 1,
// 1 -> 2, -2 -> 3, ...).
func encodeZigZag32(v int32) uint64 {
	return uint64(uint32(v<<1) ^ uint32(v>>31))
}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *codedBuffer) encodeRawBytes(b []byte) error {
+ cb.encodeVarint(uint64(len(b)))
+ cb.buf = append(cb.buf, b...)
+ return nil
+}
+
// encodeMessage marshals pm and appends it to the buffer with a varint
// length prefix. If pm serializes to zero bytes, nothing is written at all
// — not even a zero-length prefix. NOTE(review): the only caller in this
// file uses this for group-typed values, where a length prefix looks
// suspect — confirm against upstream before relying on this path.
func (cb *codedBuffer) encodeMessage(pm proto.Message) error {
	bytes, err := proto.Marshal(pm)
	if err != nil {
		return err
	}
	if len(bytes) == 0 {
		return nil
	}

	if err := cb.encodeVarint(uint64(len(bytes))); err != nil {
		return err
	}
	cb.buf = append(cb.buf, bytes...)
	return nil
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..c329fcd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,163 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
+//
+//
+// Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. Similar goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enums fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+//
+// Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not setup with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+// 1. A dynamic message is created with a descriptor, A, and then
+// de-serialized from a stream of bytes. The stream includes an
+// unrecognized tag T. The message will include tag T in its unrecognized
+// field set.
+// 2. Another call site retrieves a newer descriptor, A', which includes a
+// newly added field with tag T.
+// 3. That other call site then uses a FieldDescriptor to access the value of
+// the new field. This will cause the dynamic message to parse the bytes
+// for the unknown tag T and store them as a known field.
+// 4. Subsequent operations for tag T, including setting the field using only
+// tag number or de-serializing a stream that includes tag T, will operate
+// as if that tag were part of the original descriptor, A.
+//
+//
+// Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+//
+// Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extension fields
+// (for proto2 messages).
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..ac7e52f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2710 @@
+package dynamic
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+// Cached reflect.Types used for runtime type checks when getting/setting
+// field values.
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// These sets classify field types by the wire encoding used for their
+// values: varint, 4-byte fixed, or 8-byte fixed. They are populated once
+// in init below and never mutated afterwards.
+var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+
+func init() {
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+ md *desc.MessageDescriptor // descriptor for this message's type
+ er *ExtensionRegistry // used to recognize extension fields during parsing
+ mf *MessageFactory // used to instantiate nested messages
+ extraFields map[int32]*desc.FieldDescriptor // fields learned after construction (see doc.go "Unrecognized Fields")
+ values map[int32]interface{} // known field values, keyed by tag number
+ unknownFields map[int32][]UnknownField // unparsed fields, keyed by tag number
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+ // Encoding indicates how the unknown field was encoded on the wire. If it
+ // is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+ // the raw bytes. If it is proto.WireFixed32 then the data is in the least
+ // significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+ // Value.
+ Encoding int8
+ Contents []byte
+ Value uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+ return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+ mf := NewMessageFactoryWithExtensionRegistry(er)
+ return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+ var er *ExtensionRegistry
+ if mf != nil {
+ // inherit the factory's extension registry so unknown extension
+ // fields can be recognized during de-serialization
+ er = mf.er
+ }
+ return &Message{
+ md: md,
+ mf: mf,
+ er: er,
+ }
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+ return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+ mf := NewMessageFactoryWithExtensionRegistry(er)
+ return AsDynamicMessageWithMessageFactory(msg, mf)
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+ if dm, ok := msg.(*Message); ok {
+ // already dynamic; no conversion needed
+ return dm, nil
+ }
+ md, err := desc.LoadMessageDescriptorForMessage(msg)
+ if err != nil {
+ return nil, err
+ }
+ dm := NewMessageWithMessageFactory(md, mf)
+ // copy all field values from the generated message into the dynamic one
+ err = dm.mergeFrom(msg)
+ if err != nil {
+ return nil, err
+ }
+ return dm, nil
+}
+
+// GetMessageDescriptor returns a descriptor for this message's type.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+ return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+ if len(m.extraFields) == 0 {
+ // common case: no dynamically-discovered fields, so the descriptor's
+ // own slice can be returned directly
+ return m.md.GetFields()
+ }
+ flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields))
+ copy(flds, m.md.GetFields())
+ for _, fld := range m.extraFields {
+ // extensions are reported by GetKnownExtensions, not here
+ if !fld.IsExtension() {
+ flds = append(flds, fld)
+ }
+ }
+ return flds
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+ if !m.md.IsExtendable() {
+ return nil
+ }
+ exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+ for _, fld := range m.extraFields {
+ if fld.IsExtension() {
+ exts = append(exts, fld)
+ }
+ }
+ return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+ flds := make([]int32, 0, len(m.unknownFields))
+ for tag := range m.unknownFields {
+ flds = append(flds, tag)
+ }
+ return flds
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc.
+func (m *Message) Descriptor() ([]byte, []int) {
+ // get encoded file descriptor
+ b, err := proto.Marshal(m.md.GetFile().AsProto())
+ if err != nil {
+ panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+ // generated Descriptor() methods return the gzipped descriptor bytes,
+ // so compress here for compatibility
+ var zippedBytes bytes.Buffer
+ w := gzip.NewWriter(&zippedBytes)
+ if _, err := w.Write(b); err != nil {
+ panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+ if err := w.Close(); err != nil {
+ panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+
+ // and path to message: walk up the chain of parents, recording at each
+ // step the index of the child (named `name`) within its parent; this
+ // builds the path leaf-to-root, so it is reversed afterwards
+ path := []int{}
+ var d desc.Descriptor
+ name := m.md.GetFullyQualifiedName()
+ for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+ found := false
+ switch d := d.(type) {
+ case (*desc.FileDescriptor):
+ for i, md := range d.GetMessageTypes() {
+ if md.GetFullyQualifiedName() == name {
+ found = true
+ path = append(path, i)
+ }
+ }
+ case (*desc.MessageDescriptor):
+ for i, md := range d.GetNestedMessageTypes() {
+ if md.GetFullyQualifiedName() == name {
+ found = true
+ path = append(path, i)
+ }
+ }
+ }
+ if !found {
+ panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+ }
+ }
+ // reverse the path
+ i := 0
+ j := len(path) - 1
+ for i < j {
+ path[i], path[j] = path[j], path[i]
+ i++
+ j--
+ }
+
+ return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+ return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+ // search order: descriptor fields, then registered extensions, then
+ // fields discovered dynamically after construction
+ fd := m.md.FindFieldByNumber(tagNumber)
+ if fd != nil {
+ return fd
+ }
+ fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber)
+ if fd != nil {
+ return fd
+ }
+ return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+ if name == "" {
+ return nil
+ }
+ fd := m.md.FindFieldByName(name)
+ if fd != nil {
+ return fd
+ }
+ // strip optional (...) or [...] enclosure; its presence means the name
+ // can only match an extension field below
+ mustBeExt := false
+ if name[0] == '(' {
+ if name[len(name)-1] != ')' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ } else if name[0] == '[' {
+ if name[len(name)-1] != ']' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ }
+ fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+ if fd != nil {
+ return fd
+ }
+ // finally, check fields discovered dynamically after construction
+ for _, fd := range m.extraFields {
+ if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+ return fd
+ } else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+ return fd
+ }
+ }
+
+ return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+ if name == "" {
+ return nil
+ }
+ fd := m.md.FindFieldByJSONName(name)
+ if fd != nil {
+ return fd
+ }
+ // strip optional (...) or [...] enclosure; its presence means the name
+ // can only match an extension field below
+ mustBeExt := false
+ if name[0] == '(' {
+ if name[len(name)-1] != ')' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ } else if name[0] == '[' {
+ if name[len(name)-1] != ']' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ }
+ fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+ if fd != nil {
+ return fd
+ }
+ for _, fd := range m.extraFields {
+ if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+ return fd
+ } else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+ return fd
+ }
+ }
+
+ // try non-JSON names
+ return m.FindFieldDescriptorByName(name)
+}
+
+// checkField verifies that the given field descriptor belongs to this
+// message's type; see the package-level checkField for details.
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+ return checkField(fd, m.md)
+}
+
+// checkField returns an error if fd does not belong to the message type md:
+// either it is declared on a different message, or it is an extension whose
+// tag is outside md's extension ranges.
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+ if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+ return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+ }
+ if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+ return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+ }
+ return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+ if v, err := m.TryGetField(fd); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+// +-------------------------+-----------+
+// | Declared Type | Go Type |
+// +-------------------------+-----------+
+// | int32, sint32, sfixed32 | int32 |
+// | int64, sint64, sfixed64 | int64 |
+// | uint32, fixed32 | uint32 |
+// | uint64, fixed64 | uint64 |
+// | float | float32 |
+// | double | float64 |
+// | bool | bool |
+// | string | string |
+// | bytes | []byte |
+// +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to lookup value names with those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values is
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For fields whose type
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on if *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+ if v, err := m.TryGetFieldByName(name); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+ if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getField(fd)
+}
+
+// getField returns the value for fd, substituting the field's default when
+// absent (see doGetField).
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+ return m.doGetField(fd, false)
+}
+
+// doGetField looks up the value for fd. If the field is not set, it first
+// tries to parse a matching unknown field; failing that, it either returns
+// nil (when nilIfAbsent is true) or synthesizes the field's default value.
+// Map and slice values are defensively copied before being returned.
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+ res := m.values[fd.GetNumber()]
+ if res == nil {
+ var err error
+ // the field may exist in unparsed form; parsing it promotes it to a
+ // known field (see doc.go "Unrecognized Fields")
+ if res, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ }
+ if res == nil {
+ if nilIfAbsent {
+ return nil, nil
+ } else {
+ def := fd.GetDefaultValue()
+ if def != nil {
+ return def, nil
+ }
+ // GetDefaultValue only returns nil for message types
+ md := fd.GetMessageType()
+ if md.IsProto3() {
+ // try to return a proper nil pointer
+ msgType := proto.MessageType(md.GetFullyQualifiedName())
+ if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+ return reflect.Zero(msgType).Interface().(proto.Message), nil
+ }
+ // fallback to nil dynamic message pointer
+ return (*Message)(nil), nil
+ } else {
+ // for proto2, return default instance of message
+ return m.mf.NewMessage(md), nil
+ }
+ }
+ }
+ }
+ rt := reflect.TypeOf(res)
+ if rt.Kind() == reflect.Map {
+ // make defensive copies to prevent caller from storing illegal keys and values
+ m := res.(map[interface{}]interface{})
+ res := map[interface{}]interface{}{}
+ for k, v := range m {
+ res[k] = v
+ }
+ return res, nil
+ } else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+ // make defensive copies to prevent caller from storing illegal elements
+ sl := res.([]interface{})
+ res := make([]interface{}, len(sl))
+ copy(res, sl)
+ return res, nil
+ }
+ return res, nil
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+ if err := m.checkField(fd); err != nil {
+ return false
+ }
+ return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return false
+ }
+ return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+ if _, ok := m.values[int32(tagNumber)]; ok {
+ return true
+ }
+ // a still-unparsed unknown field also counts as present
+ _, ok := m.unknownFields[int32(tagNumber)]
+ return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+ if err := m.TrySetField(fd, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+// 1. If a numeric type is provided that can be converted *without loss or
+// overflow*, it is accepted. This allows for setting int64 fields using int
+// or int32 values. Similarly for uint64 with uint and uint32 values and for
+// float64 fields with float32 values.
+// 2. The value can be a named type, as long as its underlying type is correct.
+// 3. Map and repeated fields can be set using any kind of concrete map or
+// slice type, as long as the values within are all of the correct type. So
+// a field defined as a 'map<string, int32>` can be set using a
+// map[string]int32, a map[string]interface{}, or even a
+// map[interface{}]interface{}.
+// 4. Finally, dynamic code that chooses to not treat maps as a special-case
+// find that they can set map fields using a slice where each element is a
+// message that matches the implicit map-entry field message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+ if err := m.TrySetFieldByName(name, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parenthesis or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+ if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+ var err error
+ if val, err = validFieldValue(fd, val); err != nil {
+ return err
+ }
+ m.internalSetField(fd, val)
+ return nil
+}
+
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+ if fd.IsRepeated() {
+ // Unset fields and zero-length fields are indistinguishable, in both
+ // proto2 and proto3 syntax
+ if reflect.ValueOf(val).Len() == 0 {
+ if m.values != nil {
+ delete(m.values, fd.GetNumber())
+ }
+ return
+ }
+ } else if m.md.IsProto3() && fd.GetOneOf() == nil {
+ // proto3 considers fields that are set to their zero value as unset
+ // (we already handled repeated fields above)
+ var equal bool
+ if b, ok := val.([]byte); ok {
+ // can't compare slices, so we have to special-case []byte values
+ equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
+ } else {
+ defVal := fd.GetDefaultValue()
+ equal = defVal == val
+ if !equal && defVal == nil {
+ // above just checks if value is the nil interface,
+ // but we should also test if the given value is a
+ // nil pointer
+ rv := reflect.ValueOf(val)
+ if rv.Kind() == reflect.Ptr && rv.IsNil() {
+ equal = true
+ }
+ }
+ }
+ if equal {
+ if m.values != nil {
+ delete(m.values, fd.GetNumber())
+ }
+ return
+ }
+ }
+ if m.values == nil {
+ m.values = map[int32]interface{}{}
+ }
+ m.values[fd.GetNumber()] = val
+ // if this field is part of a one-of, make sure all other one-of choices are cleared
+ od := fd.GetOneOf()
+ if od != nil {
+ for _, other := range od.GetChoices() {
+ if other.GetNumber() != fd.GetNumber() {
+ delete(m.values, other.GetNumber())
+ }
+ }
+ }
+ // also clear any unknown fields
+ if m.unknownFields != nil {
+ delete(m.unknownFields, fd.GetNumber())
+ }
+ // and add this field if it was previously unknown
+ if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+ m.addField(fd)
+ }
+}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+ if m.extraFields == nil {
+ m.extraFields = map[int32]*desc.FieldDescriptor{}
+ }
+ m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+ if err := m.TryClearField(fd); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ m.clearField(fd)
+ return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+ if err := m.TryClearFieldByName(name); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parenthesis or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ m.clearField(fd)
+ return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+ if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ m.clearField(fd)
+ return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+ // clear value
+ if m.values != nil {
+ delete(m.values, fd.GetNumber())
+ }
+ // also clear any unknown fields
+ if m.unknownFields != nil {
+ delete(m.unknownFields, fd.GetNumber())
+ }
+ // and add this field if it was previously unknown
+ if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+ m.addField(fd)
+ }
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+ if fd, val, err := m.TryGetOneOfField(od); err != nil {
+ panic(err.Error())
+ } else {
+ return fd, val
+ }
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+ if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ for _, fd := range od.GetChoices() {
+ val, err := m.doGetField(fd, true)
+ if err != nil {
+ return nil, nil, err
+ }
+ if val != nil {
+ return fd, val, nil
+ }
+ }
+ return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+ if err := m.TryClearOneOfField(od); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+ if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ for _, fd := range od.GetChoices() {
+ m.clearField(fd)
+ }
+ return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+ if v, err := m.TryGetMapField(fd, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getMapField(fd, key)
+}
+
// getMapField looks up key in the map value stored for fd. The key is first
// validated/converted against the map-entry's key field descriptor. If no
// value is stored under fd's tag, any unknown-field bytes for that tag are
// parsed (note: assignment is "mp, err =", deliberately NOT ":=", so the
// parsed value is visible after the if). Returns nil, nil when the field is
// absent or the key has no entry.
func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
	if !fd.IsMap() {
		return nil, FieldIsNotMapError
	}
	// map fields are modeled as repeated map-entry messages; fields[0] is
	// the entry's key field, fields[1] its value field
	kfd := fd.GetMessageType().GetFields()[0]
	ki, err := validElementFieldValue(kfd, key)
	if err != nil {
		return nil, err
	}
	mp := m.values[fd.GetNumber()]
	if mp == nil {
		if mp, err = m.parseUnknownField(fd); err != nil {
			return nil, err
		} else if mp == nil {
			return nil, nil
		}
	}
	// stored map values use the canonical map[interface{}]interface{} form
	return mp.(map[interface{}]interface{})[ki], nil
}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err := m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ return nil
+ }
+ }
+ for k, v := range mp.(map[interface{}]interface{}) {
+ if !fn(k, v) {
+ break
+ }
+ }
+ return nil
+}
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+ if err := m.TryPutMapField(fd, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key)
+ if err != nil {
+ return err
+ }
+ vfd := fd.GetMessageType().GetFields()[1]
+ vi, err := validElementFieldValue(vfd, val)
+ if err != nil {
+ return err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ mp = map[interface{}]interface{}{}
+ m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+ return nil
+ }
+ }
+ mp.(map[interface{}]interface{})[ki] = vi
+ return nil
+}
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+ if err := m.TryRemoveMapField(fd, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+ if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+ if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.removeMapField(fd, key)
+}
+
// removeMapField deletes the entry for key from the map value stored for fd.
// The key is validated/converted against the map-entry's key field
// descriptor. If no value is stored under fd's tag, any unknown-field bytes
// are parsed first (note: "mp, err =", not ":=", so the parsed value is
// visible after the if). When the last entry is removed the field itself is
// deleted, keeping "empty" and "absent" indistinguishable.
func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
	if !fd.IsMap() {
		return FieldIsNotMapError
	}
	// fields[0] of the map-entry message is the key field
	kfd := fd.GetMessageType().GetFields()[0]
	ki, err := validElementFieldValue(kfd, key)
	if err != nil {
		return err
	}
	mp := m.values[fd.GetNumber()]
	if mp == nil {
		if mp, err = m.parseUnknownField(fd); err != nil {
			return err
		} else if mp == nil {
			// field absent: nothing to remove
			return nil
		}
	}
	res := mp.(map[interface{}]interface{})
	delete(res, ki)
	if len(res) == 0 {
		delete(m.values, fd.GetNumber())
	}
	return nil
}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+ l, err := m.TryFieldLength(fd)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+ if err := m.checkField(fd); err != nil {
+ return 0, err
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+ l, err := m.TryFieldLengthByName(name)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return 0, UnknownFieldNameError
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+ l, err := m.TryFieldLengthByNumber(tagNumber)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return 0, UnknownTagNumberError
+ }
+ return m.fieldLength(fd)
+}
+
// fieldLength returns the element count for a repeated or map field. If no
// value is stored under fd's tag, any unknown-field bytes are parsed first
// (note: "val, err =", not ":=", so the parsed value survives the if).
// Returns 0 for an absent field; the trailing "return 0, nil" is a
// defensive fallback for an unexpected stored type.
func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
	if !fd.IsRepeated() {
		return 0, FieldIsNotRepeatedError
	}
	val := m.values[fd.GetNumber()]
	if val == nil {
		var err error
		if val, err = m.parseUnknownField(fd); err != nil {
			return 0, err
		} else if val == nil {
			// field absent: length is zero
			return 0, nil
		}
	}
	// repeated fields are stored as []interface{}, map fields as
	// map[interface{}]interface{}
	if sl, ok := val.([]interface{}); ok {
		return len(sl), nil
	} else if mp, ok := val.(map[interface{}]interface{}); ok {
		return len(mp), nil
	}
	return 0, nil
}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+ if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if fd.IsMap() || !fd.IsRepeated() {
+ return nil, FieldIsNotRepeatedError
+ }
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ var err error
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ } else if sl == nil {
+ return nil, IndexOutOfRangeError
+ }
+ }
+ res := sl.([]interface{})
+ if index >= len(res) {
+ return nil, IndexOutOfRangeError
+ }
+ return res[index], nil
+}
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+ if err := m.TryAddRepeatedField(fd, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+ if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+ if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
// addRepeatedField validates val against fd's element type and appends it to
// the field's value, first promoting any unknown-field data for fd so the new
// element is appended to existing data. Map fields are accepted leniently:
// val must then be an entry message, whose key (field 1) and value (field 2)
// are stored via putMapField.
func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
	if !fd.IsRepeated() {
		return FieldIsNotRepeatedError
	}
	val, err := validElementFieldValue(fd, val)
	if err != nil {
		return err
	}

	if fd.IsMap() {
		// We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
		// adding entries one at a time (as if the field were a normal repeated field).
		msg := val.(proto.Message)
		dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
		if err != nil {
			return err
		}
		// map entry messages carry the key in field 1 and the value in field 2
		k, err := dm.TryGetFieldByNumber(1)
		if err != nil {
			return err
		}
		v, err := dm.TryGetFieldByNumber(2)
		if err != nil {
			return err
		}
		return m.putMapField(fd, k, v)
	}

	sl := m.values[fd.GetNumber()]
	if sl == nil {
		// no known value yet: try to materialize one from unknown-field data
		if sl, err = m.parseUnknownField(fd); err != nil {
			return err
		} else if sl == nil {
			sl = []interface{}{}
		}
	}
	res := sl.([]interface{})
	res = append(res, val)
	m.internalSetField(fd, res)
	return nil
}
+
// SetRepeatedField sets the value for the given repeated field descriptor and
// given index to the given value. It panics if an error is encountered. See
// TrySetRepeatedField.
func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
	if err := m.TrySetRepeatedField(fd, index, val); err != nil {
		panic(err.Error())
	}
}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+ if fd.IsMap() || !fd.IsRepeated() {
+ return FieldIsNotRepeatedError
+ }
+ val, err := validElementFieldValue(fd, val)
+ if err != nil {
+ return err
+ }
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if sl == nil {
+ return IndexOutOfRangeError
+ }
+ }
+ res := sl.([]interface{})
+ if index >= len(res) {
+ return IndexOutOfRangeError
+ }
+ res[index] = val
+ return nil
+}
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+ if u, ok := m.unknownFields[tagNumber]; ok {
+ return u
+ } else {
+ return nil
+ }
+}
+
// parseUnknownField attempts to interpret any unknown-field data stored for
// fd's tag number using fd's type information. On success the parsed value is
// promoted to a known field value (via internalSetField) and returned. It
// returns (nil, nil) when there is no unknown data for the tag.
func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
	unks, ok := m.unknownFields[fd.GetNumber()]
	if !ok {
		return nil, nil
	}
	var v interface{}
	var sl []interface{}
	var mp map[interface{}]interface{}
	if fd.IsMap() {
		mp = map[interface{}]interface{}{}
	}
	var err error
	for _, unk := range unks {
		var val interface{}
		if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
			val, err = unmarshalLengthDelimitedField(fd, unk.Contents, m.mf)
		} else {
			val, err = unmarshalSimpleField(fd, unk.Value)
		}
		if err != nil {
			return nil, err
		}
		if fd.IsMap() {
			// each parsed value is a map entry message: key is field 1, value is field 2
			newEntry := val.(*Message)
			kk, err := newEntry.TryGetFieldByNumber(1)
			if err != nil {
				return nil, err
			}
			vv, err := newEntry.TryGetFieldByNumber(2)
			if err != nil {
				return nil, err
			}
			mp[kk] = vv
			v = mp
		} else if fd.IsRepeated() {
			t := reflect.TypeOf(val)
			if t.Kind() == reflect.Slice && t != typeOfBytes {
				// append slices if we unmarshalled a packed repeated field
				newVals := val.([]interface{})
				sl = append(sl, newVals...)
			} else {
				sl = append(sl, val)
			}
			v = sl
		} else {
			// scalar field: each iteration overwrites v, so the last value wins
			v = val
		}
	}
	m.internalSetField(fd, v)
	return v, nil
}
+
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+ return validFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
// validFieldValueForRv validates and normalizes a whole-field value for fd:
// maps become map[interface{}]interface{} (a slice of entry messages is also
// accepted for map fields), repeated fields become a defensively-copied
// []interface{} of validated elements, and scalar/message fields are validated
// as single elements.
func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
	if fd.IsMap() && val.Kind() == reflect.Map {
		return validFieldValueForMapField(fd, val)
	}

	if fd.IsRepeated() { // this will also catch map fields where given value was not a map
		if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
			if fd.IsMap() {
				return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
			} else {
				return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
			}
		}

		if fd.IsMap() {
			// value should be a slice of entry messages that we need convert into a map[interface{}]interface{}
			m := map[interface{}]interface{}{}
			for i := 0; i < val.Len(); i++ {
				e, err := validElementFieldValue(fd, val.Index(i).Interface())
				if err != nil {
					return nil, err
				}
				msg := e.(proto.Message)
				dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
				if err != nil {
					return nil, err
				}
				// entry messages carry the key in field 1 and the value in field 2
				k, err := dm.TryGetFieldByNumber(1)
				if err != nil {
					return nil, err
				}
				v, err := dm.TryGetFieldByNumber(2)
				if err != nil {
					return nil, err
				}
				m[k] = v
			}
			return m, nil
		}

		// make a defensive copy while checking contents (also converts to []interface{})
		s := make([]interface{}, val.Len())
		for i := 0; i < val.Len(); i++ {
			ev := val.Index(i)
			if ev.Kind() == reflect.Interface {
				// unwrap it
				ev = reflect.ValueOf(ev.Interface())
			}
			e, err := validElementFieldValueForRv(fd, ev)
			if err != nil {
				return nil, err
			}
			s[i] = e
		}

		return s, nil
	}

	return validElementFieldValueForRv(fd, val)
}
+
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+ if dm, ok := m.(*Message); ok {
+ return dm, nil
+ }
+ dm := NewMessageWithMessageFactory(md, mf)
+ if err := dm.mergeFrom(m); err != nil {
+ return nil, err
+ }
+ return dm, nil
+}
+
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+ return validElementFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
// validElementFieldValueForRv validates a single element value for fd and
// converts it to the canonical Go type for fd's protobuf type (int32 for
// int32/sint32/sfixed32/enum, int64 for the 64-bit signed types, and so on).
// Pointer values are dereferenced via reflect.Indirect before conversion.
// Message/group values are additionally checked to be of the exact message
// type fd declares.
func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
	t := fd.GetType()
	if !val.IsValid() {
		// a zero reflect.Value (e.g. from an untyped nil) is never acceptable
		return nil, typeError(fd, nil)
	}

	switch t {
	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
		descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_SINT32,
		descriptor.FieldDescriptorProto_TYPE_ENUM:
		return toInt32(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
		descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_SINT64:
		return toInt64(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
		descriptor.FieldDescriptorProto_TYPE_UINT32:
		return toUint32(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
		descriptor.FieldDescriptorProto_TYPE_UINT64:
		return toUint64(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		return toFloat32(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return toFloat64(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		return toBool(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return toBytes(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return toString(reflect.Indirect(val), fd)

	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
		descriptor.FieldDescriptorProto_TYPE_GROUP:
		m, err := asMessage(val, fd.GetFullyQualifiedName())
		// check that message is correct type
		if err != nil {
			return nil, err
		}
		var msgType string
		if dm, ok := m.(*Message); ok {
			msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
		} else {
			msgType = proto.MessageName(m)
		}
		if msgType != fd.GetMessageType().GetFullyQualifiedName() {
			return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
		}
		return m, nil

	default:
		return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
	}
}
+
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+ if v.Kind() == reflect.Int32 {
+ return int32(v.Int()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+ if v.Kind() == reflect.Uint32 {
+ return uint32(v.Uint()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+ if v.Kind() == reflect.Float32 {
+ return float32(v.Float()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+ if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 {
+ return v.Int(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+ if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 {
+ return v.Uint(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+ if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 {
+ return v.Float(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+ if v.Kind() == reflect.Bool {
+ return v.Bool(), nil
+ }
+ return false, typeError(fd, v.Type())
+}
+
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+ return v.Bytes(), nil
+ }
+ return nil, typeError(fd, v.Type())
+}
+
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+ if v.Kind() == reflect.String {
+ return v.String(), nil
+ }
+ return "", typeError(fd, v.Type())
+}
+
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+ return fmt.Errorf(
+ "%s field %s is not compatible with value of type %v",
+ getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+func getTypeString(fd *desc.FieldDescriptor) string {
+ return strings.ToLower(fd.GetType().String())
+}
+
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+ t := v.Type()
+ // we need a pointer to a struct that implements proto.Message
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+ return nil, fmt.Errorf("message field %s requires is not compatible with value of type %v", fieldName, v.Type())
+ }
+ return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+ for k := range m.values {
+ delete(m.values, k)
+ }
+ for k := range m.unknownFields {
+ delete(m.unknownFields, k)
+ }
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+ b, err := m.MarshalText()
+ if err != nil {
+ panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+ }
+ return string(b)
+}
+
// ProtoMessage is present to satisfy the proto.Message interface.
// It is deliberately a no-op marker method.
func (m *Message) ProtoMessage() {
}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+// target.Reset()
+// m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+
+ target.Reset()
+ return m.mergeInto(target)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+// m.Reset()
+// m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+
+ m.Reset()
+ return m.mergeFrom(target)
+}
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+ return m.mergeInto(target)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+ if err := m.checkType(source); err != nil {
+ return err
+ }
+ return m.mergeFrom(source)
+}
+
// Merge implements the proto.Merger interface so that dynamic messages are
// compatible with the proto.Merge function. It delegates to MergeFrom but will
// panic on error as the proto.Merger interface doesn't allow for returning an
// error.
//
// Unlike nearly all other methods, this method can work if this message's type
// is not defined (such as instantiating the message without using NewMessage).
// This is strictly so that dynamic messages are compatible with the
// proto.Clone function, which instantiates a new message via reflection (thus
// its message descriptor will not be set) and then calls Merge.
func (m *Message) Merge(source proto.Message) {
	if m.md == nil {
		// To support proto.Clone, initialize the descriptor from the source.
		if dm, ok := source.(*Message); ok {
			m.md = dm.md
			// also make sure the clone uses the same message factory and
			// extensions and also knows about the same extra fields (if any)
			m.mf = dm.mf
			m.er = dm.er
			m.extraFields = dm.extraFields
		} else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
			panic(err.Error())
		} else {
			m.md = md
		}
	}

	if err := m.MergeFrom(source); err != nil {
		panic(err.Error())
	}
}
+
+func (m *Message) checkType(target proto.Message) error {
+ if dm, ok := target.(*Message); ok {
+ if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ return nil
+ }
+
+ msgName := proto.MessageName(target)
+ if msgName != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+ }
+ return nil
+}
+
// mergeInto is the implementation behind MergeInto and ConvertTo. It copies
// this dynamic message's data into pm, a generated message struct, via
// reflection and proto struct properties. It runs in two passes: a validation
// pass that checks every value can be converted to the target field's Go type
// (so the merge does not fail partway through mutating pm), then the actual
// merge of regular fields, one-ofs, and extensions. Fields the target does not
// know about are appended to its XXX_unrecognized bytes, and this message's
// own unknown fields are conveyed by letting pm unmarshal them.
func (m *Message) mergeInto(pm proto.Message) error {
	if dm, ok := pm.(*Message); ok {
		// target is also dynamic, so merge in the other direction
		return dm.mergeFrom(m)
	}

	target := reflect.ValueOf(pm)
	if target.Kind() == reflect.Ptr {
		target = target.Elem()
	}

	// track tags for which the dynamic message has data but the given
	// message doesn't know about it
	u := target.FieldByName("XXX_unrecognized")
	var unknownTags map[int32]struct{}
	if u.IsValid() && u.Type() == typeOfBytes {
		unknownTags = map[int32]struct{}{}
		for tag := range m.values {
			unknownTags[tag] = struct{}{}
		}
	}

	// check that we can successfully do the merge
	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
	for _, prop := range structProps.Prop {
		if prop.Tag == 0 {
			continue // one-of or special field (such as XXX_unrecognized, etc.)
		}
		tag := int32(prop.Tag)
		v, ok := m.values[tag]
		if !ok {
			continue
		}
		if unknownTags != nil {
			// target knows this tag, so it is not an unknown field
			delete(unknownTags, tag)
		}
		f := target.FieldByName(prop.Name)
		ft := f.Type()
		val := reflect.ValueOf(v)
		if !canConvert(val, ft) {
			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
		}
	}
	// check one-of fields
	for _, oop := range structProps.OneofTypes {
		prop := oop.Prop
		tag := int32(prop.Tag)
		v, ok := m.values[tag]
		if !ok {
			continue
		}
		if unknownTags != nil {
			delete(unknownTags, tag)
		}
		stf, ok := oop.Type.Elem().FieldByName(prop.Name)
		if !ok {
			return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
		}
		ft := stf.Type
		val := reflect.ValueOf(v)
		if !canConvert(val, ft) {
			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
		}
	}
	// and check extensions, too
	for tag, ext := range proto.RegisteredExtensions(pm) {
		v, ok := m.values[tag]
		if !ok {
			continue
		}
		if unknownTags != nil {
			delete(unknownTags, tag)
		}
		ft := reflect.TypeOf(ext.ExtensionType)
		val := reflect.ValueOf(v)
		if !canConvert(val, ft) {
			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
		}
	}

	// now actually perform the merge
	for _, prop := range structProps.Prop {
		v, ok := m.values[int32(prop.Tag)]
		if !ok {
			continue
		}
		f := target.FieldByName(prop.Name)
		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
			return err
		}
	}
	// merge one-ofs
	for _, oop := range structProps.OneofTypes {
		prop := oop.Prop
		tag := int32(prop.Tag)
		v, ok := m.values[tag]
		if !ok {
			continue
		}
		// allocate the one-of wrapper struct, populate it, then install it
		oov := reflect.New(oop.Type.Elem())
		f := oov.Elem().FieldByName(prop.Name)
		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
			return err
		}
		target.Field(oop.Field).Set(oov)
	}
	// merge extensions, too
	for tag, ext := range proto.RegisteredExtensions(pm) {
		v, ok := m.values[tag]
		if !ok {
			continue
		}
		e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
		if err := mergeVal(reflect.ValueOf(v), e); err != nil {
			return err
		}
		if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
			// shouldn't happen since we already checked that the extension type was compatible above
			return err
		}
	}

	// if we have fields that the given message doesn't know about, add to its unknown fields
	if len(unknownTags) > 0 {
		ub := u.Interface().([]byte)
		var b codedBuffer
		for tag := range unknownTags {
			fd := m.FindFieldDescriptor(tag)
			if err := marshalField(tag, fd, m.values[tag], &b, false); err != nil {
				return err
			}
		}
		ub = append(ub, b.buf...)
		u.Set(reflect.ValueOf(ub))
	}

	// finally, convey unknown fields into the given message by letting it unmarshal them
	// (this will append to its unknown fields if not known; if somehow the given message recognizes
	// a field even though the dynamic message did not, it will get correctly unmarshalled)
	if unknownTags != nil && len(m.unknownFields) > 0 {
		var b codedBuffer
		m.marshalUnknownFields(&b)
		// NOTE(review): the error from UnmarshalMerge is dropped here — confirm
		// that silently ignoring a decode failure of unknown bytes is intended.
		proto.UnmarshalMerge(b.buf, pm)
	}

	return nil
}
+
// canConvert reports whether the value src can be converted to the target
// type, recursing element-wise into slices and maps. Interface values are
// unwrapped first. A *Message is convertible to any proto.Message target whose
// registered name matches the dynamic message's descriptor name.
func canConvert(src reflect.Value, target reflect.Type) bool {
	if src.Kind() == reflect.Interface {
		// unwrap the interface to inspect the concrete value
		src = reflect.ValueOf(src.Interface())
	}
	srcType := src.Type()
	// we allow convertible types instead of requiring exact types so that calling
	// code can, for example, assign an enum constant to an enum field. In that case,
	// one type is the enum type (a sub-type of int32) and the other may be the int32
	// type. So we automatically do the conversion in that case.
	if srcType.ConvertibleTo(target) {
		return true
	} else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
		return true
	} else if target.Kind() == reflect.Slice {
		if srcType.Kind() != reflect.Slice {
			return false
		}
		// every element must be convertible to the target element type
		et := target.Elem()
		for i := 0; i < src.Len(); i++ {
			if !canConvert(src.Index(i), et) {
				return false
			}
		}
		return true
	} else if target.Kind() == reflect.Map {
		if srcType.Kind() != reflect.Map {
			return false
		}
		return canConvertMap(src, target)
	} else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
		// dynamic message: compatible if the target's registered message name
		// matches this message's descriptor name
		z := reflect.Zero(target).Interface()
		msgType := proto.MessageName(z.(proto.Message))
		return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
	} else {
		return false
	}
}
+
// mergeVal merges the value src into the settable reflect.Value target,
// mirroring the convertibility rules checked by canConvert: direct conversion
// (recursively merging when both are proto messages), value-to-pointer
// conversion, element-wise append for slices, map merge, and dynamic-message
// to generated-message merge. It returns an error when no conversion applies.
func mergeVal(src, target reflect.Value) error {
	if src.Kind() == reflect.Interface && !src.IsNil() {
		// unwrap the interface to get at the concrete value
		src = src.Elem()
	}
	srcType := src.Type()
	targetType := target.Type()
	if srcType.ConvertibleTo(targetType) {
		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
			// both are messages: merge into the existing target message
			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
		} else {
			target.Set(src.Convert(targetType))
		}
	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
		if !src.CanAddr() {
			// source is not addressable, so allocate a new pointee and copy
			target.Set(reflect.New(targetType.Elem()))
			target.Elem().Set(src.Convert(targetType.Elem()))
		} else {
			target.Set(src.Addr().Convert(targetType))
		}
	} else if targetType.Kind() == reflect.Slice {
		l := target.Len()
		newL := l + src.Len()
		if target.Cap() < newL {
			// expand capacity of the slice and copy
			newSl := reflect.MakeSlice(targetType, newL, newL)
			for i := 0; i < target.Len(); i++ {
				newSl.Index(i).Set(target.Index(i))
			}
			target.Set(newSl)
		} else {
			target.SetLen(newL)
		}
		for i := 0; i < src.Len(); i++ {
			dest := target.Index(l + i)
			if dest.Kind() == reflect.Ptr {
				dest.Set(reflect.New(dest.Type().Elem()))
			}
			if err := mergeVal(src.Index(i), dest); err != nil {
				return err
			}
		}
	} else if targetType.Kind() == reflect.Map {
		return mergeMapVal(src, target, targetType)
	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
		dm := src.Interface().(*Message)
		if target.IsNil() {
			target.Set(reflect.New(targetType.Elem()))
		}
		m := target.Interface().(proto.Message)
		if err := dm.mergeInto(m); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
	}
	return nil
}
+
+// mergeFrom merges the contents of the given message into m, analogous to
+// proto.Merge. If pm is another dynamic message its values are merged
+// directly; otherwise regular fields, one-of fields, extensions, and unknown
+// fields are extracted via proto reflection, validated against m's
+// descriptor, and then merged.
+func (m *Message) mergeFrom(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		// if given message is also a dynamic message, we merge differently
+		for tag, v := range dm.values {
+			fd := m.FindFieldDescriptor(tag)
+			if fd == nil {
+				fd = dm.FindFieldDescriptor(tag)
+			}
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	pmrv := reflect.ValueOf(pm)
+	if pmrv.IsNil() {
+		// nil is an empty message, so nothing to do
+		return nil
+	}
+
+	// check that we can successfully do the merge
+	src := pmrv.Elem()
+	values := map[*desc.FieldDescriptor]interface{}{}
+	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	if props == nil {
+		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
+	}
+
+	// regular fields
+	for _, prop := range props.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
+			}
+		}
+		rv := src.FieldByName(prop.Name)
+		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
+			continue
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// one-of fields
+	for _, oop := range props.OneofTypes {
+		oov := src.Field(oop.Field).Elem()
+		if !oov.IsValid() || oov.Type() != oop.Type {
+			// this field is unset (in other words, one-of message field is not currently set to this option)
+			continue
+		}
+		prop := oop.Prop
+		rv := oov.Elem().FieldByName(prop.Name)
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
+			}
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// extension fields
+	rexts, _ := proto.ExtensionDescs(pm)
+	var unknownExtensions []byte
+	for _, ed := range rexts {
+		v, _ := proto.GetExtension(pm, ed)
+		if v == nil {
+			continue
+		}
+		if ed.ExtensionType == nil {
+			// unrecognized extension: collect raw bytes, pulled in below
+			extBytes, _ := v.([]byte)
+			if len(extBytes) > 0 {
+				unknownExtensions = append(unknownExtensions, extBytes...)
+			}
+			continue
+		}
+		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
+		if fd == nil {
+			var err error
+			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
+				return err
+			}
+		}
+		if v, err := validFieldValue(fd, v); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		// values were validated above so failure is unexpected, but do not
+		// silently drop an error if the merge fails anyway
+		if err := mergeField(m, fd, v); err != nil {
+			return err
+		}
+	}
+
+	u := src.FieldByName("XXX_unrecognized")
+	if u.IsValid() && u.Type() == typeOfBytes {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		m.UnmarshalMerge(u.Interface().([]byte))
+	}
+
+	// lastly, also extract any unknown extensions the message may have (unknown extensions
+	// are stored with other extensions, not in the XXX_unrecognized field, so we have to do
+	// more than just the step above...)
+	if len(unknownExtensions) > 0 {
+		// pulling in unknown fields is best-effort, so we just ignore errors
+		m.UnmarshalMerge(unknownExtensions)
+	}
+	return nil
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missing := m.findMissingFields()
+	if len(missing) > 0 {
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+// findMissingFields returns the names of required fields that have no value
+// set. Proto3 has no required fields, so proto3 messages always yield nil.
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missing []string
+	for _, fd := range m.md.GetFields() {
+		if !fd.IsRequired() {
+			continue
+		}
+		if _, present := m.values[fd.GetNumber()]; !present {
+			missing = append(missing, fd.GetName())
+		}
+	}
+	return missing
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields who are also messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	// start with an empty path prefix; nested messages append to it
+	return m.validateRecursive("")
+}
+
+// validateRecursive implements ValidateRecursive; prefix is the dotted path
+// from the root to this message, used to qualify field names in errors.
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		// checkMsg validates one nested message value; md and chprefix are
+		// assigned by the loops below before each invocation
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					// previously this returned nil, silently swallowing the
+					// conversion error and skipping validation of this value
+					return err
+				}
+			}
+			return dm.validateRecursive(chprefix)
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// getName renders a field name for error messages: extensions use their
+// parenthesized fully-qualified name, regular fields their simple name.
+func getName(fd *desc.FieldDescriptor) string {
+	if fd.IsExtension() {
+		return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+	}
+	return fd.GetName()
+}
+
+// knownFieldTags return tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+	keys := make([]int, 0, len(m.values))
+	for tag := range m.values {
+		keys = append(keys, int(tag))
+	}
+	sort.Ints(keys)
+	return keys
+}
+
+// allKnownFieldTags return tags of present and recognized fields, including
+// those that are unset, in sorted order. This only includes extensions that are
+// present. Known but not-present extensions are not included in the returned
+// set of tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+	// fields (and extensions) that currently have a value
+	for k := range m.values {
+		keys = append(keys, int(k))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, ok := m.values[fd.GetNumber()]; !ok {
+			keys = append(keys, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if !fd.IsExtension() { // extension extraFields are included only when present (covered by m.values above)
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				keys = append(keys, int(fd.GetNumber()))
+			}
+		}
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// unknownFieldTags return tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+	if len(m.unknownFields) == 0 {
+		return []int(nil)
+	}
+	keys := make([]int, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		keys = append(keys, int(tag))
+	}
+	sort.Ints(keys)
+	return keys
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..5fbcc24
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,152 @@
+package dynamic
+
+import (
+ "bytes"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
+// have the same message type and same fields set to equal values. For proto3 messages, fields set
+// to their zero value are considered unset.
+func Equal(a, b *Message) bool {
+	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
+		return false
+	}
+	if len(a.values) != len(b.values) {
+		return false
+	}
+	if len(a.unknownFields) != len(b.unknownFields) {
+		return false
+	}
+	// lengths already match, so verifying that every field of a is present
+	// and equal in b is sufficient
+	for tag, aval := range a.values {
+		bval, ok := b.values[tag]
+		if !ok {
+			return false
+		}
+		if !fieldsEqual(aval, bval) {
+			return false
+		}
+	}
+	// unknown fields are compared by wire type and raw contents
+	// NOTE(review): repeated unknown values must appear in the same order,
+	// which is stricter than semantic proto equality — confirm intended
+	for tag, au := range a.unknownFields {
+		bu, ok := b.unknownFields[tag]
+		if !ok {
+			return false
+		}
+		if len(au) != len(bu) {
+			return false
+		}
+		for i, aval := range au {
+			bval := bu[i]
+			if aval.Encoding != bval.Encoding {
+				return false
+			}
+			// length-delimited and group values carry their payload in
+			// Contents; all other wire types carry it in Value
+			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
+				if !bytes.Equal(aval.Contents, bval.Contents) {
+					return false
+				}
+			} else if aval.Value != bval.Value {
+				return false
+			}
+		}
+	}
+	// all checks pass!
+	return true
+}
+
+// fieldsEqual compares two field values: message values via MessagesEqual,
+// maps and slices element-wise, byte slices via bytes.Equal, scalars via ==.
+func fieldsEqual(aval, bval interface{}) bool {
+	arv := reflect.ValueOf(aval)
+	brv := reflect.ValueOf(bval)
+	if arv.Type() != brv.Type() {
+		// it is possible that one is a dynamic message and one is not
+		apm, aok := aval.(proto.Message)
+		bpm, bok := bval.(proto.Message)
+		if !aok || !bok {
+			return false
+		}
+		return MessagesEqual(apm, bpm)
+	}
+
+	switch arv.Kind() {
+	case reflect.Ptr:
+		apm, ok := aval.(proto.Message)
+		if !ok {
+			// Don't know how to compare pointer values that aren't messages!
+			// Maybe this should panic?
+			return false
+		}
+		// the assertion on bval will succeed because a and b have the same type
+		return MessagesEqual(apm, bval.(proto.Message))
+
+	case reflect.Map:
+		return mapsEqual(arv, brv)
+
+	case reflect.Slice:
+		if arv.Type() == typeOfBytes {
+			return bytes.Equal(aval.([]byte), bval.([]byte))
+		}
+		return slicesEqual(arv, brv)
+
+	default:
+		return aval == bval
+	}
+}
+
+// slicesEqual reports whether a and b have the same length and
+// pairwise-equal elements (per fieldsEqual).
+func slicesEqual(a, b reflect.Value) bool {
+	n := a.Len()
+	if n != b.Len() {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		if !fieldsEqual(a.Index(i).Interface(), b.Index(i).Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
+// when one or both of the messages might be a dynamic message.
+func MessagesEqual(a, b proto.Message) bool {
+	da, aok := a.(*Message)
+	db, bok := b.(*Message)
+	switch {
+	case aok && bok:
+		// both are dynamic messages
+		return Equal(da, db)
+	case !aok && !bok:
+		// neither is dynamic
+		return proto.Equal(a, b)
+	case aok:
+		// a is dynamic, b is not: convert b and compare dynamically
+		md, err := desc.LoadMessageDescriptorForMessage(b)
+		if err != nil {
+			return false
+		}
+		db = NewMessageWithMessageFactory(md, da.mf)
+		if db.ConvertFrom(b) != nil {
+			return false
+		}
+		return Equal(da, db)
+	default:
+		// b is dynamic, a is not: convert a and compare dynamically
+		md, err := desc.LoadMessageDescriptorForMessage(a)
+		if err != nil {
+			return false
+		}
+		da = NewMessageWithMessageFactory(md, db.mf)
+		if da.ConvertFrom(a) != nil {
+			return false
+		}
+		return Equal(da, db)
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..a0ff6af
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,44 @@
+package dynamic
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked-in version of the extension), so in that case
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+	if !extd.IsExtension() {
+		return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+	}
+
+	// dynamic messages can take the field descriptor directly
+	if dm, ok := msg.(*Message); ok {
+		return dm.TrySetField(extd, val)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return err
+	}
+	// checkField (defined elsewhere in this package) validates extd against
+	// the target message's descriptor
+	if err := checkField(extd, md); err != nil {
+		return err
+	}
+
+	// coerce val to the canonical representation for this field type
+	val, err = validFieldValue(extd, val)
+	if err != nil {
+		return err
+	}
+
+	// serialize the value and attach it as a raw extension
+	var b codedBuffer
+	if err := marshalField(extd.GetNumber(), extd, val, &b, defaultDeterminism); err != nil {
+		return err
+	}
+	proto.SetRawExtension(msg, extd.GetNumber(), b.buf)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// ExtensionRegistry is a registry of known extension fields. This is used to parse
+// extension fields encountered when de-serializing a dynamic message.
+type ExtensionRegistry struct {
+	// includeDefault makes lookups fall back to extensions statically linked
+	// into the program (registered via proto.RegisterExtension).
+	includeDefault bool
+	// mu guards exts.
+	mu sync.RWMutex
+	// exts maps extendee fully-qualified message name -> tag number -> descriptor.
+	exts map[string]map[int32]*desc.FieldDescriptor
+}
+
+// NewExtensionRegistryWithDefaults is a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+	// exts is left nil; it is lazily initialized by the Add* methods
+	return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+	// convert everything up front so a failure leaves the registry untouched
+	flds := make([]*desc.FieldDescriptor, 0, len(exts))
+	for _, ext := range exts {
+		fd, err := desc.LoadFieldDescriptorForExtension(ext)
+		if err != nil {
+			return err
+		}
+		flds = append(flds, fd)
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, fd := range flds {
+		r.putExtensionLocked(fd)
+	}
+	return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+	// validate everything up front so a failure leaves the registry untouched
+	for _, ext := range exts {
+		if !ext.IsExtension() {
+			return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+		}
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range exts {
+		r.putExtensionLocked(ext)
+	}
+	return nil
+}
+
+// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
+func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	// non-recursive: dependencies of fd are not examined, so alreadySeen is nil
+	r.addExtensionsFromFileLocked(fd, false, nil)
+}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the give file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	// alreadySeen prevents re-processing shared dependencies (and cycles)
+	already := map[*desc.FileDescriptor]struct{}{}
+	r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+// addExtensionsFromFileLocked registers all extensions declared in fd (both
+// file-level and nested in messages). When recursive is true it also walks
+// fd's dependencies, using alreadySeen to avoid visiting a file twice.
+// Callers must hold r.mu. alreadySeen may be nil when recursive is false
+// (reading a nil map is safe).
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+	if _, ok := alreadySeen[fd]; ok {
+		return
+	}
+
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range fd.GetExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range fd.GetMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+
+	if recursive {
+		alreadySeen[fd] = struct{}{}
+		for _, dep := range fd.GetDependencies() {
+			r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+		}
+	}
+}
+
+// addExtensionsFromMessageLocked registers all extensions declared inside md,
+// recursing into nested message types. Callers must hold r.mu.
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+	for _, ext := range md.GetNestedExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, nested := range md.GetNestedMessageTypes() {
+		r.addExtensionsFromMessageLocked(nested)
+	}
+}
+
+// putExtensionLocked records fd keyed by its extended message's name and tag
+// number. Callers must hold r.mu and have initialized r.exts.
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+	owner := fd.GetOwner().GetFullyQualifiedName()
+	byTag, ok := r.exts[owner]
+	if !ok {
+		byTag = map[int32]*desc.FieldDescriptor{}
+		r.exts[owner] = byTag
+	}
+	byTag[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+	// a nil registry is usable and simply knows no extensions
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	fd := r.exts[messageName][tagNumber]
+	if fd == nil && r.includeDefault {
+		// fall back to extensions statically registered with the proto runtime
+		ext := getDefaultExtensions(messageName)[tagNumber]
+		if ext != nil {
+			// load error deliberately ignored: fd stays nil, reported as "not found"
+			fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+		}
+	}
+	return fd
+}
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+	// a nil registry is usable and simply knows no extensions
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, err := desc.LoadFieldDescriptorForExtension(ext)
+			if err != nil {
+				// skip descriptors that fail to load rather than invoking a
+				// method on the nil result (previously a potential panic)
+				continue
+			}
+			if fd.GetFullyQualifiedName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+	// a nil registry is usable and simply knows no extensions
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedJSONName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, err := desc.LoadFieldDescriptorForExtension(ext)
+			if err != nil {
+				// skip descriptors that fail to load rather than invoking a
+				// method on the nil result (previously a potential panic)
+				continue
+			}
+			if fd.GetFullyQualifiedJSONName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+// getDefaultExtensions returns the statically-registered extensions for the
+// named message type, or nil if the type is not linked into this program.
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+	t := proto.MessageType(messageName)
+	if t == nil {
+		return nil
+	}
+	msg := reflect.Zero(t).Interface().(proto.Message)
+	return proto.RegisteredExtensions(msg)
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+	// a nil registry is usable and simply knows no extensions
+	if r == nil {
+		return []*desc.FieldDescriptor(nil)
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	flds := r.exts[messageName]
+	var ret []*desc.FieldDescriptor
+	if r.includeDefault {
+		exts := getDefaultExtensions(messageName)
+		if len(exts) > 0 || len(flds) > 0 {
+			ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+		}
+		for tag, ext := range exts {
+			if _, ok := flds[tag]; ok {
+				// skip default extension and use the one explicitly registered instead
+				continue
+			}
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd != nil {
+				// descriptors that fail to load are silently omitted
+				ret = append(ret, fd)
+			}
+		}
+	} else if len(flds) > 0 {
+		ret = make([]*desc.FieldDescriptor, 0, len(flds))
+	}
+
+	// explicitly-registered extensions always win over defaults
+	for _, ext := range flds {
+		ret = append(ret, ext)
+	}
+	return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 0000000..1eaedfa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,303 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// method where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
+// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
+type Stub struct {
+	channel Channel                 // transport used to issue RPCs
+	mf      *dynamic.MessageFactory // creates response messages; may be nil (see NewStub)
+}
+
+// Channel represents the operations necessary to issue RPCs via gRPC. The
+// *grpc.ClientConn type provides this interface and will typically be the
+// concrete type used to construct Stubs. But the use of this interface allows
+// construction of stubs that use alternate concrete types as the transport for
+// RPC operations.
+type Channel interface {
+	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
+	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
+}
+
+// compile-time check that *grpc.ClientConn satisfies Channel
+var _ Channel = (*grpc.ClientConn)(nil)
+
+// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
+// Response messages are created with a nil (default) message factory.
+func NewStub(channel Channel) Stub {
+	return NewStubWithMessageFactory(channel, nil)
+}
+
+// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
+// dispatching RPCs and the given MessageFactory for creating response messages.
+// mf may be nil.
+func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
+	return Stub{channel: channel, mf: mf}
+}
+
+// requestMethod computes the full gRPC method path: "/service.Name/Method".
+func requestMethod(md *desc.MethodDescriptor) string {
+	return "/" + md.GetService().GetFullyQualifiedName() + "/" + md.GetName()
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+	if method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	// reject requests whose type does not match the method's input type
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	// the channel unmarshals the server's reply into resp in place
+	resp := s.mf.NewMessage(method.GetOutputType())
+	if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// InvokeRpcServerStream sends a unary RPC and returns the response stream. Use this for server-streaming methods.
+func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) {
+	if method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...)
+	if err != nil {
+		// previously this path returned without calling cancel, leaking the
+		// derived context until the parent context was cancelled
+		cancel()
+		return nil, err
+	}
+	if err := cs.SendMsg(request); err != nil {
+		cancel()
+		return nil, err
+	}
+	if err := cs.CloseSend(); err != nil {
+		cancel()
+		return nil, err
+	}
+	// NOTE(review): on success cancel is not retained, matching the original
+	// behavior; the derived context ends with the stream or parent context
+	return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
+}
+
+// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end,
+// receive the response message. Use this for client-streaming methods.
+func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) {
+	if !method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...)
+	if err != nil {
+		// previously this path returned without calling cancel, leaking the
+		// derived context until the parent context was cancelled
+		cancel()
+		return nil, err
+	}
+	// the stream keeps cancel so it can abort the RPC if the server misbehaves
+	return &ClientStream{cs, method, s.mf, cancel}, nil
+}
+
+// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response
+// messages. Use this for bidi-streaming methods.
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+	if !method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+}
+
+// methodType describes a method's streaming disposition, for error messages.
+func methodType(md *desc.MethodDescriptor) string {
+	switch {
+	case md.IsClientStreaming() && md.IsServerStreaming():
+		return "bidi-streaming"
+	case md.IsClientStreaming():
+		return "client-streaming"
+	case md.IsServerStreaming():
+		return "server-streaming"
+	default:
+		return "unary"
+	}
+}
+
+// checkMessageType verifies that msg is an instance of the message type
+// described by md, returning a descriptive error when it is not.
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+	var typeName string
+	if dm, ok := msg.(*dynamic.Message); ok {
+		typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		typeName = proto.MessageName(msg)
+	}
+	if typeName == md.GetFullyQualifiedName() {
+		return nil
+	}
+	return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+}
+
+// ServerStream represents a response stream from a server. Messages in the stream can be queried
+// as can header and trailer metadata sent by the server.
+type ServerStream struct {
+	stream   grpc.ClientStream
+	respType *desc.MessageDescriptor // type of each response message
+	mf       *dynamic.MessageFactory // used to instantiate response messages
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received). It delegates to the underlying grpc.ClientStream.
+func (s *ServerStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+// It delegates to the underlying grpc.ClientStream.
+func (s *ServerStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+// It delegates to the underlying grpc.ClientStream.
+func (s *ServerStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ClientStream represents the request stream of a client-streaming RPC. Messages
+// can be sent on the stream and, when done, the unary server response and header
+// and trailer metadata can be queried.
+type ClientStream struct {
+	stream grpc.ClientStream
+	method *desc.MethodDescriptor  // describes input and output types
+	mf     *dynamic.MessageFactory // used to instantiate the response message
+	cancel context.CancelFunc      // aborts the RPC's context on protocol violations
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received). It delegates to the underlying grpc.ClientStream.
+func (s *ClientStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// CloseAndReceive returns (ClientStream has no RecvMsg; the original comment's
+// reference to RecvMsg appears to be a copy-paste from ServerStream).
+func (s *ClientStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+// It delegates to the underlying grpc.ClientStream.
+func (s *ClientStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server. The message must match the
+// method's declared input type.
+func (s *ClientStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.method.GetInputType(), m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseAndReceive closes the outgoing request stream and then blocks for the server's response.
+func (s *ClientStream) CloseAndReceive() (proto.Message, error) {
+	if err := s.stream.CloseSend(); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(s.method.GetOutputType())
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	}
+	// make sure we get EOF for a second message
+	switch err := s.stream.RecvMsg(resp); err {
+	case io.EOF:
+		return resp, nil
+	case nil:
+		// protocol violation: abort the RPC before reporting it
+		s.cancel()
+		return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName())
+	default:
+		return nil, err
+	}
+}
+
+// BidiStream represents a bi-directional stream for sending messages to and receiving
+// messages from a server. The header and trailer metadata sent by the server can also be
+// queried.
+type BidiStream struct {
+	stream   grpc.ClientStream
+	reqType  *desc.MessageDescriptor // type of each request message
+	respType *desc.MessageDescriptor // type of each response message
+	mf       *dynamic.MessageFactory // used to instantiate response messages
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received). It delegates to the underlying grpc.ClientStream.
+func (s *BidiStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+// It delegates to the underlying grpc.ClientStream.
+func (s *BidiStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+// It delegates to the underlying grpc.ClientStream.
+func (s *BidiStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server. The message must match the
+// method's declared input type.
+func (s *BidiStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.reqType, m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseSend indicates the request stream has ended. Invoke this after all request messages
+// are sent (even if there are zero such messages). Responses may still be received afterwards.
+func (s *BidiStream) CloseSend() error {
+	return s.stream.CloseSend()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *BidiStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
+type indentBuffer struct {
+ bytes.Buffer
+ indent string
+ indentCount int
+ comma bool
+}
+
+func (b *indentBuffer) start() error {
+ if b.indentCount >= 0 {
+ b.indentCount++
+ return b.newLine(false)
+ }
+ return nil
+}
+
+func (b *indentBuffer) sep() error {
+ if b.indentCount >= 0 {
+ _, err := b.WriteString(": ")
+ return err
+ } else {
+ return b.WriteByte(':')
+ }
+}
+
+func (b *indentBuffer) end() error {
+ if b.indentCount >= 0 {
+ b.indentCount--
+ return b.newLine(false)
+ }
+ return nil
+}
+
+func (b *indentBuffer) maybeNext(first *bool) error {
+ if *first {
+ *first = false
+ return nil
+ } else {
+ return b.next()
+ }
+}
+
+func (b *indentBuffer) next() error {
+ if b.indentCount >= 0 {
+ return b.newLine(b.comma)
+ } else if b.comma {
+ return b.WriteByte(',')
+ } else {
+ return b.WriteByte(' ')
+ }
+}
+
+func (b *indentBuffer) newLine(comma bool) error {
+ if comma {
+ err := b.WriteByte(',')
+ if err != nil {
+ return err
+ }
+ }
+
+ err := b.WriteByte('\n')
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < b.indentCount; i++ {
+ _, err := b.WriteString(b.indent)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..f79b4ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1201 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ // link in the well-known-types that have a special JSON format
+ _ "github.com/golang/protobuf/ptypes/any"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/empty"
+ _ "github.com/golang/protobuf/ptypes/struct"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ _ "github.com/golang/protobuf/ptypes/wrappers"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+var wellKnownTypeNames = map[string]struct{}{
+ "google.protobuf.Any": {},
+ "google.protobuf.Empty": {},
+ "google.protobuf.Duration": {},
+ "google.protobuf.Timestamp": {},
+ // struct.proto
+ "google.protobuf.Struct": {},
+ "google.protobuf.Value": {},
+ "google.protobuf.ListValue": {},
+ // wrappers.proto
+ "google.protobuf.DoubleValue": {},
+ "google.protobuf.FloatValue": {},
+ "google.protobuf.Int64Value": {},
+ "google.protobuf.UInt64Value": {},
+ "google.protobuf.Int32Value": {},
+ "google.protobuf.UInt32Value": {},
+ "google.protobuf.BoolValue": {},
+ "google.protobuf.StringValue": {},
+ "google.protobuf.BytesValue": {},
+}
+
+// MarshalJSON serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a compact form: no newlines, and spaces between fields and
+// between field identifiers and values are elided.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+// m.MarshalJSONPB(&jsonpb.Marshaler{})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSON() ([]byte, error) {
+ return m.MarshalJSONPB(&jsonpb.Marshaler{})
+}
+
+// MarshalJSONIndent serializes this message to bytes in JSON format, returning
+// an error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values. Indentation of two spaces is
+// used.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSONIndent() ([]byte, error) {
+ return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
+}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal it.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+ var b indentBuffer
+ b.indent = opts.Indent
+ if len(opts.Indent) == 0 {
+ b.indentCount = -1
+ }
+ b.comma = true
+ if err := m.marshalJSON(&b, opts); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
+ if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+ newOpts := *opts
+ newOpts.AnyResolver = r
+ opts = &newOpts
+ }
+
+ if ok, err := marshalWellKnownType(m, b, opts); ok {
+ return err
+ }
+
+ err := b.WriteByte('{')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+
+ var tags []int
+ if opts.EmitDefaults {
+ tags = m.allKnownFieldTags()
+ } else {
+ tags = m.knownFieldTags()
+ }
+
+ first := true
+
+ for _, tag := range tags {
+ itag := int32(tag)
+ fd := m.FindFieldDescriptor(itag)
+
+ v, ok := m.values[itag]
+ if !ok {
+ if fd.GetOneOf() != nil {
+ // don't print defaults for fields in a oneof
+ continue
+ }
+ v = fd.GetDefaultValue()
+ }
+
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ err = marshalKnownFieldJSON(b, fd, v, opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ err = b.WriteByte('}')
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
+ fqn := m.md.GetFullyQualifiedName()
+ if _, ok := wellKnownTypeNames[fqn]; !ok {
+ return false, nil
+ }
+
+ msgType := proto.MessageType(fqn)
+ if msgType == nil {
+ // wtf?
+ panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+ }
+
+ // convert dynamic message to well-known type and let jsonpb marshal it
+ msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+ if err := m.MergeInto(msg); err != nil {
+ return true, err
+ }
+ return true, opts.Marshal(b, msg)
+}
+
+func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+ var jsonName string
+ if opts.OrigName {
+ jsonName = fd.GetName()
+ } else {
+ jsonName = fd.AsFieldDescriptorProto().GetJsonName()
+ if jsonName == "" {
+ jsonName = fd.GetName()
+ }
+ }
+ if fd.IsExtension() {
+ var scope string
+ switch parent := fd.GetParent().(type) {
+ case *desc.FileDescriptor:
+ scope = parent.GetPackage()
+ default:
+ scope = parent.GetFullyQualifiedName()
+ }
+ if scope == "" {
+ jsonName = fmt.Sprintf("[%s]", jsonName)
+ } else {
+ jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
+ }
+ }
+ err := writeJsonString(b, jsonName)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+
+ if isNil(v) {
+ _, err := b.WriteString("null")
+ return err
+ }
+
+ if fd.IsMap() {
+ err = b.WriteByte('{')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+
+ md := fd.GetMessageType()
+ vfd := md.FindFieldByNumber(2)
+
+ mp := v.(map[interface{}]interface{})
+ keys := make([]interface{}, 0, len(mp))
+ for k := range mp {
+ keys = append(keys, k)
+ }
+ sort.Sort(sortable(keys))
+ first := true
+ for _, mk := range keys {
+ mv := mp[mk]
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+
+ err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ return b.WriteByte('}')
+
+ } else if fd.IsRepeated() {
+ err = b.WriteByte('[')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+
+ sl := v.([]interface{})
+ first := true
+ for _, slv := range sl {
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ err = marshalKnownFieldValueJSON(b, fd, slv, opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ return b.WriteByte(']')
+
+ } else {
+ return marshalKnownFieldValueJSON(b, fd, v, opts)
+ }
+}
+
+func isNil(v interface{}) bool {
+ if v == nil {
+ return true
+ }
+ rv := reflect.ValueOf(v)
+ return rv.Kind() == reflect.Ptr && rv.IsNil()
+}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+ rk := reflect.ValueOf(mk)
+ var strkey string
+ switch rk.Kind() {
+ case reflect.Bool:
+ strkey = strconv.FormatBool(rk.Bool())
+ case reflect.Int32, reflect.Int64:
+ strkey = strconv.FormatInt(rk.Int(), 10)
+ case reflect.Uint32, reflect.Uint64:
+ strkey = strconv.FormatUint(rk.Uint(), 10)
+ case reflect.String:
+ strkey = rk.String()
+ default:
+ return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+ }
+ err := writeString(b, strkey)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
+func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Int32, reflect.Int64:
+ ed := fd.GetEnumType()
+ if !opts.EnumsAsInts && ed != nil {
+ n := int32(rv.Int())
+ vd := ed.FindValueByNumber(n)
+ if vd == nil {
+ _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+ return err
+ } else {
+ return writeJsonString(b, vd.GetName())
+ }
+ } else {
+ _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+ return err
+ }
+ case reflect.Uint32, reflect.Uint64:
+ _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+ return err
+ case reflect.Float32, reflect.Float64:
+ f := rv.Float()
+ var str string
+ if math.IsNaN(f) {
+ str = `"NaN"`
+ } else if math.IsInf(f, 1) {
+ str = `"Infinity"`
+ } else if math.IsInf(f, -1) {
+ str = `"-Infinity"`
+ } else {
+ var bits int
+ if rv.Kind() == reflect.Float32 {
+ bits = 32
+ } else {
+ bits = 64
+ }
+ str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+ }
+ _, err := b.WriteString(str)
+ return err
+ case reflect.Bool:
+ _, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+ return err
+ case reflect.Slice:
+ bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
+ return writeJsonString(b, bstr)
+ case reflect.String:
+ return writeJsonString(b, rv.String())
+ default:
+ // must be a message
+ if dm, ok := v.(*Message); ok {
+ return dm.marshalJSON(b, opts)
+ } else {
+ var err error
+ if b.indentCount <= 0 || len(b.indent) == 0 {
+ err = opts.Marshal(b, v.(proto.Message))
+ } else {
+ str, err := opts.MarshalToString(v.(proto.Message))
+ if err != nil {
+ return err
+ }
+ indent := strings.Repeat(b.indent, b.indentCount)
+ pos := 0
+ // add indention prefix to each line
+ for pos < len(str) {
+ start := pos
+ nextPos := strings.Index(str[pos:], "\n")
+ if nextPos == -1 {
+ nextPos = len(str)
+ } else {
+ nextPos = pos + nextPos + 1 // include newline
+ }
+ line := str[start:nextPos]
+ if pos > 0 {
+ _, err = b.WriteString(indent)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = b.WriteString(line)
+ if err != nil {
+ return err
+ }
+ pos = nextPos
+ }
+ }
+ return err
+ }
+ }
+}
+
+func writeJsonString(b *indentBuffer, s string) error {
+ if sbytes, err := json.Marshal(s); err != nil {
+ return err
+ } else {
+ _, err := b.Write(sbytes)
+ return err
+ }
+}
+
+// UnmarshalJSON de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
+// value) unmarshaler:
+//
+// m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+//
+// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
+// will be used when parsing google.protobuf.Any messages.
+func (m *Message) UnmarshalJSON(js []byte) error {
+ return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
+// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSON(js []byte) error {
+ return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. The given unmarshaler conveys options used
+// when parsing the JSON. This function first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// The decoding is lenient:
+// 1. The JSON can refer to fields either by their JSON name or by their
+// declared name.
+// 2. The JSON can use either numeric values or string names for enum values.
+//
+// When instantiating nested messages, if this message's associated factory
+// returns a generated message type (as opposed to a dynamic message), the given
+// unmarshaler is used to unmarshal it.
+//
+// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
+// the given unmarshaler is augmented with knowledge of message types known to
+// this message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
+// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
+// format, in the given bytes into this message. The given unmarshaler conveys
+// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+ r := newJsReader(js)
+ err := m.unmarshalJson(r, opts)
+ if err != nil {
+ return err
+ }
+ if t, err := r.poll(); err != io.EOF {
+ b, _ := ioutil.ReadAll(r.unread())
+ s := fmt.Sprintf("%v%s", t, string(b))
+ return fmt.Errorf("superfluous data found after JSON object: %q", s)
+ }
+ return nil
+}
+
+func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
+ fqn := m.md.GetFullyQualifiedName()
+ if _, ok := wellKnownTypeNames[fqn]; !ok {
+ return false, nil
+ }
+
+ msgType := proto.MessageType(fqn)
+ if msgType == nil {
+ // wtf?
+ panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+ }
+
+ // extract json value from r
+ var js json.RawMessage
+ if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
+ return true, err
+ }
+ if err := r.skip(); err != nil {
+ return true, err
+ }
+
+ // unmarshal into well-known type and then convert to dynamic message
+ msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+ if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
+ return true, err
+ }
+ return true, m.MergeFrom(msg)
+}
+
+func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
+ if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+ newOpts := *opts
+ newOpts.AnyResolver = r
+ opts = &newOpts
+ }
+
+ if ok, err := unmarshalWellKnownType(m, r, opts); ok {
+ return err
+ }
+
+ t, err := r.peek()
+ if err != nil {
+ return err
+ }
+ if t == nil {
+ // if json is simply "null" we do nothing
+ r.poll()
+ return nil
+ }
+
+ if err := r.beginObject(); err != nil {
+ return err
+ }
+
+ for r.hasNext() {
+ f, err := r.nextObjectKey()
+ if err != nil {
+ return err
+ }
+ fd := m.FindFieldDescriptorByJSONName(f)
+ if fd == nil {
+ if opts.AllowUnknownFields {
+ r.skip()
+ continue
+ }
+ return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
+ }
+ v, err := unmarshalJsField(fd, r, m.mf, opts)
+ if err != nil {
+ return err
+ }
+ if v != nil {
+ if err := mergeField(m, fd, v); err != nil {
+ return err
+ }
+ } else if fd.GetOneOf() != nil {
+ // preserve explicit null for oneof fields (this is a little odd but
+ // mimics the behavior of jsonpb with oneofs in generated message types)
+ if fd.GetMessageType() != nil {
+ typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
+ if typ != nil {
+ // typed nil
+ if typ.Kind() != reflect.Ptr {
+ typ = reflect.PtrTo(typ)
+ }
+ v = reflect.Zero(typ).Interface()
+ } else {
+ // can't use nil dynamic message, so we just use empty one instead
+ v = m.mf.NewDynamicMessage(fd.GetMessageType())
+ }
+ if err := m.setField(fd, v); err != nil {
+ return err
+ }
+ } else {
+ // not a message... explicit null makes no sense
+ return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
+ }
+ } else {
+ m.clearField(fd)
+ }
+ }
+
+ if err := r.endObject(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+ return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+ return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue"
+}
+
+func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+ t, err := r.peek()
+ if err != nil {
+ return nil, err
+ }
+ if t == nil && !isWellKnownValue(fd) {
+ // if value is null, just return nil
+ // (unless field is google.protobuf.Value, in which case
+ // we fall through to parse it as an instance where its
+ // underlying value is set to a NullValue)
+ r.poll()
+ return nil, nil
+ }
+
+ if t == json.Delim('{') && fd.IsMap() {
+ entryType := fd.GetMessageType()
+ keyType := entryType.FindFieldByNumber(1)
+ valueType := entryType.FindFieldByNumber(2)
+ mp := map[interface{}]interface{}{}
+
+ // TODO: if there are just two map keys "key" and "value" and they have the right type of values,
+ // treat this JSON object as a single map entry message. (In keeping with support of map fields as
+ // if they were normal repeated field of entry messages as well as supporting a transition from
+ // optional to repeated...)
+
+ if err := r.beginObject(); err != nil {
+ return nil, err
+ }
+ for r.hasNext() {
+ kk, err := unmarshalJsFieldElement(keyType, r, mf, opts)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := unmarshalJsFieldElement(valueType, r, mf, opts)
+ if err != nil {
+ return nil, err
+ }
+ mp[kk] = vv
+ }
+ if err := r.endObject(); err != nil {
+ return nil, err
+ }
+
+ return mp, nil
+ } else if t == json.Delim('[') && !isWellKnownListValue(fd) {
+ // We support parsing an array, even if field is not repeated, to mimic support in proto
+ // binary wire format that supports changing an optional field to repeated and vice versa.
+ // If the field is not repeated, we only keep the last value in the array.
+
+ if err := r.beginArray(); err != nil {
+ return nil, err
+ }
+ var sl []interface{}
+ var v interface{}
+ for r.hasNext() {
+ var err error
+ v, err = unmarshalJsFieldElement(fd, r, mf, opts)
+ if err != nil {
+ return nil, err
+ }
+ if fd.IsRepeated() && v != nil {
+ sl = append(sl, v)
+ }
+ }
+ if err := r.endArray(); err != nil {
+ return nil, err
+ }
+ if fd.IsMap() {
+ mp := map[interface{}]interface{}{}
+ for _, m := range sl {
+ msg := m.(*Message)
+ kk, err := msg.TryGetFieldByNumber(1)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := msg.TryGetFieldByNumber(2)
+ if err != nil {
+ return nil, err
+ }
+ mp[kk] = vv
+ }
+ return mp, nil
+ } else if fd.IsRepeated() {
+ return sl, nil
+ } else {
+ return v, nil
+ }
+ } else {
+ // We support parsing a singular value, even if field is repeated, to mimic support in proto
+ // binary wire format that supports changing an optional field to repeated and vice versa.
+ // If the field is repeated, we store value as singleton slice of that one value.
+
+ v, err := unmarshalJsFieldElement(fd, r, mf, opts)
+ if err != nil {
+ return nil, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+ if fd.IsRepeated() {
+ return []interface{}{v}, nil
+ } else {
+ return v, nil
+ }
+ }
+}
+
+func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+ t, err := r.peek()
+ if err != nil {
+ return nil, err
+ }
+
+ switch fd.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptor.FieldDescriptorProto_TYPE_GROUP:
+ m := mf.NewMessage(fd.GetMessageType())
+ if dm, ok := m.(*Message); ok {
+ if err := dm.unmarshalJson(r, opts); err != nil {
+ return nil, err
+ }
+ } else {
+ var msg json.RawMessage
+ if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
+ return nil, err
+ }
+ if err := r.skip(); err != nil {
+ return nil, err
+ }
+ if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ if e, err := r.nextNumber(); err != nil {
+ return nil, err
+ } else {
+ // value could be string or number
+ if i, err := e.Int64(); err != nil {
+ // number cannot be parsed, so see if it's an enum value name
+ vd := fd.GetEnumType().FindValueByName(string(e))
+ if vd != nil {
+ return vd.GetNumber(), nil
+ } else {
+ return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
+ }
+ } else if i > math.MaxInt32 || i < math.MinInt32 {
+ return nil, NumericOverflowError
+ } else {
+ return int32(i), err
+ }
+ }
+
+ case descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ if i, err := r.nextInt(); err != nil {
+ return nil, err
+ } else if i > math.MaxInt32 || i < math.MinInt32 {
+ return nil, NumericOverflowError
+ } else {
+ return int32(i), err
+ }
+
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SINT64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ return r.nextInt()
+
+ case descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ if i, err := r.nextUint(); err != nil {
+ return nil, err
+ } else if i > math.MaxUint32 {
+ return nil, NumericOverflowError
+ } else {
+ return uint32(i), err
+ }
+
+ case descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ return r.nextUint()
+
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ if str, ok := t.(string); ok {
+ if str == "true" {
+ r.poll() // consume token
+ return true, err
+ } else if str == "false" {
+ r.poll() // consume token
+ return false, err
+ }
+ }
+ return r.nextBool()
+
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ if f, err := r.nextFloat(); err != nil {
+ return nil, err
+ } else {
+ return float32(f), nil
+ }
+
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return r.nextFloat()
+
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return r.nextBytes()
+
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return r.nextString()
+
+ default:
+ return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
+ }
+}
+
+type jsReader struct {
+ reader *bytes.Reader
+ dec *json.Decoder
+ current json.Token
+ peeked bool
+}
+
+func newJsReader(b []byte) *jsReader {
+ reader := bytes.NewReader(b)
+ dec := json.NewDecoder(reader)
+ dec.UseNumber()
+ return &jsReader{reader: reader, dec: dec}
+}
+
+func (r *jsReader) unread() io.Reader {
+ bufs := make([]io.Reader, 3)
+ var peeked []byte
+ if r.peeked {
+ if _, ok := r.current.(json.Delim); ok {
+ peeked = []byte(fmt.Sprintf("%v", r.current))
+ } else {
+ peeked, _ = json.Marshal(r.current)
+ }
+ }
+ readerCopy := *r.reader
+ decCopy := *r.dec
+
+ bufs[0] = bytes.NewReader(peeked)
+ bufs[1] = decCopy.Buffered()
+ bufs[2] = &readerCopy
+ return &concatReader{bufs: bufs}
+}
+
+func (r *jsReader) hasNext() bool {
+ return r.dec.More()
+}
+
+func (r *jsReader) peek() (json.Token, error) {
+ if r.peeked {
+ return r.current, nil
+ }
+ t, err := r.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ r.peeked = true
+ r.current = t
+ return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+ if r.peeked {
+ ret := r.current
+ r.current = nil
+ r.peeked = false
+ return ret, nil
+ }
+ return r.dec.Token()
+}
+
+func (r *jsReader) beginObject() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
+ return err
+}
+
+func (r *jsReader) endObject() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
+ return err
+}
+
+func (r *jsReader) beginArray() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
+ return err
+}
+
+func (r *jsReader) endArray() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
+ return err
+}
+
+func (r *jsReader) nextObjectKey() (string, error) {
+ return r.nextString()
+}
+
+func (r *jsReader) nextString() (string, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+ if err != nil {
+ return "", err
+ }
+ return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+ str, err := r.nextString()
+ if err != nil {
+ return nil, err
+ }
+ return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+ if err != nil {
+ return false, err
+ }
+ return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Float64()
+}
+
+func (r *jsReader) nextNumber() (json.Number, error) {
+ t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
+ if err != nil {
+ return "", err
+ }
+ switch t := t.(type) {
+ case json.Number:
+ return t, nil
+ case string:
+ return json.Number(t), nil
+ }
+ return "", fmt.Errorf("expecting a number but got %v", t)
+}
+
+func (r *jsReader) skip() error {
+ t, err := r.poll()
+ if err != nil {
+ return err
+ }
+ if t == json.Delim('[') {
+ if err := r.skipArray(); err != nil {
+ return err
+ }
+ } else if t == json.Delim('{') {
+ if err := r.skipObject(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *jsReader) skipArray() error {
+ for r.hasNext() {
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endArray(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) skipObject() error {
+ for r.hasNext() {
+ // skip object key
+ if err := r.skip(); err != nil {
+ return err
+ }
+ // and value
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endObject(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+ t, err := r.poll()
+ if err != nil {
+ return nil, err
+ }
+ if t == nil && ifNil != nil {
+ return ifNil, nil
+ }
+ if !predicate(t) {
+ return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+ }
+ return t, nil
+}
+
+type concatReader struct {
+ bufs []io.Reader
+ curr int
+}
+
+func (r *concatReader) Read(p []byte) (n int, err error) {
+ for {
+ if r.curr >= len(r.bufs) {
+ err = io.EOF
+ return
+ }
+ var c int
+ c, err = r.bufs[r.curr].Read(p)
+ n += c
+ if err != io.EOF {
+ return
+ }
+ r.curr++
+ p = p[c:]
+ }
+}
+
+// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
+// to resolve message names. It uses the given factory, which may be nil, to
+// instantiate messages. The messages that it returns when resolving a type name
+// may often be dynamic messages.
+func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
+ return &anyResolver{mf: mf, files: files}
+}
+
+type anyResolver struct {
+ mf *MessageFactory
+ files []*desc.FileDescriptor
+ ignored map[*desc.FileDescriptor]struct{}
+ other jsonpb.AnyResolver
+}
+
+func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
+ if r, ok := r.(*anyResolver); ok {
+ if _, ok := r.ignored[f]; ok {
+ // if the current resolver is ignoring this file, it's because another
+ // (upstream) resolver is already handling it, so nothing to do
+ return r, false
+ }
+ for _, file := range r.files {
+ if file == f {
+ // no need to wrap!
+ return r, false
+ }
+ }
+ // ignore files that will be checked by the resolver we're wrapping
+ // (we'll just delegate and let it search those files)
+ ignored := map[*desc.FileDescriptor]struct{}{}
+ for i := range r.ignored {
+ ignored[i] = struct{}{}
+ }
+ ignore(r.files, ignored)
+ return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
+ }
+ return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
+}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+ for _, f := range files {
+ if _, ok := ignored[f]; ok {
+ continue
+ }
+ ignored[f] = struct{}{}
+ ignore(f.GetDependencies(), ignored)
+ }
+}
+
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+
+ // see if the user-specified resolver is able to do the job
+ if r.other != nil {
+ msg, err := r.other.Resolve(typeUrl)
+ if err == nil {
+ return msg, nil
+ }
+ }
+
+ // try to find the message in our known set of files
+ checked := map[*desc.FileDescriptor]struct{}{}
+ for _, f := range r.files {
+ md := r.findMessage(f, mname, checked)
+ if md != nil {
+ return r.mf.NewMessage(md), nil
+ }
+ }
+ // failing that, see if the message factory knows about this type
+ var ktr *KnownTypeRegistry
+ if r.mf != nil {
+ ktr = r.mf.ktr
+ } else {
+ ktr = (*KnownTypeRegistry)(nil)
+ }
+ m := ktr.CreateIfKnown(mname)
+ if m != nil {
+ return m, nil
+ }
+
+ // no other resolver to fallback to? mimic default behavior
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+ // if this is an ignored descriptor, skip
+ if _, ok := r.ignored[fd]; ok {
+ return nil
+ }
+
+ // bail if we've already checked this file
+ if _, ok := checked[fd]; ok {
+ return nil
+ }
+ checked[fd] = struct{}{}
+
+ // see if this file has the message
+ md := fd.FindMessage(msgName)
+ if md != nil {
+ return md
+ }
+
+ // if not, recursively search the file's imports
+ for _, dep := range fd.GetDependencies() {
+ md = r.findMessage(dep, msgName, checked)
+ if md != nil {
+ return md
+ }
+ }
+ return nil
+}
+
+var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..bb68d7b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,129 @@
+//+build !go1.12
+
+package dynamic
+
+import (
+ "github.com/jhump/protoreflect/desc"
+ "reflect"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+// mapsEqual reports whether the two reflected maps contain the same keys
+// and, per fieldsEqual, equal values. Both arguments must be map values.
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty because MapKeys()
+		// function allocates heavily.
+		return true
+	}
+
+	for _, k := range a.MapKeys() {
+		av := a.MapIndex(k)
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			// key present in a but missing from b
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// validFieldValueForMapField validates each entry of the given reflected map
+// against the map field's key and value sub-fields (fields 1 and 2 of the
+// synthetic entry message) and returns a defensive copy of the map as
+// map[interface{}]interface{}.
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	for _, k := range val.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := val.MapIndex(k)
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+// canConvertMap reports whether every key and value of the source map can be
+// converted (per canConvert) to the target map type's key and element types.
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	for _, k := range src.MapKeys() {
+		if !canConvert(k, kt) {
+			return false
+		}
+		if !canConvert(src.MapIndex(k), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+// mergeMapVal copies every entry of src into target (of type targetType),
+// converting keys and values via mergeVal when the types differ. The target
+// map is lazily allocated on first write so a nil destination is handled.
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	for _, k := range src.MapKeys() {
+		v := src.MapIndex(k)
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			// NOTE(review): values obtained via MapKeys are not addressable,
+			// so k.Addr() would panic if this branch were ever taken; it
+			// appears unreachable for proto maps (scalar keys) — confirm.
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			// NOTE(review): same addressability concern as above for v.Addr()
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			// lazily allocate the destination map
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+// mergeMapField stores every entry of the given reflected map into the
+// message's map field fd, unwrapping interface-typed keys/values first.
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	for _, k := range rv.MapKeys() {
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		v := rv.MapIndex(k)
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..f5ffd67
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,137 @@
+//+build go1.12
+
+package dynamic
+
+import (
+ "github.com/jhump/protoreflect/desc"
+ "reflect"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+// mapsEqual reports whether the two reflected maps contain the same keys
+// and, per fieldsEqual, equal values. This Go 1.12+ variant iterates with
+// MapRange, which avoids the allocations MapKeys incurs.
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty
+		return true
+	}
+
+	iter := a.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		av := iter.Value()
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			// key present in a but missing from b
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// validFieldValueForMapField validates each entry of the given reflected map
+// against the map field's key and value sub-fields (fields 1 and 2 of the
+// synthetic entry message) and returns a defensive copy of the map as
+// map[interface{}]interface{}. Go 1.12+ variant using MapRange.
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	iter := val.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := iter.Value()
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+// canConvertMap reports whether every key and value of the source map can be
+// converted (per canConvert) to the target map type's key and element types.
+// Go 1.12+ variant using MapRange.
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		if !canConvert(iter.Key(), kt) {
+			return false
+		}
+		if !canConvert(iter.Value(), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+// mergeMapVal copies every entry of src into target (of type targetType),
+// converting keys and values via mergeVal when the types differ. The target
+// map is lazily allocated on first write. Go 1.12+ variant using MapRange.
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			// NOTE(review): iterator keys are not addressable, so k.Addr()
+			// would panic if this branch were ever taken; it appears
+			// unreachable for proto maps (scalar keys) — confirm.
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			// NOTE(review): same addressability concern as above for v.Addr()
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			// lazily allocate the destination map
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+// mergeMapField stores every entry of the given reflected map into the
+// message's map field fd, unwrapping interface-typed keys/values first.
+// Go 1.12+ variant using MapRange.
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	iter := rv.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message.
+// Use this instead of proto.Merge when one or both of the messages might be
+// a dynamic message. If there is a problem merging the messages, such as the
+// two messages having different types, then this method will panic (just as
+// proto.Merge does).
+func Merge(dst, src proto.Message) {
+	if dm, ok := dst.(*Message); ok {
+		// dynamic destination: it knows how to merge from any source
+		if err := dm.MergeFrom(src); err != nil {
+			panic(err.Error())
+		}
+	} else if dm, ok := src.(*Message); ok {
+		// dynamic source: it knows how to merge into any destination
+		if err := dm.MergeInto(dst); err != nil {
+			panic(err.Error())
+		}
+	} else {
+		// both are generated messages; defer to the standard library
+		proto.Merge(dst, src)
+	}
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panic'ing.
+func TryMerge(dst, src proto.Message) error {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			return err
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			return err
+		}
+	} else {
+		// proto.Merge panics on bad input, so we first verify
+		// inputs and return error instead of panic
+		out := reflect.ValueOf(dst)
+		if out.IsNil() {
+			// catches a typed-nil destination hidden inside the interface
+			return errors.New("proto: nil destination")
+		}
+		in := reflect.ValueOf(src)
+		if in.Type() != out.Type() {
+			return errors.New("proto: type mismatch")
+		}
+		proto.Merge(dst, src)
+	}
+	return nil
+}
+
+// mergeField merges a single field value into message m: map entries are
+// put individually, repeated elements are appended, scalars are overwritten,
+// and message-typed fields are recursively merged into any existing value.
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+	rv := reflect.ValueOf(val)
+
+	if fd.IsMap() && rv.Kind() == reflect.Map {
+		return mergeMapField(m, fd, rv)
+	}
+
+	// a []byte value is a scalar (bytes field), not a repeated field
+	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+		for i := 0; i < rv.Len(); i++ {
+			e := rv.Index(i)
+			if e.Kind() == reflect.Interface && !e.IsNil() {
+				e = e.Elem()
+			}
+			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if fd.IsRepeated() {
+		// a single element to append to a repeated field
+		return m.addRepeatedField(fd, val)
+	} else if fd.GetMessageType() == nil {
+		// scalar field: merging means replacing
+		return m.setField(fd, val)
+	}
+
+	// it's a message type, so we want to merge contents
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+
+	existing, _ := m.doGetField(fd, true)
+	if existing != nil && !reflect.ValueOf(existing).IsNil() {
+		return TryMerge(existing.(proto.Message), val.(proto.Message))
+	}
+
+	// no existing message, so just set field
+	m.internalSetField(fd, val)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..6c54de8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,189 @@
+package dynamic
+
+import (
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+	er *ExtensionRegistry // extensions that produced dynamic messages can parse
+	ktr *KnownTypeRegistry // types returned as generated structs, not dynamic messages
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(er, nil)
+}
+
+// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
+// known types, per the given registry, will be returned as normal protobuf messages
+// (e.g. generated structs, instead of dynamic messages).
+func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(nil, ktr)
+}
+
+// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
+// (those for which protoc-generated code is statically linked into the Go program) are
+// known types. If any dynamic messages are produced, they will recognize and parse all
+// "default" extension fields. This is the equivalent of:
+//   NewMessageFactoryWithRegistries(
+//       NewExtensionRegistryWithDefaults(),
+//       NewKnownTypeRegistryWithDefaults())
+func NewMessageFactoryWithDefaults() *MessageFactory {
+	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
+}
+
+// NewMessageFactoryWithRegistries creates a new message factory with the given extension
+// and known type registries. Either registry may be nil.
+func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
+	return &MessageFactory{
+		er: er,
+		ktr: ktr,
+	}
+}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned. Safe to call on a nil factory.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+	var ktr *KnownTypeRegistry
+	if f != nil {
+		ktr = f.ktr
+	}
+	// CreateIfKnown is nil-receiver safe, so a nil ktr is fine here
+	if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+		return m
+	}
+	return NewMessageWithMessageFactory(md, f)
+}
+
+// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
+// descriptor. This is like f.NewMessage(md) except the known type registry is not
+// consulted so the return value is always a dynamic message.
+//
+// This is also like dynamic.NewMessage(md) except that the returned message will use
+// this factory when creating other messages, like during de-serialization of fields
+// that are themselves message types.
+func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, f)
+}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types. Returns nil for a nil factory.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization. Returns nil for a nil factory.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.er
+}
+
+// wkt is implemented by the generated structs for protobuf's well-known types.
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+// typeOfWkt is used to test, via Implements, whether a registered message
+// type is one of the well-known types.
+var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
+// KnownTypeRegistry is a registry of known message types, as identified by their
+// fully-qualified name. A known message type is one for which a protoc-generated
+// struct exists, so a dynamic message is not necessary to represent it. A
+// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
+// struct or a dynamic message. The zero-value registry (including the behavior of
+// a nil pointer) only knows about the "well-known types" in protobuf. These
+// include only the wrapper types and a handful of other special types like Any,
+// Duration, and Timestamp.
+type KnownTypeRegistry struct {
+	excludeWkt bool // when true, even well-known types are not considered known
+	includeDefault bool // when true, all statically-linked generated types are known
+	mu sync.RWMutex // guards types
+	types map[string]reflect.Type // explicitly registered types, keyed by full name
+}
+
+// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
+// "default" types (those for which protoc-generated code is statically linked
+// into the Go program).
+func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
+	return &KnownTypeRegistry{includeDefault: true}
+}
+
+// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
+// include the "well-known types" in protobuf. So even well-known types would be
+// represented by a dynamic message.
+func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
+	return &KnownTypeRegistry{excludeWkt: true}
+}
+
+// AddKnownType adds the types of the given messages as known types.
+// Unlike the query methods, this must be called on a non-nil registry.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.types == nil {
+		// lazily allocate the map so the zero value stays usable
+		r.types = map[string]reflect.Type{}
+	}
+	for _, kt := range kts {
+		r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+	}
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned. Safe to call on a nil registry.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+	msgType := r.GetKnownType(messageName)
+	if msgType == nil {
+		return nil
+	}
+
+	if msgType.Kind() == reflect.Ptr {
+		// registered as *T: return a new *T
+		return reflect.New(msgType.Elem()).Interface().(proto.Message)
+	} else {
+		// registered as T: return a zero T value
+		return reflect.New(msgType).Elem().Interface().(proto.Message)
+	}
+}
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned. Safe to call on a nil registry,
+// which (like the zero value) knows only the well-known types.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+	var msgType reflect.Type
+	if r == nil {
+		// a nil registry behaves the same as zero value instance: only know of well-known types
+		t := proto.MessageType(messageName)
+		if t != nil && t.Implements(typeOfWkt) {
+			msgType = t
+		}
+	} else {
+		if r.includeDefault {
+			// all statically-linked generated types are known
+			msgType = proto.MessageType(messageName)
+		} else if !r.excludeWkt {
+			// zero-value behavior: only well-known types are known
+			t := proto.MessageType(messageName)
+			if t != nil && t.Implements(typeOfWkt) {
+				msgType = t
+			}
+		}
+		if msgType == nil {
+			// fall back to explicitly registered types
+			r.mu.RLock()
+			msgType = r.types[messageName]
+			r.mu.RUnlock()
+		}
+	}
+
+	return msgType
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..2d0fa04
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1174 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+ "unicode"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+	var b indentBuffer
+	b.indentCount = -1 // no indentation
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+	var b indentBuffer
+	b.indent = "  " // TODO: option for indent?
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// marshalText writes this message's fields to b in text format: known fields
+// first (maps with sorted keys, repeated fields element by element), then
+// unknown fields rendered by tag number from their raw wire encoding.
+func (m *Message) marshalText(b *indentBuffer) error {
+	// TODO: option for emitting extended Any format?
+	first := true
+	// first the known fields
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		v := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd.IsMap() {
+			md := fd.GetMessageType()
+			kfd := md.FindFieldByNumber(1)
+			vfd := md.FindFieldByNumber(2)
+			mp := v.(map[interface{}]interface{})
+			// sort keys for deterministic output
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, mk := range keys {
+				mv := mp[mk]
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
+				if err != nil {
+					return err
+				}
+			}
+		} else if fd.IsRepeated() {
+			sl := v.([]interface{})
+			for _, slv := range sl {
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldText(b, fd, slv)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldText(b, fd, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	// then the unknown fields
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		ufs := m.unknownFields[itag]
+		for _, uf := range ufs {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			// unknown fields are identified by tag number only
+			_, err = fmt.Fprintf(b, "%d", tag)
+			if err != nil {
+				return err
+			}
+			if uf.Encoding == proto.WireStartGroup {
+				err = b.WriteByte('{')
+				if err != nil {
+					return err
+				}
+				err = b.start()
+				if err != nil {
+					return err
+				}
+				in := newCodedBuffer(uf.Contents)
+				err = marshalUnknownGroupText(b, in, true)
+				if err != nil {
+					return err
+				}
+				err = b.end()
+				if err != nil {
+					return err
+				}
+				err = b.WriteByte('}')
+				if err != nil {
+					return err
+				}
+			} else {
+				err = b.sep()
+				if err != nil {
+					return err
+				}
+				if uf.Encoding == proto.WireBytes {
+					err = writeString(b, string(uf.Contents))
+					if err != nil {
+						return err
+					}
+				} else {
+					// varint/fixed values are printed as unsigned decimals
+					_, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// marshalKnownFieldMapEntryText writes one map entry as
+// "name <key_field next value_field>", i.e. a synthetic entry message with
+// the key and value sub-fields (kfd/vfd) inside angle brackets.
+func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
+	var name string
+	if fd.IsExtension() {
+		// extensions are rendered with their full name in brackets
+		name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+	} else {
+		name = fd.GetName()
+	}
+	_, err := b.WriteString(name)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	err = b.WriteByte('<')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	err = marshalKnownFieldText(b, kfd, mk)
+	if err != nil {
+		return err
+	}
+	err = b.next()
+	if err != nil {
+		return err
+	}
+	err = marshalKnownFieldText(b, vfd, mv)
+	if err != nil {
+		return err
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	return b.WriteByte('>')
+}
+
+// marshalKnownFieldText writes a single known field (or one element of a
+// repeated field) as "name: value". Groups use the group's message name and
+// braces; other message values use angle brackets; scalars are rendered by
+// reflecting on the value's kind.
+func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
+	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	if group {
+		// groups are named by their message type, with no ':' separator
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
+		} else {
+			name = fd.GetMessageType().GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+	} else {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+		} else {
+			name = fd.GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+		err = b.sep()
+		if err != nil {
+			return err
+		}
+	}
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int32, reflect.Int64:
+		ed := fd.GetEnumType()
+		if ed != nil {
+			// print the enum value name when known, else the raw number
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				_, err := b.WriteString(vd.GetName())
+				return err
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint32, reflect.Uint64:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = "nan"
+		} else if math.IsInf(f, 1) {
+			str = "inf"
+		} else if math.IsInf(f, -1) {
+			str = "-inf"
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		// bytes field
+		return writeString(b, string(rv.Bytes()))
+	case reflect.String:
+		return writeString(b, rv.String())
+	default:
+		var err error
+		if group {
+			err = b.WriteByte('{')
+		} else {
+			err = b.WriteByte('<')
+		}
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			err = dm.marshalText(b)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = proto.CompactText(b, v.(proto.Message))
+			if err != nil {
+				return err
+			}
+		}
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		if group {
+			return b.WriteByte('}')
+		} else {
+			return b.WriteByte('>')
+		}
+	}
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(b *indentBuffer, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := b.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = b.WriteString("\\n")
+		case '\r':
+			_, err = b.WriteString("\\r")
+		case '\t':
+			_, err = b.WriteString("\\t")
+		case '"':
+			// Fix: previously this wrote only "\\" (a lone backslash),
+			// dropping the quote character entirely — e.g. `a"b` became
+			// `a\b`, which re-parses as a backspace escape. A double
+			// quote must be escaped as \" per the text format.
+			_, err = b.WriteString("\\\"")
+		case '\\':
+			_, err = b.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c < 0x7f {
+				// printable ASCII is emitted verbatim
+				err = b.WriteByte(c)
+			} else {
+				// everything else gets a 3-digit octal escape
+				_, err = fmt.Fprintf(b, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return b.WriteByte('"')
+}
+
+// marshalUnknownGroupText renders raw wire-format data (the contents of an
+// unknown group) as text: each field as "tag: value", with nested groups in
+// braces. topLevel indicates the buffer may end without an end-group tag;
+// nested calls treat EOF before end-group as an error.
+func marshalUnknownGroupText(b *indentBuffer, in *codedBuffer, topLevel bool) error {
+	first := true
+	for {
+		if in.eof() {
+			if topLevel {
+				return nil
+			}
+			// this is a nested message: we are expecting an end-group tag, not EOF!
+			return io.ErrUnexpectedEOF
+		}
+		tag, wireType, err := in.decodeTagAndWireType()
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireEndGroup {
+			return nil
+		}
+		err = b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(b, "%d", tag)
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireStartGroup {
+			err = b.WriteByte('{')
+			if err != nil {
+				return err
+			}
+			err = b.start()
+			if err != nil {
+				return err
+			}
+			// recurse for the nested group's contents
+			err = marshalUnknownGroupText(b, in, false)
+			if err != nil {
+				return err
+			}
+			err = b.end()
+			if err != nil {
+				return err
+			}
+			err = b.WriteByte('}')
+			if err != nil {
+				return err
+			}
+			continue
+		} else {
+			err = b.sep()
+			if err != nil {
+				return err
+			}
+			if wireType == proto.WireBytes {
+				contents, err := in.decodeRawBytes(false)
+				if err != nil {
+					return err
+				}
+				err = writeString(b, string(contents))
+				if err != nil {
+					return err
+				}
+			} else {
+				// numeric wire types are printed as unsigned decimals
+				var v uint64
+				switch wireType {
+				case proto.WireVarint:
+					v, err = in.decodeVarint()
+				case proto.WireFixed32:
+					v, err = in.decodeFixed32()
+				case proto.WireFixed64:
+					v, err = in.decodeFixed64()
+				default:
+					return proto.ErrInternalBadWireType
+				}
+				if err != nil {
+					return err
+				}
+				_, err = b.WriteString(strconv.FormatUint(v, 10))
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format.
+func (m *Message) UnmarshalText(text []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeText(text); err != nil {
+		return err
+	}
+	// verify required fields are present after the merge
+	return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+	// parse tokens until end-of-input
+	return m.unmarshalText(newReader(text), tokenEOF)
+}
+
+// unmarshalText consumes tokens from tr until the given end token, merging
+// each parsed field into m. It handles fields identified by tag number
+// (unknown fields are skipped), by name, by group name, and the extended
+// Any form ("[type.googleapis.com/foo.Bar] { ... }").
+func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
+	for {
+		tok := tr.next()
+		if tok.tokTyp == end {
+			return nil
+		}
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		var fd *desc.FieldDescriptor
+		var extendedAnyType *desc.MessageDescriptor
+		if tok.tokTyp == tokenInt {
+			// tag number (indicates unknown field)
+			tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
+			if err != nil {
+				return err
+			}
+			itag := int32(tag)
+			fd = m.FindFieldDescriptor(itag)
+			if fd == nil {
+				// can't parse the value w/out field descriptor, so skip it
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				} else if tok.tokTyp == tokenOpenBrace {
+					if err := skipMessageText(tr, true); err != nil {
+						return err
+					}
+				} else if tok.tokTyp == tokenColon {
+					if err := skipFieldValueText(tr); err != nil {
+						return err
+					}
+				} else {
+					return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+				}
+				tok = tr.peek()
+				if tok.tokTyp.IsSep() {
+					tr.next() // consume separator
+				}
+				continue
+			}
+		} else {
+			fieldName, err := unmarshalFieldNameText(tr, tok)
+			if err != nil {
+				return err
+			}
+			fd = m.FindFieldDescriptorByName(fieldName)
+			if fd == nil {
+				// See if it's a group name
+				for _, field := range m.md.GetFields() {
+					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+						fd = field
+						break
+					}
+				}
+				if fd == nil {
+					// maybe this is an extended Any
+					if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
+						// strip surrounding "[" and "]" and extract type name from URL
+						typeUrl := fieldName[1 : len(fieldName)-1]
+						mname := typeUrl
+						if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+							mname = mname[slash+1:]
+						}
+						// TODO: add a way to weave an AnyResolver to this point
+						extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
+						if extendedAnyType == nil {
+							return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
+						}
+						// field 1 is "type_url"
+						typeUrlField := m.md.FindFieldByNumber(1)
+						if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
+							return err
+						}
+					} else {
+						// TODO: add a flag to just ignore unrecognized field names
+						return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
+					}
+				}
+			}
+		}
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		if extendedAnyType != nil {
+			// consume optional colon; make sure this is a "start message" token
+			if tok.tokTyp == tokenColon {
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				}
+			}
+			if tok.tokTyp.EndToken() == tokenError {
+				return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
+			}
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(extendedAnyType)
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			// now we marshal the message to bytes and store in the Any
+			b, err := g.Marshal()
+			if err != nil {
+				return err
+			}
+			// field 2 is "value"
+			anyValueField := m.md.FindFieldByNumber(2)
+			if err := m.TrySetField(anyValueField, b); err != nil {
+				return err
+			}
+
+		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+			tok.tokTyp.EndToken() != tokenError {
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			if fd.IsRepeated() {
+				if err := m.TryAddRepeatedField(fd, g); err != nil {
+					return err
+				}
+			} else {
+				if err := m.TrySetField(fd, g); err != nil {
+					return err
+				}
+			}
+		} else {
+			// scalar (or scalar-valued) field: require the colon then parse the value
+			if tok.tokTyp != tokenColon {
+				return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
+			}
+			if err := m.unmarshalFieldValueText(fd, tr); err != nil {
+				return err
+			}
+		}
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+// findMessageDescriptor looks up a message by name, first in fd and its
+// transitive imports, then falling back to any statically-linked descriptor.
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+	md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+	if md == nil {
+		// couldn't find it; see if we have this message linked in
+		md, _ = desc.LoadMessageDescriptor(name)
+	}
+	return md
+}
+
+// findMessageInTransitiveDeps does a depth-first search for name in fd and
+// its transitive imports; seen prevents re-visiting shared dependencies.
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	if _, ok := seen[fd]; ok {
+		// already checked this file
+		return nil
+	}
+	seen[fd] = struct{}{}
+	md := fd.FindMessage(name)
+	if md != nil {
+		return md
+	}
+	// not in this file so recursively search its deps
+	for _, dep := range fd.GetDependencies() {
+		md = findMessageInTransitiveDeps(name, dep, seen)
+		if md != nil {
+			return md
+		}
+	}
+	// couldn't find it
+	return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+ var msg string
+ if tok.tokTyp == tokenError {
+ msg = tok.val.(error).Error()
+ } else {
+ msg = fmt.Sprintf(format, args...)
+ }
+ return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
+type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
+func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
+ var set setFunction
+ if fd.IsRepeated() {
+ set = (*Message).addRepeatedField
+ } else {
+ set = mergeField
+ }
+ tok := tr.peek()
+ if tok.tokTyp == tokenOpenBracket {
+ tr.next() // consume tok
+ for {
+ if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
+ return err
+ }
+ tok = tr.peek()
+ if tok.tokTyp == tokenCloseBracket {
+ tr.next() // consume tok
+ return nil
+ } else if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+ }
+ }
+ return m.unmarshalFieldElementText(fd, tr, set)
+}
+
+func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
+ tok := tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ }
+
+ var expected string
+ switch fd.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ if tok.tokTyp == tokenIdent {
+ if tok.val.(string) == "true" {
+ return set(m, fd, true)
+ } else if tok.val.(string) == "false" {
+ return set(m, fd, false)
+ }
+ }
+ expected = "boolean value"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ if tok.tokTyp == tokenString {
+ return set(m, fd, []byte(tok.val.(string)))
+ }
+ expected = "bytes string value"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ if tok.tokTyp == tokenString {
+ return set(m, fd, tok.val)
+ }
+ expected = "string value"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ switch tok.tokTyp {
+ case tokenFloat:
+ return set(m, fd, float32(tok.val.(float64)))
+ case tokenInt:
+ if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, float32(f))
+ }
+ case tokenIdent:
+ ident := strings.ToLower(tok.val.(string))
+ if ident == "inf" {
+ return set(m, fd, float32(math.Inf(1)))
+ } else if ident == "nan" {
+ return set(m, fd, float32(math.NaN()))
+ }
+ case tokenMinus:
+ peeked := tr.peek()
+ if peeked.tokTyp == tokenIdent {
+ ident := strings.ToLower(peeked.val.(string))
+ if ident == "inf" {
+ tr.next() // consume peeked token
+ return set(m, fd, float32(math.Inf(-1)))
+ }
+ }
+ }
+ expected = "float value"
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ switch tok.tokTyp {
+ case tokenFloat:
+ return set(m, fd, tok.val)
+ case tokenInt:
+ if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, f)
+ }
+ case tokenIdent:
+ ident := strings.ToLower(tok.val.(string))
+ if ident == "inf" {
+ return set(m, fd, math.Inf(1))
+ } else if ident == "nan" {
+ return set(m, fd, math.NaN())
+ }
+ case tokenMinus:
+ peeked := tr.peek()
+ if peeked.tokTyp == tokenIdent {
+ ident := strings.ToLower(peeked.val.(string))
+ if ident == "inf" {
+ tr.next() // consume peeked token
+ return set(m, fd, math.Inf(-1))
+ }
+ }
+ }
+ expected = "float value"
+ case descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, int32(i))
+ }
+ }
+ expected = "int value"
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SINT64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, i)
+ }
+ }
+ expected = "int value"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, uint32(i))
+ }
+ }
+ expected = "unsigned int value"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, i)
+ }
+ }
+ expected = "unsigned int value"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ if tok.tokTyp == tokenIdent {
+ // TODO: add a flag to just ignore unrecognized enum value names?
+ vd := fd.GetEnumType().FindValueByName(tok.val.(string))
+ if vd != nil {
+ return set(m, fd, vd.GetNumber())
+ }
+ } else if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, int32(i))
+ }
+ }
+ expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+ endTok := tok.tokTyp.EndToken()
+ if endTok != tokenError {
+ dm := m.mf.NewDynamicMessage(fd.GetMessageType())
+ if err := dm.unmarshalText(tr, endTok); err != nil {
+ return err
+ }
+ // TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
+ // proto package to unmarshal it. But the text parser isn't particularly amenable
+ // to that, so we instead convert a dynamic message to a generated one if the
+ // known-type registry knows about the generated type...
+ var ktr *KnownTypeRegistry
+ if m.mf != nil {
+ ktr = m.mf.ktr
+ }
+ pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
+ if pm != nil {
+ if err := dm.ConvertTo(pm); err != nil {
+ return set(m, fd, pm)
+ }
+ }
+ return set(m, fd, dm)
+ }
+ expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
+ default:
+ return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
+ }
+
+ // if we get here, token was wrong type; create error message
+ var article string
+ if strings.Contains("aieou", expected[0:1]) {
+ article = "an"
+ } else {
+ article = "a"
+ }
+ return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
+}
+
+func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
+ if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
+ // extension name
+ var closeType tokenType
+ var closeChar string
+ if tok.tokTyp == tokenOpenBracket {
+ closeType = tokenCloseBracket
+ closeChar = "close bracket ']'"
+ } else {
+ closeType = tokenCloseParen
+ closeChar = "close paren ')'"
+ }
+ // must be followed by an identifier
+ idents := make([]string, 0, 1)
+ for {
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return "", io.ErrUnexpectedEOF
+ } else if tok.tokTyp != tokenIdent {
+ return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
+ }
+ idents = append(idents, tok.val.(string))
+ // and then close bracket/paren, or "/" to keep adding URL elements to name
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return "", io.ErrUnexpectedEOF
+ } else if tok.tokTyp == closeType {
+ break
+ } else if tok.tokTyp != tokenSlash {
+ return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
+ }
+ }
+ return "[" + strings.Join(idents, "/") + "]", nil
+ } else if tok.tokTyp == tokenIdent {
+ // normal field name
+ return tok.val.(string), nil
+ } else {
+ return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
+ }
+}
+
+func skipFieldNameText(tr *txtReader) error {
+ tok := tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+ return nil
+ } else {
+ _, err := unmarshalFieldNameText(tr, tok)
+ return err
+ }
+}
+
+func skipFieldValueText(tr *txtReader) error {
+ tok := tr.peek()
+ if tok.tokTyp == tokenOpenBracket {
+ tr.next() // consume tok
+ for {
+ if err := skipFieldElementText(tr); err != nil {
+ return err
+ }
+ tok = tr.peek()
+ if tok.tokTyp == tokenCloseBracket {
+ tr.next() // consume tok
+ return nil
+ } else if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+
+ }
+ }
+ return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+ tok := tr.next()
+ switch tok.tokTyp {
+ case tokenEOF:
+ return io.ErrUnexpectedEOF
+ case tokenInt, tokenFloat, tokenString, tokenIdent:
+ return nil
+ case tokenOpenAngle:
+ return skipMessageText(tr, false)
+ default:
+ return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+ }
+}
+
+func skipMessageText(tr *txtReader, isGroup bool) error {
+ for {
+ tok := tr.peek()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if isGroup && tok.tokTyp == tokenCloseBrace {
+ return nil
+ } else if !isGroup && tok.tokTyp == tokenCloseAngle {
+ return nil
+ }
+
+ // field name or tag
+ if err := skipFieldNameText(tr); err != nil {
+ return err
+ }
+
+ // field value
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if tok.tokTyp == tokenOpenBrace {
+ if err := skipMessageText(tr, true); err != nil {
+ return err
+ }
+ } else if tok.tokTyp == tokenColon {
+ if err := skipFieldValueText(tr); err != nil {
+ return err
+ }
+ } else {
+ return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+ }
+
+ tok = tr.peek()
+ if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+ }
+}
+
// tokenType identifies the kind of lexical token produced by txtReader.
type tokenType int

const (
	tokenError tokenType = iota
	tokenEOF
	tokenIdent
	tokenString
	tokenInt
	tokenFloat
	tokenColon
	tokenComma
	tokenSemiColon
	tokenOpenBrace
	tokenCloseBrace
	tokenOpenBracket
	tokenCloseBracket
	tokenOpenAngle
	tokenCloseAngle
	tokenOpenParen
	tokenCloseParen
	tokenSlash
	tokenMinus
)

// IsSep reports whether t is a field/element separator (',' or ';').
func (t tokenType) IsSep() bool {
	switch t {
	case tokenComma, tokenSemiColon:
		return true
	}
	return false
}

// EndToken returns the token that closes a message body opened by t, or
// tokenError when t does not open a message body.
func (t tokenType) EndToken() tokenType {
	if t == tokenOpenAngle {
		return tokenCloseAngle
	}
	if t == tokenOpenBrace {
		return tokenCloseBrace
	}
	return tokenError
}
+
// token is a single lexical element scanned from text-format input.
type token struct {
	tokTyp tokenType
	val    interface{}      // parsed value: string, float64, rune, or error (for tokenError)
	txt    string           // raw source text of the token
	pos    scanner.Position // position at which the token started
}

// txtReader is a one-token-lookahead lexer over text-format input.
type txtReader struct {
	scanner    scanner.Scanner
	peeked     token // most recently scanned token; valid only when havePeeked
	havePeeked bool  // whether peeked holds an unconsumed token
}
+
+func newReader(text []byte) *txtReader {
+ sc := scanner.Scanner{}
+ sc.Init(bytes.NewReader(text))
+ sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
+ scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ // identifiers are same restrictions as Go identifiers, except we also allow dots since
+ // we accept fully-qualified names
+ sc.IsIdentRune = func(ch rune, i int) bool {
+ return ch == '_' || unicode.IsLetter(ch) ||
+ (i > 0 && unicode.IsDigit(ch)) ||
+ (i > 0 && ch == '.')
+ }
+ // ignore errors; we handle them if/when we see malformed tokens
+ sc.Error = func(s *scanner.Scanner, msg string) {}
+ return &txtReader{scanner: sc}
+}
+
// peek returns the next token without consuming it. A token is scanned at
// most once: repeated peek calls (and the following next call) all return
// the same cached token.
func (p *txtReader) peek() *token {
	if p.havePeeked {
		return &p.peeked
	}
	t := p.scanner.Scan()
	if t == scanner.EOF {
		p.peeked.tokTyp = tokenEOF
		p.peeked.val = nil
		p.peeked.txt = ""
		p.peeked.pos = p.scanner.Position
	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
		// surface scan/parse problems as an error token carrying the error
		p.peeked.tokTyp = tokenError
		p.peeked.val = err
	}
	p.havePeeked = true
	return &p.peeked
}
+
// processToken translates a raw scanner token into the peeked token,
// classifying it and parsing its value where applicable. A unary minus is
// merged with an immediately following number so negative literals arrive as
// a single int/float token; a minus not followed by a digit is emitted as
// tokenMinus (used for "-inf"). Returns an error for malformed tokens.
func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
	p.peeked.pos = pos
	p.peeked.txt = text
	switch t {
	case scanner.Ident:
		p.peeked.tokTyp = tokenIdent
		p.peeked.val = text
	case scanner.Int:
		p.peeked.tokTyp = tokenInt
		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
	case scanner.Float:
		p.peeked.tokTyp = tokenFloat
		var err error
		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
			return err
		}
	case scanner.Char, scanner.String:
		p.peeked.tokTyp = tokenString
		var err error
		if p.peeked.val, err = strconv.Unquote(text); err != nil {
			return err
		}
	case '-': // unary minus, for negative ints and floats
		ch := p.scanner.Peek()
		if ch < '0' || ch > '9' {
			// not followed by a digit: emit a bare minus (e.g. for "-inf")
			p.peeked.tokTyp = tokenMinus
			p.peeked.val = '-'
		} else {
			// followed by a number: merge into a single negative literal
			t := p.scanner.Scan()
			if t == scanner.EOF {
				return io.ErrUnexpectedEOF
			} else if t == scanner.Float {
				p.peeked.tokTyp = tokenFloat
				text += p.scanner.TokenText()
				p.peeked.txt = text
				var err error
				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
					p.peeked.pos = p.scanner.Position
					return err
				}
			} else if t == scanner.Int {
				p.peeked.tokTyp = tokenInt
				text += p.scanner.TokenText()
				p.peeked.txt = text
				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
			} else {
				p.peeked.pos = p.scanner.Position
				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
			}
		}
	case ':':
		p.peeked.tokTyp = tokenColon
		p.peeked.val = ':'
	case ',':
		p.peeked.tokTyp = tokenComma
		p.peeked.val = ','
	case ';':
		p.peeked.tokTyp = tokenSemiColon
		p.peeked.val = ';'
	case '{':
		p.peeked.tokTyp = tokenOpenBrace
		p.peeked.val = '{'
	case '}':
		p.peeked.tokTyp = tokenCloseBrace
		p.peeked.val = '}'
	case '<':
		p.peeked.tokTyp = tokenOpenAngle
		p.peeked.val = '<'
	case '>':
		p.peeked.tokTyp = tokenCloseAngle
		p.peeked.val = '>'
	case '[':
		p.peeked.tokTyp = tokenOpenBracket
		p.peeked.val = '['
	case ']':
		p.peeked.tokTyp = tokenCloseBracket
		p.peeked.val = ']'
	case '(':
		p.peeked.tokTyp = tokenOpenParen
		p.peeked.val = '('
	case ')':
		p.peeked.tokTyp = tokenCloseParen
		p.peeked.val = ')'
	case '/':
		// only allowed to separate URL components in expanded Any format
		p.peeked.tokTyp = tokenSlash
		p.peeked.val = '/'
	default:
		return fmt.Errorf("invalid character: %c", t)
	}
	return nil
}
+
+func (p *txtReader) next() *token {
+ t := p.peek()
+ if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+ p.havePeeked = false
+ }
+ return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
new file mode 100644
index 0000000..3fca3eb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -0,0 +1,666 @@
+package grpcreflect
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+ "google.golang.org/grpc/status"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/internal"
+)
+
// elementNotFoundError is the error returned by reflective operations where the
// server does not recognize a given file name, symbol name, or extension.
type elementNotFoundError struct {
	name    string
	kind    elementKind
	symType symbolType // only used when kind == elementKindSymbol
	tag     int32      // only used when kind == elementKindExtension

	// only errors with a kind of elementKindFile will have a cause, which means
	// the named file could not be resolved because of a dependency that could
	// not be found where cause describes the missing dependency
	cause *elementNotFoundError
}

// elementKind distinguishes which sort of element could not be found.
type elementKind int

const (
	elementKindSymbol elementKind = iota
	elementKindFile
	elementKindExtension
)

// symbolType is the human-readable label, used in error messages, for the
// kind of named symbol that was being resolved.
type symbolType string

const (
	symbolTypeService = "Service"
	symbolTypeMessage = "Message"
	symbolTypeEnum    = "Enum"
	symbolTypeUnknown = "Symbol"
)
+
+func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+ first := true
+ var b bytes.Buffer
+ for ; e != nil; e = e.cause {
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(&b, "\ncaused by: ")
+ }
+ switch e.kind {
+ case elementKindSymbol:
+ fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+ case elementKindExtension:
+ fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+ default:
+ fmt.Fprintf(&b, "File not found: %s", e.name)
+ }
+ }
+ return b.String()
+}
+
+// IsElementNotFoundError determines if the given error indicates that a file
+// name, symbol name, or extension field was could not be found by the server.
+func IsElementNotFoundError(err error) bool {
+ _, ok := err.(*elementNotFoundError)
+ return ok
+}
+
// ProtocolError is an error returned when the server sends a response of the
// wrong type.
type ProtocolError struct {
	missingType reflect.Type // the response message type that was expected but absent
}

// Error implements the error interface.
func (p ProtocolError) Error() string {
	return "Protocol error: response was missing " + fmt.Sprintf("%v", p.missingType)
}
+
// extDesc identifies an extension by the message it extends and its field
// number; used as a cache key in filesByExtension.
type extDesc struct {
	extendedMessageName string
	extensionNumber     int32
}

// Client is a client connection to a server for performing reflection calls
// and resolving remote symbols.
type Client struct {
	ctx  context.Context
	stub rpb.ServerReflectionClient

	connMu sync.Mutex // guards cancel and stream
	cancel context.CancelFunc
	stream rpb.ServerReflection_ServerReflectionInfoClient

	cacheMu          sync.RWMutex // guards the four maps below
	protosByName     map[string]*dpb.FileDescriptorProto
	filesByName      map[string]*desc.FileDescriptor
	filesBySymbol    map[string]*desc.FileDescriptor
	filesByExtension map[extDesc]*desc.FileDescriptor
}
+
+// NewClient creates a new Client with the given root context and using the
+// given RPC stub for talking to the server.
+func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+ cr := &Client{
+ ctx: ctx,
+ stub: stub,
+ protosByName: map[string]*dpb.FileDescriptorProto{},
+ filesByName: map[string]*desc.FileDescriptor{},
+ filesBySymbol: map[string]*desc.FileDescriptor{},
+ filesByExtension: map[extDesc]*desc.FileDescriptor{},
+ }
+ // don't leak a grpc stream
+ runtime.SetFinalizer(cr, (*Client).Reset)
+ return cr
+}
+
// FileByFilename asks the server for a file descriptor for the proto file with
// the given name. Results are cached; if the server doesn't know the name,
// well-known standard files are retried under their alternate (aliased) names.
func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
	// hit the cache first
	cr.cacheMu.RLock()
	if fd, ok := cr.filesByName[filename]; ok {
		cr.cacheMu.RUnlock()
		return fd, nil
	}
	// no rich descriptor yet; check for a previously downloaded raw proto
	fdp, ok := cr.protosByName[filename]
	cr.cacheMu.RUnlock()
	// not there? see if we've downloaded the proto
	if ok {
		// build (and cache) the rich descriptor from the raw proto
		return cr.descriptorFromProto(fdp)
	}

	req := &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
			FileByFilename: filename,
		},
	}
	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
	if isNotFound(err) {
		// file not found? see if we can look up via alternate name
		if alternate, ok := internal.StdFileAliases[filename]; ok {
			req := &rpb.ServerReflectionRequest{
				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
					FileByFilename: alternate,
				},
			}
			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
			if isNotFound(err) {
				err = fileNotFound(filename, nil)
			}
		} else {
			err = fileNotFound(filename, nil)
		}
	} else if e, ok := err.(*elementNotFoundError); ok {
		// a dependency was missing; wrap it so the caller sees this file's name
		err = fileNotFound(filename, e)
	}
	return fd, err
}
+
+// FileContainingSymbol asks the server for a file descriptor for the proto file
+// that declares the given fully-qualified symbol.
+func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
+ // hit the cache first
+ cr.cacheMu.RLock()
+ fd, ok := cr.filesBySymbol[symbol]
+ cr.cacheMu.RUnlock()
+ if ok {
+ return fd, nil
+ }
+
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: symbol,
+ },
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ if isNotFound(err) {
+ err = symbolNotFound(symbol, symbolTypeUnknown, nil)
+ } else if e, ok := err.(*elementNotFoundError); ok {
+ err = symbolNotFound(symbol, symbolTypeUnknown, e)
+ }
+ return fd, err
+}
+
+// FileContainingExtension asks the server for a file descriptor for the proto
+// file that declares an extension with the given number for the given
+// fully-qualified message name.
+func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
+ // hit the cache first
+ cr.cacheMu.RLock()
+ fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+ cr.cacheMu.RUnlock()
+ if ok {
+ return fd, nil
+ }
+
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &rpb.ExtensionRequest{
+ ContainingType: extendedMessageName,
+ ExtensionNumber: extensionNumber,
+ },
+ },
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ if isNotFound(err) {
+ err = extensionNotFound(extendedMessageName, extensionNumber, nil)
+ } else if e, ok := err.(*elementNotFoundError); ok {
+ err = extensionNotFound(extendedMessageName, extensionNumber, e)
+ }
+ return fd, err
+}
+
// getAndCacheFileDescriptors performs one reflection exchange and caches every
// raw file descriptor proto the server returns, then builds and returns the
// rich descriptor for the first (answer) file. If expectedName and alias
// differ, a file received under expectedName is re-labeled with alias before
// caching (used for well-known files requested under an alternate name).
func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
	resp, err := cr.send(req)
	if err != nil {
		return nil, err
	}

	fdResp := resp.GetFileDescriptorResponse()
	if fdResp == nil {
		return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()}
	}

	// Response can contain the result file descriptor, but also its transitive
	// deps. Furthermore, protocol states that subsequent requests do not need
	// to send transitive deps that have been sent in prior responses. So we
	// need to cache all file descriptors and then return the first one (which
	// should be the answer). If we're looking for a file by name, we can be
	// smarter and make sure to grab one by name instead of just grabbing the
	// first one.
	var firstFd *dpb.FileDescriptorProto
	for _, fdBytes := range fdResp.FileDescriptorProto {
		fd := &dpb.FileDescriptorProto{}
		if err = proto.Unmarshal(fdBytes, fd); err != nil {
			return nil, err
		}

		if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName {
			// we found a file was aliased, so we need to update the proto to reflect that
			fd.Name = proto.String(alias)
		}

		cr.cacheMu.Lock()
		// see if this file was created and cached concurrently
		if firstFd == nil {
			if d, ok := cr.filesByName[fd.GetName()]; ok {
				cr.cacheMu.Unlock()
				return d, nil
			}
		}
		// store in cache of raw descriptor protos, but don't overwrite existing protos
		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
			fd = existingFd
		} else {
			cr.protosByName[fd.GetName()] = fd
		}
		cr.cacheMu.Unlock()
		if firstFd == nil {
			firstFd = fd
		}
	}
	if firstFd == nil {
		// server sent an empty descriptor list
		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
	}

	return cr.descriptorFromProto(firstFd)
}
+
+func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+ deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+ for i, depName := range fd.GetDependency() {
+ if dep, err := cr.FileByFilename(depName); err != nil {
+ return nil, err
+ } else {
+ deps[i] = dep
+ }
+ }
+ d, err := desc.CreateFileDescriptor(fd, deps...)
+ if err != nil {
+ return nil, err
+ }
+ d = cr.cacheFile(d)
+ return d, nil
+}
+
// cacheFile records a file descriptor in all of the lookup caches: by file
// name, by every symbol it declares, and by every extension it declares. If a
// concurrent caller already cached the same file name, that existing
// descriptor is returned instead and fd is discarded.
func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
	cr.cacheMu.Lock()
	defer cr.cacheMu.Unlock()

	// cache file descriptor by name, but don't overwrite existing entry
	// (existing entry could come from concurrent caller)
	if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
		return existingFd
	}
	cr.filesByName[fd.GetName()] = fd

	// also cache by symbols and extensions
	for _, m := range fd.GetMessageTypes() {
		cr.cacheMessageLocked(fd, m)
	}
	for _, e := range fd.GetEnumTypes() {
		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
		for _, v := range e.GetValues() {
			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
		}
	}
	for _, e := range fd.GetExtensions() {
		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
	}
	for _, s := range fd.GetServices() {
		cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
		for _, m := range s.GetMethods() {
			cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
		}
	}

	return fd
}
+
// cacheMessageLocked records, for a message and everything nested inside it
// (fields, one-ofs, nested enums and their values, nested extensions, nested
// messages), that each symbol is declared in fd. Must be called with cacheMu
// held for writing.
func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
	cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
	for _, f := range md.GetFields() {
		cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
	}
	for _, o := range md.GetOneOfs() {
		cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
	}
	for _, e := range md.GetNestedEnumTypes() {
		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
		for _, v := range e.GetValues() {
			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
		}
	}
	for _, e := range md.GetNestedExtensions() {
		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
	}
	for _, m := range md.GetNestedMessageTypes() {
		cr.cacheMessageLocked(fd, m) // recurse
	}
}
+
+// AllExtensionNumbersForType asks the server for all known extension numbers
+// for the given fully-qualified message name.
+func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: extendedMessageName,
+ },
+ }
+ resp, err := cr.send(req)
+ if err != nil {
+ if isNotFound(err) {
+ return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
+ }
+ return nil, err
+ }
+
+ extResp := resp.GetAllExtensionNumbersResponse()
+ if extResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+ }
+ return extResp.ExtensionNumber, nil
+}
+
+// ListServices asks the server for the fully-qualified names of all exposed
+// services.
+func (cr *Client) ListServices() ([]string, error) {
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+ // proto doesn't indicate any purpose for this value and server impl
+ // doesn't actually use it...
+ ListServices: "*",
+ },
+ }
+ resp, err := cr.send(req)
+ if err != nil {
+ return nil, err
+ }
+
+ listResp := resp.GetListServicesResponse()
+ if listResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+ }
+ serviceNames := make([]string, len(listResp.Service))
+ for i, s := range listResp.Service {
+ serviceNames[i] = s.Name
+ }
+ return serviceNames, nil
+}
+
+func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+ // we allow one immediate retry, in case we have a stale stream
+ // (e.g. closed by server)
+ resp, err := cr.doSend(true, req)
+ if err != nil {
+ return nil, err
+ }
+
+ // convert error response messages into errors
+ errResp := resp.GetErrorResponse()
+ if errResp != nil {
+ return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
+ }
+
+ return resp, nil
+}
+
+func isNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ s, ok := status.FromError(err)
+ return ok && s.Code() == codes.NotFound
+}
+
// doSend serializes access to the underlying stream and performs the
// request/response exchange.
//
// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
// delivered in correct order.
func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
	cr.connMu.Lock()
	defer cr.connMu.Unlock()
	return cr.doSendLocked(retry, req)
}
+
+func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+ if err := cr.initStreamLocked(); err != nil {
+ return nil, err
+ }
+
+ if err := cr.stream.Send(req); err != nil {
+ if err == io.EOF {
+ // if send returns EOF, must call Recv to get real underlying error
+ _, err = cr.stream.Recv()
+ }
+ cr.resetLocked()
+ if retry {
+ return cr.doSendLocked(false, req)
+ }
+ return nil, err
+ }
+
+ if resp, err := cr.stream.Recv(); err != nil {
+ cr.resetLocked()
+ if retry {
+ return cr.doSendLocked(false, req)
+ }
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
+
+func (cr *Client) initStreamLocked() error {
+ if cr.stream != nil {
+ return nil
+ }
+ var newCtx context.Context
+ newCtx, cr.cancel = context.WithCancel(cr.ctx)
+ var err error
+ cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+ return err
+}
+
// Reset ensures that any active stream with the server is closed, releasing any
// resources.
func (cr *Client) Reset() {
	cr.connMu.Lock()
	defer cr.connMu.Unlock()
	cr.resetLocked()
}

// resetLocked tears down the current stream (if any) and cancels its context.
// Must be called with connMu held.
func (cr *Client) resetLocked() {
	if cr.stream != nil {
		cr.stream.CloseSend() // error deliberately ignored: we're abandoning the stream
		for {
			// drain the stream, this covers io.EOF too
			if _, err := cr.stream.Recv(); err != nil {
				break
			}
		}
		cr.stream = nil
	}
	if cr.cancel != nil {
		cr.cancel()
		cr.cancel = nil
	}
}
+
+// ResolveService asks the server to resolve the given fully-qualified service
+// name into a service descriptor.
+func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) {
+ file, err := cr.FileContainingSymbol(serviceName)
+ if err != nil {
+ return nil, setSymbolType(err, serviceName, symbolTypeService)
+ }
+ d := file.FindSymbol(serviceName)
+ if d == nil {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+ if s, ok := d.(*desc.ServiceDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+}
+
+// ResolveMessage asks the server to resolve the given fully-qualified message
+// name into a message descriptor.
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) {
+ file, err := cr.FileContainingSymbol(messageName)
+ if err != nil {
+ return nil, setSymbolType(err, messageName, symbolTypeMessage)
+ }
+ d := file.FindSymbol(messageName)
+ if d == nil {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+ if s, ok := d.(*desc.MessageDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+}
+
+// ResolveEnum asks the server to resolve the given fully-qualified enum name
+// into an enum descriptor.
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) {
+ file, err := cr.FileContainingSymbol(enumName)
+ if err != nil {
+ return nil, setSymbolType(err, enumName, symbolTypeEnum)
+ }
+ d := file.FindSymbol(enumName)
+ if d == nil {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+ if s, ok := d.(*desc.EnumDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+}
+
+func setSymbolType(err error, name string, symType symbolType) error {
+ if e, ok := err.(*elementNotFoundError); ok {
+ if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown {
+ e.symType = symType
+ }
+ }
+ return err
+}
+
+// ResolveEnumValues asks the server to resolve the given fully-qualified enum
+// name into a map of names to numbers that represents the enum's values.
+func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) {
+ enumDesc, err := cr.ResolveEnum(enumName)
+ if err != nil {
+ return nil, err
+ }
+ vals := map[string]int32{}
+ for _, valDesc := range enumDesc.GetValues() {
+ vals[valDesc.GetName()] = valDesc.GetNumber()
+ }
+ return vals, nil
+}
+
+// ResolveExtension asks the server to resolve the given extension number and
+// fully-qualified message name into a field descriptor.
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) {
+ file, err := cr.FileContainingExtension(extendedType, extensionNumber)
+ if err != nil {
+ return nil, err
+ }
+ d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file})
+ if d == nil {
+ return nil, extensionNotFound(extendedType, extensionNumber, nil)
+ } else {
+ return d, nil
+ }
+}
+
+func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
+ // search extensions in this scope
+ for _, ext := range scope.extensions() {
+ if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType {
+ return ext
+ }
+ }
+
+ // if not found, search nested scopes
+ for _, nested := range scope.nestedScopes() {
+ ext := findExtension(extendedType, extensionNumber, nested)
+ if ext != nil {
+ return ext
+ }
+ }
+
+ return nil
+}
+
+type extensionScope interface {
+ extensions() []*desc.FieldDescriptor
+ nestedScopes() []extensionScope
+}
+
+// fileDescriptorExtensions implements extensionHolder interface on top of
+// FileDescriptorProto
+type fileDescriptorExtensions struct {
+ proto *desc.FileDescriptor
+}
+
+func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return fde.proto.GetExtensions()
+}
+
+func (fde fileDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(fde.proto.GetMessageTypes()))
+ for i, m := range fde.proto.GetMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
+
+// msgDescriptorExtensions implements extensionHolder interface on top of
+// DescriptorProto
+type msgDescriptorExtensions struct {
+ proto *desc.MessageDescriptor
+}
+
+func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return mde.proto.GetNestedExtensions()
+}
+
+func (mde msgDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes()))
+ for i, m := range mde.proto.GetNestedMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
new file mode 100644
index 0000000..ec7bd02
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
@@ -0,0 +1,10 @@
+// Package grpcreflect provides GRPC-specific extensions to protobuf reflection.
+// This includes a way to access rich service descriptors for all services that
+// a GRPC server exports.
+//
+// Also included is an easy-to-use client for the GRPC reflection service
+// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that
+// supports the reflection service) for metadata on its exported services, which
+// could be used to construct a dynamic client. (See the grpcdynamic package in
+// this same repo for more on that.)
+package grpcreflect
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 0000000..c9ef619
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,61 @@
+package grpcreflect
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
+func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+	result := make(map[string]*desc.ServiceDescriptor)
+	for name, info := range s.GetServiceInfo() {
+		// the service info's metadata is expected to be the name of the
+		// proto file that defines the service
+		file, ok := info.Metadata.(string)
+		if !ok {
+			return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+		}
+		fd, err := desc.LoadFileDescriptor(file)
+		if err != nil {
+			return nil, err
+		}
+		// resolve the service's fully-qualified name within that file
+		sym := fd.FindSymbol(name)
+		if sym == nil {
+			return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+		}
+		svc, ok := sym.(*desc.ServiceDescriptor)
+		if !ok {
+			return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, sym)
+		}
+		result[name] = svc
+	}
+	return result, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) {
+	// the metadata is expected to be the name of the proto file that
+	// defines the service
+	file, ok := svc.Metadata.(string)
+	if !ok {
+		return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata)
+	}
+	fd, err := desc.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+	// resolve the service's fully-qualified name within that file
+	d := fd.FindSymbol(svc.ServiceName)
+	if d == nil {
+		return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName)
+	}
+	sd, ok := d.(*desc.ServiceDescriptor)
+	if !ok {
+		return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d)
+	}
+	return sd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..4a8b47a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases are the standard protos included with protoc, but older versions of
+// their respective packages registered them using incorrect paths.
+//
+// Note: init() below also adds the inverse of each mapping, so lookups work
+// in both directions.
+var StdFileAliases = map[string]string{
+	// Files for the github.com/golang/protobuf/ptypes package at one point were
+	// registered using the path where the proto files are mirrored in GOPATH,
+	// inside the golang/protobuf repo.
+	// (Fixed as of https://github.com/golang/protobuf/pull/412)
+	"google/protobuf/any.proto":       "github.com/golang/protobuf/ptypes/any/any.proto",
+	"google/protobuf/duration.proto":  "github.com/golang/protobuf/ptypes/duration/duration.proto",
+	"google/protobuf/empty.proto":     "github.com/golang/protobuf/ptypes/empty/empty.proto",
+	"google/protobuf/struct.proto":    "github.com/golang/protobuf/ptypes/struct/struct.proto",
+	"google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+	"google/protobuf/wrappers.proto":  "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+	// Files for the google.golang.org/genproto/protobuf package at one point
+	// were registered with an anomalous "src/" prefix.
+	// (Fixed as of https://github.com/google/go-genproto/pull/31)
+	"google/protobuf/api.proto":            "src/google/protobuf/api.proto",
+	"google/protobuf/field_mask.proto":     "src/google/protobuf/field_mask.proto",
+	"google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+	"google/protobuf/type.proto":           "src/google/protobuf/type.proto",
+
+	// Other standard files (descriptor.proto and compiler/plugin.proto) are
+	// registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+	// We provide aliasing in both directions, to support files with the
+	// proper import path linked against older versions of the generated
+	// files AND files that used the aliased import path but linked against
+	// newer versions of the generated files (which register with the
+	// correct path).
+
+	// Build the inverse mappings separately first: inserting new keys into
+	// a map while ranging over it is not safe.
+	inverse := make(map[string]string, len(StdFileAliases))
+	for path, alias := range StdFileAliases {
+		inverse[alias] = path
+	}
+	// Then merge the inverse mappings back in.
+	for alias, path := range inverse {
+		StdFileAliases[alias] = path
+	}
+}
+
+// ErrNoSuchFile is returned when a descriptor cannot be found for a given
+// file path; the string value is the path that could not be resolved.
+type ErrNoSuchFile string
+
+// Error implements the error interface.
+func (e ErrNoSuchFile) Error() string {
+	return fmt.Sprintf("no such file: %q", string(e))
+}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+ fdb := proto.FileDescriptor(file)
+ aliased := false
+ if fdb == nil {
+ var ok bool
+ alias, ok := StdFileAliases[file]
+ if ok {
+ aliased = true
+ if fdb = proto.FileDescriptor(alias); fdb == nil {
+ return nil, ErrNoSuchFile(file)
+ }
+ } else {
+ return nil, ErrNoSuchFile(file)
+ }
+ }
+
+ fd, err := DecodeFileDescriptor(file, fdb)
+ if err != nil {
+ return nil, err
+ }
+
+ if aliased {
+ // the file descriptor will have the alias used to load it, but
+ // we need it to have the specified name in order to link it
+ fd.Name = proto.String(file)
+ }
+
+ return fd, nil
+}
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+ raw, err := decompress(fdb)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+ }
+ fd := dpb.FileDescriptorProto{}
+ if err := proto.Unmarshal(raw, &fd); err != nil {
+ return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+ }
+ return &fd, nil
+}
+
+// decompress gunzips the given bytes, returning the uncompressed payload or
+// an error if the input is not valid gzip data.
+func decompress(b []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	// the underlying reader is in-memory, but close the gzip reader anyway
+	// to release its internal state
+	defer r.Close()
+	out, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	return out, nil
+}