SEBA-829 update cordctl to use go mod;
implement run, install, help
Change-Id: I72716d2e245d5ef0dc0603aad149843723ddff9e
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/buffer.go b/vendor/github.com/jhump/protoreflect/codec/buffer.go
new file mode 100644
index 0000000..b9de99c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/buffer.go
@@ -0,0 +1,112 @@
+package codec
+
+import (
+ "fmt"
+ "io"
+)
+
+// Buffer is a reader and a writer that wraps a slice of bytes and also
+// provides API for decoding and encoding the protobuf binary format.
+//
+// Its operation is similar to that of a bytes.Buffer: writing pushes
+// data to the end of the buffer while reading pops data from the head
+// of the buffer. So the same buffer can be used to both read and write.
+type Buffer struct {
+ buf []byte
+ index int
+
+ // tmp is used when another byte slice is needed, such as when
+ // serializing messages, since we need to know the length before
+ // we can write the length prefix; by caching this, including
+ // after it is grown by serialization operations, we reduce the
+ // number of allocations needed
+ tmp []byte
+
+ deterministic bool
+}
+
+// NewBuffer creates a new buffer with the given slice of bytes as the
+// buffer's initial contents.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic sets this buffer to encode messages deterministically. This
+// is useful for tests. But the overhead is non-zero, so it should not likely be
+// used outside of tests. When true, map fields in a message must have their
+// keys sorted before serialization to ensure deterministic output. Otherwise,
+// values in a map field will be serialized in map iteration order.
+func (cb *Buffer) SetDeterministic(deterministic bool) {
+ cb.deterministic = deterministic
+}
+
+// Reset resets this buffer back to empty. Any subsequent writes/encodes
+// to the buffer will allocate a new backing slice of bytes.
+func (cb *Buffer) Reset() {
+ cb.buf = []byte(nil)
+ cb.index = 0
+}
+
+// Bytes returns the slice of bytes remaining in the buffer. Note that
+// this does not perform a copy: if the contents of the returned slice
+// are modified, the modifications will be visible to subsequent reads
+// via the buffer.
+func (cb *Buffer) Bytes() []byte {
+ return cb.buf[cb.index:]
+}
+
+// String returns the remaining bytes in the buffer as a string.
+func (cb *Buffer) String() string {
+ return string(cb.Bytes())
+}
+
+// EOF returns true if there are no more bytes remaining to read.
+func (cb *Buffer) EOF() bool {
+ return cb.index >= len(cb.buf)
+}
+
+// Skip attempts to skip the given number of bytes in the input. If
+// the input has fewer bytes than the given count, false is returned
+// and the buffer is unchanged. Otherwise, the given number of bytes
+// are skipped and true is returned.
+func (cb *Buffer) Skip(count int) error {
+ if count < 0 {
+ return fmt.Errorf("proto: bad byte length %d", count)
+ }
+ newIndex := cb.index + count
+ if newIndex < cb.index || newIndex > len(cb.buf) {
+ return io.ErrUnexpectedEOF
+ }
+ cb.index = newIndex
+ return nil
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+ return len(cb.buf) - cb.index
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads max(len(dest), cb.Len()) bytes from input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+ if cb.index == len(cb.buf) {
+ return 0, io.EOF
+ }
+ copied := copy(dest, cb.buf[cb.index:])
+ cb.index += copied
+ return copied, nil
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+ cb.buf = append(cb.buf, data...)
+ return len(data), nil
+}
+
+var _ io.Writer = (*Buffer)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode.go b/vendor/github.com/jhump/protoreflect/codec/decode.go
new file mode 100644
index 0000000..2a7e59f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode.go
@@ -0,0 +1,372 @@
+package codec
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = errors.New("proto: integer overflow")
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = errors.New("proto: bad wiretype")
+
+var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+
+func init() {
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
+ varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
+ fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
+ fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
+func (cb *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := cb.index
+ l := len(cb.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := cb.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ cb.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = ErrOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) DecodeVarint() (uint64, error) {
+ i := cb.index
+ buf := cb.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ cb.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return cb.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x := uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, ErrOverflow
+
+done:
+ cb.index = i
+ return x, nil
+}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+ var v uint64
+ v, err = cb.DecodeVarint()
+ if err != nil {
+ return
+ }
+ // low 7 bits is wire type
+ wireType = int8(v & 7)
+ // rest is int32 tag number
+ v = v >> 3
+ if v > math.MaxInt32 {
+ err = fmt.Errorf("tag number out of range: %d", v)
+ return
+ }
+ tag = int32(v)
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 8
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-8])
+ x |= uint64(cb.buf[i-7]) << 8
+ x |= uint64(cb.buf[i-6]) << 16
+ x |= uint64(cb.buf[i-5]) << 24
+ x |= uint64(cb.buf[i-4]) << 32
+ x |= uint64(cb.buf[i-3]) << 40
+ x |= uint64(cb.buf[i-2]) << 48
+ x |= uint64(cb.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 4
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-4])
+ x |= uint64(cb.buf[i-3]) << 8
+ x |= uint64(cb.buf[i-2]) << 16
+ x |= uint64(cb.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigZag32 decodes a signed 32-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag32(v uint64) int32 {
+ return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31))
+}
+
+// DecodeZigZag64 decodes a signed 64-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag64(v uint64) int64 {
+ return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63))
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := cb.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := cb.index + nb
+ if end < cb.index || end > len(cb.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ buf = cb.buf[cb.index:end]
+ cb.index = end
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, cb.buf[cb.index:])
+ cb.index = end
+ return
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+ var groupEnd, dataEnd int
+ groupEnd, dataEnd, err := cb.findGroupEnd()
+ if err != nil {
+ return nil, err
+ }
+ var results []byte
+ if !alloc {
+ results = cb.buf[cb.index:dataEnd]
+ } else {
+ results = make([]byte, dataEnd-cb.index)
+ copy(results, cb.buf[cb.index:])
+ }
+ cb.index = groupEnd
+ return results, nil
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+ groupEnd, _, err := cb.findGroupEnd()
+ if err != nil {
+ return err
+ }
+ cb.index = groupEnd
+ return nil
+}
+
+func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) {
+ bs := cb.buf
+ start := cb.index
+ defer func() {
+ cb.index = start
+ }()
+ for {
+ fieldStart := cb.index
+ // read a field tag
+ _, wireType, err := cb.DecodeTagAndWireType()
+ if err != nil {
+ return 0, 0, err
+ }
+ // skip past the field's data
+ switch wireType {
+ case proto.WireFixed32:
+ if err := cb.Skip(4); err != nil {
+ return 0, 0, err
+ }
+ case proto.WireFixed64:
+ if err := cb.Skip(8); err != nil {
+ return 0, 0, err
+ }
+ case proto.WireVarint:
+ // skip varint by finding last byte (has high bit unset)
+ i := cb.index
+ limit := i + 10 // varint cannot be >10 bytes
+ for {
+ if i >= limit {
+ return 0, 0, ErrOverflow
+ }
+ if i >= len(bs) {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ if bs[i]&0x80 == 0 {
+ break
+ }
+ i++
+ }
+ // TODO: This would only overflow if buffer length was MaxInt and we
+ // read the last byte. This is not a real/feasible concern on 64-bit
+ // systems. Something to worry about for 32-bit systems? Do we care?
+ cb.index = i + 1
+ case proto.WireBytes:
+ l, err := cb.DecodeVarint()
+ if err != nil {
+ return 0, 0, err
+ }
+ if err := cb.Skip(int(l)); err != nil {
+ return 0, 0, err
+ }
+ case proto.WireStartGroup:
+ if err := cb.SkipGroup(); err != nil {
+ return 0, 0, err
+ }
+ case proto.WireEndGroup:
+ return cb.index, fieldStart, nil
+ default:
+ return 0, 0, ErrBadWireType
+ }
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
new file mode 100644
index 0000000..938b4d9
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -0,0 +1,283 @@
+package codec
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
+// it reads indicates an end-group marker.
+var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group")
+
+// MessageFactory is used to instantiate messages when DecodeFieldValue needs to
+// decode a message value.
+//
+// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which
+// implements this interface.
+type MessageFactory interface {
+ NewMessage(md *desc.MessageDescriptor) proto.Message
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+ // The tag number for the unrecognized field.
+ Tag int32
+
+ // Encoding indicates how the unknown field was encoded on the wire. If it
+ // is proto.WireBytes or proto.WireGroupStart then Contents will be set to
+ // the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least
+ // significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+ // Value.
+ Encoding int8
+ Contents []byte
+ Value uint64
+}
+
+// DecodeFieldValue will read a field value from the buffer and return its
+// value and the corresponding field descriptor. The given function is used
+// to lookup a field descriptor by tag number. The given factory is used to
+// instantiate a message if the field value is (or contains) a message value.
+//
+// On error, the field descriptor and value are typically nil. However, if the
+// error returned is ErrWireTypeEndGroup, the returned value will indicate any
+// tag number encoded in the end-group marker.
+//
+// If the field descriptor returned is nil, that means that the given function
+// returned nil. This is expected to happen for unrecognized tag numbers. In
+// that case, no error is returned, and the value will be an UnknownField.
+func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) {
+ if cb.EOF() {
+ return nil, nil, io.EOF
+ }
+ tagNumber, wireType, err := cb.DecodeTagAndWireType()
+ if err != nil {
+ return nil, nil, err
+ }
+ if wireType == proto.WireEndGroup {
+ return nil, tagNumber, ErrWireTypeEndGroup
+ }
+ fd := fieldFinder(tagNumber)
+ if fd == nil {
+ val, err := cb.decodeUnknownField(tagNumber, wireType)
+ return nil, val, err
+ }
+ val, err := cb.decodeKnownField(fd, wireType, fact)
+ return fd, val, err
+}
+
+// DecodeScalarField extracts a properly-typed value from v. The returned value's
+// type depends on the given field descriptor type. It will be the same type as
+// generated structs use for the field descriptor's type. Enum types will return
+// an int32. If the given field type uses length-delimited encoding (nested
+// messages, bytes, and strings), an error is returned.
+func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
+ switch fd.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return v != 0, nil
+ case descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ if v > math.MaxUint32 {
+ return nil, ErrOverflow
+ }
+ return uint32(v), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM:
+ s := int64(v)
+ if s > math.MaxInt32 || s < math.MinInt32 {
+ return nil, ErrOverflow
+ }
+ return int32(s), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ if v > math.MaxUint32 {
+ return nil, ErrOverflow
+ }
+ return int32(v), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ if v > math.MaxUint32 {
+ return nil, ErrOverflow
+ }
+ return DecodeZigZag32(v), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ return v, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ return int64(v), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return DecodeZigZag64(v), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ if v > math.MaxUint32 {
+ return nil, ErrOverflow
+ }
+ return math.Float32frombits(uint32(v)), nil
+
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return math.Float64frombits(v), nil
+
+ default:
+ // bytes, string, message, and group cannot be represented as a simple numeric value
+ return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
+ }
+}
+
+// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The
+// returned value's type will usually be []byte, string, or, for nested messages,
+// the type returned from the given message factory. However, since repeated
+// scalar fields can be length-delimited, when they used packed encoding, it can
+// also return an []interface{}, where each element is a scalar value. Furthermore,
+// it could return a scalar type, not in a slice, if the given field descriptor is
+// not repeated. This is to support cases where a field is changed from optional
+// to repeated. New code may emit a packed repeated representation, but old code
+// still expects a single scalar value. In this case, if the actual data in bytes
+// contains multiple values, only the last value is returned.
+func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
+ switch {
+ case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return bytes, nil
+
+ case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+ return string(bytes), nil
+
+ case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
+ fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+ msg := mf.NewMessage(fd.GetMessageType())
+ err := proto.Unmarshal(bytes, msg)
+ if err != nil {
+ return nil, err
+ } else {
+ return msg, nil
+ }
+
+ default:
+ // even if the field is not repeated or not packed, we still parse it as such for
+ // backwards compatibility (e.g. message we are de-serializing could have been both
+ // repeated and packed at the time of serialization)
+ packedBuf := NewBuffer(bytes)
+ var slice []interface{}
+ var val interface{}
+ for !packedBuf.EOF() {
+ var v uint64
+ var err error
+ if varintTypes[fd.GetType()] {
+ v, err = packedBuf.DecodeVarint()
+ } else if fixed32Types[fd.GetType()] {
+ v, err = packedBuf.DecodeFixed32()
+ } else if fixed64Types[fd.GetType()] {
+ v, err = packedBuf.DecodeFixed64()
+ } else {
+ return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
+ }
+ if err != nil {
+ return nil, err
+ }
+ val, err = DecodeScalarField(fd, v)
+ if err != nil {
+ return nil, err
+ }
+ if fd.IsRepeated() {
+ slice = append(slice, val)
+ }
+ }
+ if fd.IsRepeated() {
+ return slice, nil
+ } else {
+ // if not a repeated field, last value wins
+ return val, nil
+ }
+ }
+}
+
+func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) {
+ var val interface{}
+ var err error
+ switch encoding {
+ case proto.WireFixed32:
+ var num uint64
+ num, err = b.DecodeFixed32()
+ if err == nil {
+ val, err = DecodeScalarField(fd, num)
+ }
+ case proto.WireFixed64:
+ var num uint64
+ num, err = b.DecodeFixed64()
+ if err == nil {
+ val, err = DecodeScalarField(fd, num)
+ }
+ case proto.WireVarint:
+ var num uint64
+ num, err = b.DecodeVarint()
+ if err == nil {
+ val, err = DecodeScalarField(fd, num)
+ }
+
+ case proto.WireBytes:
+ alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+ var raw []byte
+ raw, err = b.DecodeRawBytes(alloc)
+ if err == nil {
+ val, err = DecodeLengthDelimitedField(fd, raw, fact)
+ }
+
+ case proto.WireStartGroup:
+ if fd.GetMessageType() == nil {
+ return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
+ }
+ msg := fact.NewMessage(fd.GetMessageType())
+ var data []byte
+ data, err = b.ReadGroup(false)
+ if err == nil {
+ err = proto.Unmarshal(data, msg)
+ if err == nil {
+ val = msg
+ }
+ }
+
+ default:
+ return nil, ErrBadWireType
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return val, nil
+}
+
+func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) {
+ u := UnknownField{Tag: tagNumber, Encoding: encoding}
+ var err error
+ switch encoding {
+ case proto.WireFixed32:
+ u.Value, err = b.DecodeFixed32()
+ case proto.WireFixed64:
+ u.Value, err = b.DecodeFixed64()
+ case proto.WireVarint:
+ u.Value, err = b.DecodeVarint()
+ case proto.WireBytes:
+ u.Contents, err = b.DecodeRawBytes(true)
+ case proto.WireStartGroup:
+ u.Contents, err = b.ReadGroup(true)
+ default:
+ err = ErrBadWireType
+ }
+ if err != nil {
+ return nil, err
+ }
+ return u, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go
new file mode 100644
index 0000000..f76499f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/doc.go
@@ -0,0 +1,7 @@
+// Package codec contains a reader/write type that assists with encoding
+// and decoding protobuf's binary representation.
+//
+// The code in this package began as a fork of proto.Buffer but provides
+// additional API to make it more useful to code that needs to dynamically
+// process or produce the protobuf binary format.
+package codec
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode.go b/vendor/github.com/jhump/protoreflect/codec/encode.go
new file mode 100644
index 0000000..c84523f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode.go
@@ -0,0 +1,163 @@
+package codec
+
+import "github.com/golang/protobuf/proto"
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ cb.buf = append(cb.buf, uint8(x))
+ return nil
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+ v := uint64((int64(tag) << 3) | int64(wireType))
+ return cb.EncodeVarint(v)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigZag64 does zig-zag encoding to convert the given
+// signed 64-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag64(v int64) uint64 {
+ return (uint64(v) << 1) ^ uint64(v>>63)
+}
+
+// EncodeZigZag32 does zig-zag encoding to convert the given
+// signed 32-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag32(v int32) uint64 {
+ return uint64((uint32(v) << 1) ^ uint32((v >> 31)))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+ if err := cb.EncodeVarint(uint64(len(b))); err != nil {
+ return err
+ }
+ cb.buf = append(cb.buf, b...)
+ return nil
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+ bytes, err := marshalMessage(cb.buf, pm, cb.deterministic)
+ if err != nil {
+ return err
+ }
+ cb.buf = bytes
+ return nil
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+ bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic)
+ if err != nil {
+ return err
+ }
+ // save truncated buffer if it was grown (so we can re-use it and
+ // curtail future allocations)
+ if cap(bytes) > cap(cb.tmp) {
+ cb.tmp = bytes[:0]
+ }
+ return cb.EncodeRawBytes(bytes)
+}
+
+func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) {
+ // we try to use the most efficient way to marshal to existing slice
+ nm, ok := pm.(interface {
+ // this interface is implemented by generated messages
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+ })
+ if ok {
+ sz := nm.XXX_Size()
+ if cap(b) < len(b)+sz {
+ // re-allocate to fit
+ bytes := make([]byte, len(b), len(b)+sz)
+ copy(bytes, b)
+ b = bytes
+ }
+ return nm.XXX_Marshal(b, deterministic)
+ }
+
+ if deterministic {
+ // see if the message has custom deterministic methods, preferring an
+ // "append" method over one that must always re-allocate
+ madm, ok := pm.(interface {
+ MarshalAppendDeterministic(b []byte) ([]byte, error)
+ })
+ if ok {
+ return madm.MarshalAppendDeterministic(b)
+ }
+
+ mdm, ok := pm.(interface {
+ MarshalDeterministic() ([]byte, error)
+ })
+ if ok {
+ bytes, err := mdm.MarshalDeterministic()
+ if err != nil {
+ return nil, err
+ }
+ if len(b) == 0 {
+ return bytes, nil
+ }
+ return append(b, bytes...), nil
+ }
+ }
+
+ mam, ok := pm.(interface {
+ // see if we can append the message, vs. having to re-allocate
+ MarshalAppend(b []byte) ([]byte, error)
+ })
+ if ok {
+ return mam.MarshalAppend(b)
+ }
+
+ // lowest common denominator
+ bytes, err := proto.Marshal(pm)
+ if err != nil {
+ return nil, err
+ }
+ if len(b) == 0 {
+ return bytes, nil
+ }
+ return append(b, bytes...), nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
new file mode 100644
index 0000000..cda7299
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -0,0 +1,267 @@
+package codec
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+ if fd.IsMap() {
+ mp := val.(map[interface{}]interface{})
+ entryType := fd.GetMessageType()
+ keyType := entryType.FindFieldByNumber(1)
+ valType := entryType.FindFieldByNumber(2)
+ var entryBuffer Buffer
+ if cb.deterministic {
+ keys := make([]interface{}, 0, len(mp))
+ for k := range mp {
+ keys = append(keys, k)
+ }
+ sort.Sort(sortable(keys))
+ for _, k := range keys {
+ v := mp[k]
+ entryBuffer.Reset()
+ if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+ return err
+ }
+ if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+ return err
+ }
+ if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+ return err
+ }
+ if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+ return err
+ }
+ }
+ } else {
+ for k, v := range mp {
+ entryBuffer.Reset()
+ if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+ return err
+ }
+ if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+ return err
+ }
+ if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+ return err
+ }
+ if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ } else if fd.IsRepeated() {
+ sl := val.([]interface{})
+ wt, err := getWireType(fd.GetType())
+ if err != nil {
+ return err
+ }
+ if isPacked(fd) && len(sl) > 1 &&
+ (wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
+ // packed repeated field
+ var packedBuffer Buffer
+ for _, v := range sl {
+ if err := packedBuffer.encodeFieldValue(fd, v); err != nil {
+ return err
+ }
+ }
+ if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+ return err
+ }
+ return cb.EncodeRawBytes(packedBuffer.Bytes())
+ } else {
+ // non-packed repeated field
+ for _, v := range sl {
+ if err := cb.encodeFieldElement(fd, v); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ } else {
+ return cb.encodeFieldElement(fd, val)
+ }
+}
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+ opts := fd.AsFieldDescriptorProto().GetOptions()
+ // if set, use that value
+ if opts != nil && opts.Packed != nil {
+ return opts.GetPacked()
+ }
+ // if unset: proto2 defaults to false, proto3 to true
+ return fd.GetFile().IsProto3()
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+ return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+ vi := s[i]
+ vj := s[j]
+ switch reflect.TypeOf(vi).Kind() {
+ case reflect.Int32:
+ return vi.(int32) < vj.(int32)
+ case reflect.Int64:
+ return vi.(int64) < vj.(int64)
+ case reflect.Uint32:
+ return vi.(uint32) < vj.(uint32)
+ case reflect.Uint64:
+ return vi.(uint64) < vj.(uint64)
+ case reflect.String:
+ return vi.(string) < vj.(string)
+ case reflect.Bool:
+ return !vi.(bool) && vj.(bool)
+ default:
+ panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+ }
+}
+
+func (s sortable) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error {
+ wt, err := getWireType(fd.GetType())
+ if err != nil {
+ return err
+ }
+ if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil {
+ return err
+ }
+ if err := b.encodeFieldValue(fd, val); err != nil {
+ return err
+ }
+ if wt == proto.WireStartGroup {
+ return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup)
+ }
+ return nil
+}
+
+func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+ switch fd.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ v := val.(bool)
+ if v {
+ return b.EncodeVarint(1)
+ }
+ return b.EncodeVarint(0)
+
+ case descriptor.FieldDescriptorProto_TYPE_ENUM,
+ descriptor.FieldDescriptorProto_TYPE_INT32:
+ v := val.(int32)
+ return b.EncodeVarint(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ v := val.(int32)
+ return b.EncodeFixed32(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ v := val.(int32)
+ return b.EncodeVarint(EncodeZigZag32(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ v := val.(uint32)
+ return b.EncodeVarint(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ v := val.(uint32)
+ return b.EncodeFixed32(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ v := val.(int64)
+ return b.EncodeVarint(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ v := val.(int64)
+ return b.EncodeFixed64(uint64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ v := val.(int64)
+ return b.EncodeVarint(EncodeZigZag64(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ v := val.(uint64)
+ return b.EncodeVarint(v)
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ v := val.(uint64)
+ return b.EncodeFixed64(v)
+
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ v := val.(float64)
+ return b.EncodeFixed64(math.Float64bits(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ v := val.(float32)
+ return b.EncodeFixed32(uint64(math.Float32bits(v)))
+
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ v := val.([]byte)
+ return b.EncodeRawBytes(v)
+
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ v := val.(string)
+ return b.EncodeRawBytes(([]byte)(v))
+
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return b.EncodeDelimitedMessage(val.(proto.Message))
+
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ // just append the nested message to this buffer
+ return b.EncodeMessage(val.(proto.Message))
+ // whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
+
+ default:
+ return fmt.Errorf("unrecognized field type: %v", fd.GetType())
+ }
+}
+
+func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+ switch t {
+ case descriptor.FieldDescriptorProto_TYPE_ENUM,
+ descriptor.FieldDescriptorProto_TYPE_BOOL,
+ descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SINT64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64:
+ return proto.WireVarint, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ return proto.WireFixed32, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return proto.WireFixed64, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_BYTES,
+ descriptor.FieldDescriptorProto_TYPE_STRING,
+ descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return proto.WireBytes, nil
+
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ return proto.WireStartGroup, nil
+
+ default:
+ return 0, ErrBadWireType
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..538820c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,231 @@
+package desc
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+ intn "github.com/jhump/protoreflect/internal"
+)
+
+// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
+// The file's direct dependencies must be provided. If the given dependencies do not include
+// all of the file's dependencies or if the contents of the descriptors are internally
+// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
+func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+ return createFileDescriptor(fd, deps, nil)
+}
+
+func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+ ret := &FileDescriptor{
+ proto: fd,
+ symbols: map[string]Descriptor{},
+ fieldIndex: map[string]map[int32]*FieldDescriptor{},
+ }
+ pkg := fd.GetPackage()
+
+ // populate references to file descriptor dependencies
+ files := map[string]*FileDescriptor{}
+ for _, f := range deps {
+ files[f.proto.GetName()] = f
+ }
+ ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+ for i, d := range fd.GetDependency() {
+ resolved := r.ResolveImport(fd.GetName(), d)
+ ret.deps[i] = files[resolved]
+ if ret.deps[i] == nil {
+ if resolved != d {
+ ret.deps[i] = files[d]
+ }
+ if ret.deps[i] == nil {
+ return nil, intn.ErrNoSuchFile(d)
+ }
+ }
+ }
+ ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+ for i, pd := range fd.GetPublicDependency() {
+ ret.publicDeps[i] = ret.deps[pd]
+ }
+ ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+ for i, wd := range fd.GetWeakDependency() {
+ ret.weakDeps[i] = ret.deps[wd]
+ }
+ ret.isProto3 = fd.GetSyntax() == "proto3"
+
+ // populate all tables of child descriptors
+ for _, m := range fd.GetMessageType() {
+ md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
+ ret.symbols[n] = md
+ ret.messages = append(ret.messages, md)
+ }
+ for _, e := range fd.GetEnumType() {
+ ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
+ ret.symbols[n] = ed
+ ret.enums = append(ret.enums, ed)
+ }
+ for _, ex := range fd.GetExtension() {
+ exd, n := createFieldDescriptor(ret, ret, pkg, ex)
+ ret.symbols[n] = exd
+ ret.extensions = append(ret.extensions, exd)
+ }
+ for _, s := range fd.GetService() {
+ sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
+ ret.symbols[n] = sd
+ ret.services = append(ret.services, sd)
+ }
+
+ ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+ ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+ // now we can resolve all type references and source code info
+ scopes := []scope{fileScope(ret)}
+ path := make([]int32, 1, 8)
+ path[0] = internal.File_messagesTag
+ for i, md := range ret.messages {
+ if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_enumsTag
+ for i, ed := range ret.enums {
+ ed.resolve(append(path, int32(i)))
+ }
+ path[0] = internal.File_extensionsTag
+ for i, exd := range ret.extensions {
+ if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_servicesTag
+ for i, sd := range ret.services {
+ if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+ return nil, err
+ }
+ }
+
+ return ret, nil
+}
+
+// CreateFileDescriptors constructs a set of descriptors, one for each of the
+// given descriptor protos. The given set of descriptor protos must include all
+// transitive dependencies for every file.
+func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+ return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+ if len(fds) == 0 {
+ return nil, nil
+ }
+ files := map[string]*dpb.FileDescriptorProto{}
+ resolved := map[string]*FileDescriptor{}
+ var name string
+ for _, fd := range fds {
+ name = fd.GetName()
+ files[name] = fd
+ }
+ for _, fd := range fds {
+ _, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return resolved, nil
+}
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
+ var fdps []*dpb.FileDescriptorProto
+ addAllFiles(fds, &fdps, map[string]struct{}{})
+ return &dpb.FileDescriptorSet{File: fdps}
+}
+
+func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+ for _, fd := range src {
+ if _, ok := seen[fd.GetName()]; ok {
+ continue
+ }
+ seen[fd.GetName()] = struct{}{}
+ addAllFiles(fd.GetDependencies(), results, seen)
+ *results = append(*results, fd.AsFileDescriptorProto())
+ }
+}
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+ return createFileDescriptorFromSet(fds, nil)
+}
+
+func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+ result, err := createFileDescriptorsFromSet(fds, r)
+ if err != nil {
+ return nil, err
+ }
+ files := fds.GetFile()
+ lastFilename := files[len(files)-1].GetName()
+ return result[lastFilename], nil
+}
+
+// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set.
+// The returned map includes all files in the set, keyed b name. The set must include the
+// full set of transitive dependencies for all files therein or else a link error will occur
+// and be returned instead of the slice of descriptors. This is the same format used by
+// protoc when a FileDescriptorSet file with an invocation like so:
+// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+ return createFileDescriptorsFromSet(fds, nil)
+}
+
+func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+ files := fds.GetFile()
+ if len(files) == 0 {
+ return nil, errors.New("file descriptor set is empty")
+ }
+ return createFileDescriptors(files, r)
+}
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+ for _, s := range seen {
+ if filename == s {
+ return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
+ }
+ }
+ seen = append(seen, filename)
+
+ if d, ok := resolved[filename]; ok {
+ return d, nil
+ }
+ fdp := files[filename]
+ if fdp == nil {
+ return nil, intn.ErrNoSuchFile(filename)
+ }
+ deps := make([]*FileDescriptor, len(fdp.GetDependency()))
+ for i, depName := range fdp.GetDependency() {
+ resolvedDep := r.ResolveImport(filename, depName)
+ dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
+ if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
+ dep, err = createFromSet(depName, r, seen, files, resolved)
+ }
+ if err != nil {
+ return nil, err
+ }
+ deps[i] = dep
+ }
+ d, err := createFileDescriptor(fdp, deps, r)
+ if err != nil {
+ return nil, err
+ }
+ resolved[filename] = d
+ return d, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..ab235a3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1666 @@
+package desc
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+ // GetName returns the name of the object described by the descriptor. This will
+ // be a base name that does not include enclosing message names or the package name.
+ // For file descriptors, this indicates the path and name to the described file.
+ GetName() string
+ // GetFullyQualifiedName returns the fully-qualified name of the object described by
+ // the descriptor. This will include the package name and any enclosing message names.
+ // For file descriptors, this returns the path and name to the described file (same as
+ // GetName).
+ GetFullyQualifiedName() string
+ // GetParent returns the enclosing element in a proto source file. If the described
+ // object is a top-level object, this returns the file descriptor. Otherwise, it returns
+ // the element in which the described object was declared. File descriptors have no
+ // parent and return nil.
+ GetParent() Descriptor
+ // GetFile returns the file descriptor in which this element was declared. File
+ // descriptors return themselves.
+ GetFile() *FileDescriptor
+ // GetOptions returns the options proto containing options for the described element.
+ GetOptions() proto.Message
+ // GetSourceInfo returns any source code information that was present in the file
+ // descriptor. Source code info is optional. If no source code info is available for
+ // the element (including if there is none at all in the file descriptor) then this
+ // returns nil
+ GetSourceInfo() *dpb.SourceCodeInfo_Location
+ // AsProto returns the underlying descriptor proto for this descriptor.
+ AsProto() proto.Message
+}
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+ proto *dpb.FileDescriptorProto
+ symbols map[string]Descriptor
+ deps []*FileDescriptor
+ publicDeps []*FileDescriptor
+ weakDeps []*FileDescriptor
+ messages []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ services []*ServiceDescriptor
+ fieldIndex map[string]map[int32]*FieldDescriptor
+ isProto3 bool
+ sourceInfo internal.SourceInfoMap
+ sourceInfoRecomputeFunc
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+ internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+ fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+ if fields == nil {
+ fields = map[int32]*FieldDescriptor{}
+ fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+ }
+ fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+ return fd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+ return fd.proto.GetName()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+ return fd.proto.GetPackage()
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// Is it present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+ return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+ return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+ return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+ return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+ return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+ return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+ return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+func (fd *FileDescriptor) IsProto3() bool {
+ return fd.isProto3
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+ return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+ return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+ return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+ return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+ return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+ return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+ return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+ if symbol[0] == '.' {
+ symbol = symbol[1:]
+ }
+ return fd.symbols[symbol]
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+ if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+ return md
+ } else {
+ return nil
+ }
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+ if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+ return ed
+ } else {
+ return nil
+ }
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+ if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+ return sd
+ } else {
+ return nil
+ }
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+ if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+ if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+ proto *dpb.DescriptorProto
+ parent Descriptor
+ file *FileDescriptor
+ fields []*FieldDescriptor
+ nested []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ oneOfs []*OneOfDescriptor
+ extRanges extRanges
+ fqn string
+ sourceInfoPath []int32
+ jsonNames jsonNameMap
+ isProto3 bool
+ isMapEntry bool
+}
+
// createMessageDescriptor builds a MessageDescriptor for the given message
// proto, recursively creating descriptors for all of the message's nested
// elements (fields, nested messages, enums, extensions, and one-ofs). Every
// descriptor created along the way is recorded in symbols, keyed by fully
// qualified name. The new descriptor and its fully qualified name are
// returned.
func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
	msgName := merge(enclosing, md.GetName())
	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
	for _, f := range md.GetField() {
		fld, n := createFieldDescriptor(fd, ret, msgName, f)
		symbols[n] = fld
		ret.fields = append(ret.fields, fld)
	}
	for _, nm := range md.NestedType {
		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
		symbols[n] = nmd
		ret.nested = append(ret.nested, nmd)
	}
	for _, e := range md.EnumType {
		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
		symbols[n] = ed
		ret.enums = append(ret.enums, ed)
	}
	for _, ex := range md.GetExtension() {
		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
		symbols[n] = exd
		ret.extensions = append(ret.extensions, exd)
	}
	for i, o := range md.GetOneofDecl() {
		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
		symbols[n] = od
		ret.oneOfs = append(ret.oneOfs, od)
	}
	for _, r := range md.GetExtensionRange() {
		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code),
		// but protoc converts the range to an exclusive end in the descriptor, so we must convert back.
		end := r.GetEnd() - 1
		ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
			Start: r.GetStart(),
			End:   end})
	}
	// Order the ranges by start tag so that extRanges.IsExtension can binary-search them.
	sort.Sort(ret.extRanges)
	ret.isProto3 = fd.isProto3
	// A map entry is a synthetic message with the map_entry option set and
	// exactly two fields: the key (tag 1) and the value (tag 2).
	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
		len(ret.fields) == 2 &&
		ret.fields[0].GetNumber() == 1 &&
		ret.fields[1].GetNumber() == 2

	return ret, msgName
}
+
// resolve records the message's source info path and recursively resolves all
// of the message's children, using the given scopes to resolve any type
// references.
func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
	// Extend the path with an element that selects which kind of child is
	// being addressed; the same slot is overwritten below for each child kind.
	path = append(path, internal.Message_nestedMessagesTag)
	scopes = append(scopes, messageScope(md))
	for i, nmd := range md.nested {
		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
			return err
		}
	}
	path[len(path)-1] = internal.Message_enumsTag
	for i, ed := range md.enums {
		// Enum resolution cannot fail (no type references to resolve).
		ed.resolve(append(path, int32(i)))
	}
	path[len(path)-1] = internal.Message_fieldsTag
	for i, fld := range md.fields {
		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
			return err
		}
	}
	path[len(path)-1] = internal.Message_extensionsTag
	for i, exd := range md.extensions {
		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
			return err
		}
	}
	path[len(path)-1] = internal.Message_oneOfsTag
	for i, od := range md.oneOfs {
		od.resolve(append(path, int32(i)))
	}
	return nil
}
+
// GetName returns the simple (unqualified) name of the message.
func (md *MessageDescriptor) GetName() string {
	return md.proto.GetName()
}

// GetFullyQualifiedName returns the fully qualified name of the message. This
// includes the package name (if there is one) as well as the names of any
// enclosing messages.
func (md *MessageDescriptor) GetFullyQualifiedName() string {
	return md.fqn
}

// GetParent returns the message's enclosing descriptor. For top-level messages,
// this will be a file descriptor. Otherwise it will be the descriptor for the
// enclosing message.
func (md *MessageDescriptor) GetParent() Descriptor {
	return md.parent
}

// GetFile returns the descriptor for the file in which this message is defined.
func (md *MessageDescriptor) GetFile() *FileDescriptor {
	return md.file
}

// GetOptions returns the message's options. Most usages will be more interested
// in GetMessageOptions, which has a concrete return type. This generic version
// is present to satisfy the Descriptor interface.
func (md *MessageDescriptor) GetOptions() proto.Message {
	return md.proto.GetOptions()
}

// GetMessageOptions returns the message's options. It is the same value
// returned by GetOptions, but concretely typed.
func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
	return md.proto.GetOptions()
}

// GetSourceInfo returns source info for the message, if present in the
// descriptor. Not all descriptors will contain source info. If non-nil, the
// returned info contains information about the location in the file where the
// message was defined and also contains comments associated with the message
// definition.
func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
	return md.file.sourceInfo.Get(md.sourceInfoPath)
}

// AsProto returns the underlying descriptor proto. Most usages will be more
// interested in AsDescriptorProto, which has a concrete return type. This
// generic version is present to satisfy the Descriptor interface.
func (md *MessageDescriptor) AsProto() proto.Message {
	return md.proto
}

// AsDescriptorProto returns the underlying descriptor proto.
func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
	return md.proto
}

// String returns the underlying descriptor proto, in compact text format.
func (md *MessageDescriptor) String() string {
	return md.proto.String()
}

// IsMapEntry returns true if this is a synthetic message type that represents an entry
// in a map field.
func (md *MessageDescriptor) IsMapEntry() bool {
	return md.isMapEntry
}

// GetFields returns all of the fields for this message. Extensions declared
// inside the message are not included; see GetNestedExtensions.
func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
	return md.fields
}

// GetNestedMessageTypes returns all of the message types declared inside this message.
func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
	return md.nested
}

// GetNestedEnumTypes returns all of the enums declared inside this message.
func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
	return md.enums
}

// GetNestedExtensions returns all of the extensions declared inside this message.
func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
	return md.extensions
}

// GetOneOfs returns all of the one-of field sets declared inside this message.
func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
	return md.oneOfs
}

// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
func (md *MessageDescriptor) IsProto3() bool {
	return md.isProto3
}

// GetExtensionRanges returns the ranges of extension field numbers for this message.
// Both endpoints of each range are inclusive.
func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
	return md.extRanges
}

// IsExtendable returns true if this message has any extension ranges.
func (md *MessageDescriptor) IsExtendable() bool {
	return len(md.extRanges) > 0
}

// IsExtension returns true if the given tag number is within any of this message's
// extension ranges.
func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
	return md.extRanges.IsExtension(tagNumber)
}
+
// extRanges is the set of extension ranges declared by a message, represented
// with inclusive endpoints. It implements sort.Interface, ordering the ranges
// by their start tag number.
type extRanges []proto.ExtensionRange

// String renders the ranges as a comma-separated list of "start..end" pairs.
func (er extRanges) String() string {
	var buf bytes.Buffer
	first := true
	for _, r := range er {
		if first {
			first = false
		} else {
			buf.WriteString(",")
		}
		fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
	}
	return buf.String()
}

// IsExtension reports whether tagNumber falls within any of the ranges. It
// uses a binary search, so er must already be sorted by start tag (see the
// sort.Sort call in createMessageDescriptor).
func (er extRanges) IsExtension(tagNumber int32) bool {
	// Find the first range whose (inclusive) end is at or past the tag.
	i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
	return i < len(er) && tagNumber >= er[i].Start
}

func (er extRanges) Len() int {
	return len(er)
}

func (er extRanges) Less(i, j int) bool {
	return er[i].Start < er[j].Start
}

func (er extRanges) Swap(i, j int) {
	er[i], er[j] = er[j], er[i]
}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+ fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+ if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+ if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
// FieldDescriptor describes a field of a protocol buffer message.
type FieldDescriptor struct {
	proto          *dpb.FieldDescriptorProto // underlying descriptor proto
	parent         Descriptor                // enclosing message, or file/message where an extension is declared
	owner          *MessageDescriptor        // message the field belongs to (the extendee, for extensions)
	file           *FileDescriptor           // file in which the field is declared
	oneOf          *OneOfDescriptor          // one-of to which the field belongs, if any
	msgType        *MessageDescriptor        // resolved type for message- and group-typed fields
	enumType       *EnumDescriptor           // resolved type for enum-typed fields
	fqn            string                    // fully qualified name
	sourceInfoPath []int32                   // path used to look up source code info
	def            memoizedDefault           // memoized default value (see memoizedDefault)
	isMap          bool                      // true if this is a map field
}
+
// createFieldDescriptor builds a FieldDescriptor for the given field proto and
// returns it along with its fully qualified name. For regular (non-extension)
// fields the owner is set immediately to the enclosing message.
func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
	fldName := merge(enclosing, fld.GetName())
	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
	if fld.GetExtendee() == "" {
		// Not an extension, so the parent must be the enclosing message.
		ret.owner = parent.(*MessageDescriptor)
	}
	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
	return ret, fldName
}
+
+func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+ if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+ return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+ }
+ fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
+ if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ return err
+ } else {
+ fd.enumType = desc.(*EnumDescriptor)
+ }
+ }
+ if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+ if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ return err
+ } else {
+ fd.msgType = desc.(*MessageDescriptor)
+ }
+ }
+ if fd.proto.GetExtendee() != "" {
+ if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+ return err
+ } else {
+ fd.owner = desc.(*MessageDescriptor)
+ }
+ }
+ fd.file.registerField(fd)
+ fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
+ fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().IsMapEntry()
+ return nil
+}
+
// determineDefault computes the default value for the field, per protobuf
// semantics: nil collections for maps and repeated fields, nil for message
// fields, a declared default (proto2 only) when one is present and parseable,
// and otherwise the zero value for the field's type.
func (fd *FieldDescriptor) determineDefault() interface{} {
	if fd.IsMap() {
		return map[interface{}]interface{}(nil)
	} else if fd.IsRepeated() {
		return []interface{}(nil)
	} else if fd.msgType != nil {
		return nil
	}

	// Only proto2 files may carry an explicitly declared default value.
	proto3 := fd.file.isProto3
	if !proto3 {
		def := fd.AsFieldDescriptorProto().GetDefaultValue()
		if def != "" {
			ret := parseDefaultValue(fd, def)
			if ret != nil {
				return ret
			}
			// if we can't parse default value, fall-through to return normal default...
		}
	}

	switch fd.GetType() {
	case dpb.FieldDescriptorProto_TYPE_FIXED32,
		dpb.FieldDescriptorProto_TYPE_UINT32:
		return uint32(0)
	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
		dpb.FieldDescriptorProto_TYPE_INT32,
		dpb.FieldDescriptorProto_TYPE_SINT32:
		return int32(0)
	case dpb.FieldDescriptorProto_TYPE_FIXED64,
		dpb.FieldDescriptorProto_TYPE_UINT64:
		return uint64(0)
	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
		dpb.FieldDescriptorProto_TYPE_INT64,
		dpb.FieldDescriptorProto_TYPE_SINT64:
		return int64(0)
	case dpb.FieldDescriptorProto_TYPE_FLOAT:
		return float32(0.0)
	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
		return float64(0.0)
	case dpb.FieldDescriptorProto_TYPE_BOOL:
		return false
	case dpb.FieldDescriptorProto_TYPE_BYTES:
		return []byte(nil)
	case dpb.FieldDescriptorProto_TYPE_STRING:
		return ""
	case dpb.FieldDescriptorProto_TYPE_ENUM:
		// proto3 enums always default to zero; proto2 enums default to the
		// first declared value.
		if proto3 {
			return int32(0)
		}
		enumVals := fd.GetEnumType().GetValues()
		if len(enumVals) > 0 {
			return enumVals[0].GetNumber()
		} else {
			return int32(0) // enum declares no values; fall back to zero
		}
	default:
		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
	}
}
+
+func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
+ switch fd.GetType() {
+ case dpb.FieldDescriptorProto_TYPE_ENUM:
+ vd := fd.GetEnumType().FindValueByName(val)
+ if vd != nil {
+ return vd.GetNumber()
+ }
+ return nil
+ case dpb.FieldDescriptorProto_TYPE_BOOL:
+ if val == "true" {
+ return true
+ } else if val == "false" {
+ return false
+ }
+ return nil
+ case dpb.FieldDescriptorProto_TYPE_BYTES:
+ return []byte(unescape(val))
+ case dpb.FieldDescriptorProto_TYPE_STRING:
+ return val
+ case dpb.FieldDescriptorProto_TYPE_FLOAT:
+ if f, err := strconv.ParseFloat(val, 32); err == nil {
+ return float32(f)
+ } else {
+ return float32(0)
+ }
+ case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+ if f, err := strconv.ParseFloat(val, 64); err == nil {
+ return f
+ } else {
+ return float64(0)
+ }
+ case dpb.FieldDescriptorProto_TYPE_INT32,
+ dpb.FieldDescriptorProto_TYPE_SINT32,
+ dpb.FieldDescriptorProto_TYPE_SFIXED32:
+ if i, err := strconv.ParseInt(val, 10, 32); err == nil {
+ return int32(i)
+ } else {
+ return int32(0)
+ }
+ case dpb.FieldDescriptorProto_TYPE_UINT32,
+ dpb.FieldDescriptorProto_TYPE_FIXED32:
+ if i, err := strconv.ParseUint(val, 10, 32); err == nil {
+ return uint32(i)
+ } else {
+ return uint32(0)
+ }
+ case dpb.FieldDescriptorProto_TYPE_INT64,
+ dpb.FieldDescriptorProto_TYPE_SINT64,
+ dpb.FieldDescriptorProto_TYPE_SFIXED64:
+ if i, err := strconv.ParseInt(val, 10, 64); err == nil {
+ return i
+ } else {
+ return int64(0)
+ }
+ case dpb.FieldDescriptorProto_TYPE_UINT64,
+ dpb.FieldDescriptorProto_TYPE_FIXED64:
+ if i, err := strconv.ParseUint(val, 10, 64); err == nil {
+ return i
+ } else {
+ return uint64(0)
+ }
+ default:
+ return nil
+ }
+}
+
// unescape reverses the C-style escaping that protoc applies to default
// values for 'bytes' fields. It handles hex (\xN, \xNN), octal (up to three
// digits), short (\uXXXX) and long (\UXXXXXXXX) unicode escapes, and the
// standard single-character escapes. Malformed escape sequences are copied
// through to the output unchanged.
func unescape(s string) string {
	// protoc encodes default values for 'bytes' fields using C escaping,
	// so this function reverses that escaping
	out := make([]byte, 0, len(s))
	var buf [4]byte // scratch space for UTF-8 encoding of unicode escapes
	for len(s) > 0 {
		if s[0] != '\\' || len(s) < 2 {
			// not escape sequence, or too short to be well-formed escape
			out = append(out, s[0])
			s = s[1:]
		} else if s[1] == 'x' || s[1] == 'X' {
			// hex escape: \x followed by one or two hex digits
			n := matchPrefix(s[2:], 2, isHex)
			if n == 0 {
				// bad escape
				out = append(out, s[:2]...)
				s = s[2:]
			} else {
				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
				if err != nil {
					// shouldn't really happen...
					out = append(out, s[:2+n]...)
				} else {
					out = append(out, byte(c))
				}
				s = s[2+n:]
			}
		} else if s[1] >= '0' && s[1] <= '7' {
			// octal escape: one to three octal digits
			n := 1 + matchPrefix(s[2:], 2, isOctal)
			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
			if err != nil || c > 0xff {
				out = append(out, s[:1+n]...)
			} else {
				out = append(out, byte(c))
			}
			s = s[1+n:]
		} else if s[1] == 'u' {
			// short unicode escape: \u followed by exactly four hex digits
			if len(s) < 6 {
				// bad escape
				out = append(out, s...)
				s = s[len(s):]
			} else {
				c, err := strconv.ParseUint(s[2:6], 16, 16)
				if err != nil {
					// bad escape
					out = append(out, s[:6]...)
				} else {
					w := utf8.EncodeRune(buf[:], rune(c))
					out = append(out, buf[:w]...)
				}
				s = s[6:]
			}
		} else if s[1] == 'U' {
			// long unicode escape: \U followed by exactly eight hex digits
			if len(s) < 10 {
				// bad escape
				out = append(out, s...)
				s = s[len(s):]
			} else {
				c, err := strconv.ParseUint(s[2:10], 16, 32)
				if err != nil || c > 0x10ffff {
					// bad escape
					out = append(out, s[:10]...)
				} else {
					w := utf8.EncodeRune(buf[:], rune(c))
					out = append(out, buf[:w]...)
				}
				s = s[10:]
			}
		} else {
			// single-character escapes
			switch s[1] {
			case 'a':
				out = append(out, '\a')
			case 'b':
				out = append(out, '\b')
			case 'f':
				out = append(out, '\f')
			case 'n':
				out = append(out, '\n')
			case 'r':
				out = append(out, '\r')
			case 't':
				out = append(out, '\t')
			case 'v':
				out = append(out, '\v')
			case '\\':
				out = append(out, '\\')
			case '\'':
				out = append(out, '\'')
			case '"':
				out = append(out, '"')
			case '?':
				out = append(out, '?')
			default:
				// invalid escape, just copy it as-is
				out = append(out, s[:2]...)
			}
			s = s[2:]
		}
	}
	return string(out)
}
+
// isOctal reports whether b is an ASCII octal digit.
func isOctal(b byte) bool { return '0' <= b && b <= '7' }

// isHex reports whether b is an ASCII hexadecimal digit (either case).
func isHex(b byte) bool {
	switch {
	case '0' <= b && b <= '9', 'a' <= b && b <= 'f', 'A' <= b && b <= 'F':
		return true
	}
	return false
}
// matchPrefix returns the length of the longest prefix of s, no longer than
// limit, whose bytes all satisfy fn.
func matchPrefix(s string, limit int, fn func(byte) bool) int {
	if len(s) < limit {
		limit = len(s)
	}
	for i := 0; i < limit; i++ {
		if !fn(s[i]) {
			return i
		}
	}
	return limit
}
+
// GetName returns the name of the field.
func (fd *FieldDescriptor) GetName() string {
	return fd.proto.GetName()
}

// GetNumber returns the tag number of this field.
func (fd *FieldDescriptor) GetNumber() int32 {
	return fd.proto.GetNumber()
}

// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
// GetName, this includes fully qualified name of the enclosing message for
// regular fields.
//
// For extension fields, this includes the package (if there is one) as well as
// any enclosing messages. The package and/or enclosing messages are for where
// the extension is defined, not the message it extends.
//
// If this field is part of a one-of, the fully qualified name does *not*
// include the name of the one-of, only of the enclosing message.
func (fd *FieldDescriptor) GetFullyQualifiedName() string {
	return fd.fqn
}

// GetParent returns the field's enclosing descriptor. For normal
// (non-extension) fields, this is the enclosing message. For extensions, this
// is the descriptor in which the extension is defined, not the message that is
// extended. The parent for an extension may be a file descriptor or a message,
// depending on where the extension is defined.
func (fd *FieldDescriptor) GetParent() Descriptor {
	return fd.parent
}

// GetFile returns the descriptor for the file in which this field is defined.
func (fd *FieldDescriptor) GetFile() *FileDescriptor {
	return fd.file
}

// GetOptions returns the field's options. Most usages will be more interested
// in GetFieldOptions, which has a concrete return type. This generic version
// is present to satisfy the Descriptor interface.
func (fd *FieldDescriptor) GetOptions() proto.Message {
	return fd.proto.GetOptions()
}

// GetFieldOptions returns the field's options. It is the same value returned
// by GetOptions, but concretely typed.
func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
	return fd.proto.GetOptions()
}

// GetSourceInfo returns source info for the field, if present in the
// descriptor. Not all descriptors will contain source info. If non-nil, the
// returned info contains information about the location in the file where the
// field was defined and also contains comments associated with the field
// definition.
func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
}

// AsProto returns the underlying descriptor proto. Most usages will be more
// interested in AsFieldDescriptorProto, which has a concrete return type. This
// generic version is present to satisfy the Descriptor interface.
func (fd *FieldDescriptor) AsProto() proto.Message {
	return fd.proto
}

// AsFieldDescriptorProto returns the underlying descriptor proto.
func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
	return fd.proto
}

// String returns the underlying descriptor proto, in compact text format.
func (fd *FieldDescriptor) String() string {
	return fd.proto.String()
}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+ if jsonName := fd.proto.GetJsonName(); jsonName != "" {
+ return jsonName
+ }
+ return fd.proto.GetName()
+}
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+ parent := fd.GetParent()
+ switch parent := parent.(type) {
+ case *FileDescriptor:
+ pkg := parent.GetPackage()
+ if pkg == "" {
+ return fd.GetJSONName()
+ }
+ return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+ default:
+ return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+ }
+}
+
// GetOwner returns the message type that this field belongs to. If this is a normal
// field then this is the same as GetParent. But for extensions, this will be the
// extendee message whereas GetParent refers to where the extension was declared.
func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
	return fd.owner
}

// IsExtension returns true if this is an extension field.
func (fd *FieldDescriptor) IsExtension() bool {
	return fd.proto.GetExtendee() != ""
}

// GetOneOf returns the one-of field set to which this field belongs. If this field
// is not part of a one-of then this method returns nil.
func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
	return fd.oneOf
}

// GetType returns the type of this field. If the type indicates an enum, the
// enum type can be queried via GetEnumType. If the type indicates a message, the
// message type can be queried via GetMessageType.
func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
	return fd.proto.GetType()
}

// GetLabel returns the label for this field. The label can be required (proto2-only),
// optional (default for proto3), or repeated.
func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
	return fd.proto.GetLabel()
}

// IsRequired returns true if this field has the "required" label.
func (fd *FieldDescriptor) IsRequired() bool {
	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
}

// IsRepeated returns true if this field has the "repeated" label.
func (fd *FieldDescriptor) IsRepeated() bool {
	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
}

// IsMap returns true if this is a map field. If so, it will have the "repeated"
// label and its type will be a message that represents a map entry. The map entry
// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
func (fd *FieldDescriptor) IsMap() bool {
	return fd.isMap
}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+ if fd.isMap {
+ return fd.msgType.FindFieldByNumber(int32(1))
+ }
+ return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+ if fd.isMap {
+ return fd.msgType.FindFieldByNumber(int32(2))
+ }
+ return nil
+}
+
// GetMessageType returns the type of this field if it is a message type. If
// this field is not a message type, it returns nil.
func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
	return fd.msgType
}

// GetEnumType returns the type of this field if it is an enum type. If this
// field is not an enum type, it returns nil.
func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
	return fd.enumType
}

// GetDefaultValue returns the default value for this field.
//
// If this field represents a message type, this method always returns nil (even though
// for proto2 files, the default value should be a default instance of the message type).
// If the field represents an enum type, this method returns an int32 corresponding to the
// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
// this field is repeated (and not a map), it returns a nil []interface{}.
//
// Otherwise, it returns the declared default value for the field or a zero value, if no
// default is declared or if the file is proto3. The type of said return value corresponds
// to the type of the field:
//  +-------------------------+-----------+
//  | Declared Type           | Go Type   |
//  +-------------------------+-----------+
//  | int32, sint32, sfixed32 | int32     |
//  | int64, sint64, sfixed64 | int64     |
//  | uint32, fixed32         | uint32    |
//  | uint64, fixed64         | uint64    |
//  | float                   | float32   |
//  | double                  | float64   |
//  | bool                    | bool      |
//  | string                  | string    |
//  | bytes                   | []byte    |
//  +-------------------------+-----------+
func (fd *FieldDescriptor) GetDefaultValue() interface{} {
	return fd.getDefaultValue()
}
+
// EnumDescriptor describes an enum declared in a proto file.
type EnumDescriptor struct {
	proto          *dpb.EnumDescriptorProto // underlying descriptor proto
	parent         Descriptor               // enclosing file or message
	file           *FileDescriptor          // file in which this enum is declared
	values         []*EnumValueDescriptor   // values, in declaration order
	valuesByNum    sortedValues             // values sorted by number (stable w.r.t. declaration order)
	fqn            string                   // fully qualified name
	sourceInfoPath []int32                  // path used to look up source code info
}
+
// createEnumDescriptor builds an EnumDescriptor for the given enum proto,
// creating descriptors for all of its values and recording them in symbols,
// keyed by fully qualified name. The new descriptor and its fully qualified
// name are returned.
func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
	enumName := merge(enclosing, ed.GetName())
	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
	for _, ev := range ed.GetValue() {
		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
		symbols[n] = evd
		ret.values = append(ret.values, evd)
	}
	if len(ret.values) > 0 {
		// Keep a second view of the values sorted by numeric value. A stable
		// sort preserves declaration order among aliased values, so that
		// FindValueByNumber returns the first declared value for a number.
		ret.valuesByNum = make(sortedValues, len(ret.values))
		copy(ret.valuesByNum, ret.values)
		sort.Stable(ret.valuesByNum)
	}
	return ret, enumName
}
+
// sortedValues implements sort.Interface to order enum value descriptors by
// their numeric value.
type sortedValues []*EnumValueDescriptor

func (sv sortedValues) Len() int {
	return len(sv)
}

func (sv sortedValues) Less(i, j int) bool {
	return sv[i].GetNumber() < sv[j].GetNumber()
}

func (sv sortedValues) Swap(i, j int) {
	sv[i], sv[j] = sv[j], sv[i]
}
+
// resolve records the enum's source info path and resolves all of its values.
func (ed *EnumDescriptor) resolve(path []int32) {
	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
	path = append(path, internal.Enum_valuesTag)
	for i, evd := range ed.values {
		evd.resolve(append(path, int32(i)))
	}
}
+
// GetName returns the simple (unqualified) name of the enum type.
func (ed *EnumDescriptor) GetName() string {
	return ed.proto.GetName()
}

// GetFullyQualifiedName returns the fully qualified name of the enum type.
// This includes the package name (if there is one) as well as the names of any
// enclosing messages.
func (ed *EnumDescriptor) GetFullyQualifiedName() string {
	return ed.fqn
}

// GetParent returns the enum type's enclosing descriptor. For top-level enums,
// this will be a file descriptor. Otherwise it will be the descriptor for the
// enclosing message.
func (ed *EnumDescriptor) GetParent() Descriptor {
	return ed.parent
}

// GetFile returns the descriptor for the file in which this enum is defined.
func (ed *EnumDescriptor) GetFile() *FileDescriptor {
	return ed.file
}

// GetOptions returns the enum type's options. Most usages will be more
// interested in GetEnumOptions, which has a concrete return type. This generic
// version is present to satisfy the Descriptor interface.
func (ed *EnumDescriptor) GetOptions() proto.Message {
	return ed.proto.GetOptions()
}

// GetEnumOptions returns the enum type's options. It is the same value
// returned by GetOptions, but concretely typed.
func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
	return ed.proto.GetOptions()
}

// GetSourceInfo returns source info for the enum type, if present in the
// descriptor. Not all descriptors will contain source info. If non-nil, the
// returned info contains information about the location in the file where the
// enum type was defined and also contains comments associated with the enum
// definition.
func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
}

// AsProto returns the underlying descriptor proto. Most usages will be more
// interested in AsEnumDescriptorProto, which has a concrete return type. This
// generic version is present to satisfy the Descriptor interface.
func (ed *EnumDescriptor) AsProto() proto.Message {
	return ed.proto
}

// AsEnumDescriptorProto returns the underlying descriptor proto.
func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
	return ed.proto
}

// String returns the underlying descriptor proto, in compact text format.
func (ed *EnumDescriptor) String() string {
	return ed.proto.String()
}

// GetValues returns all of the allowed values defined for this enum, in the
// order in which they were declared.
func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
	return ed.values
}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+ fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+ if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+ return vd
+ } else {
+ return nil
+ }
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+ index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+ if index < len(ed.valuesByNum) {
+ vd := ed.valuesByNum[index]
+ if vd.GetNumber() == num {
+ return vd
+ }
+ }
+ return nil
+}
+
// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
type EnumValueDescriptor struct {
	proto          *dpb.EnumValueDescriptorProto // underlying descriptor proto
	parent         *EnumDescriptor               // enum in which this value is declared
	file           *FileDescriptor               // file in which this value is declared
	fqn            string                        // fully qualified name
	sourceInfoPath []int32                       // path used to look up source code info
}
+
// createEnumValueDescriptor builds an EnumValueDescriptor for the given value
// proto and returns it along with its fully qualified name, which is formed by
// appending the value's name to the enclosing enum's fully qualified name.
func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
	valName := merge(enclosing, evd.GetName())
	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
}

// resolve records the path used to locate this enum value's source code info.
func (vd *EnumValueDescriptor) resolve(path []int32) {
	vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
}
+
// GetName returns the name of the enum value.
func (vd *EnumValueDescriptor) GetName() string {
	return vd.proto.GetName()
}

// GetNumber returns the numeric value associated with this enum value.
func (vd *EnumValueDescriptor) GetNumber() int32 {
	return vd.proto.GetNumber()
}

// GetFullyQualifiedName returns the fully qualified name of the enum value.
// Unlike GetName, this includes the fully qualified name of the enclosing enum.
func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
	return vd.fqn
}

// GetParent returns the descriptor for the enum in which this enum value is
// defined. Most usages will prefer to use GetEnum, which has a concrete return
// type. This more generic method is present to satisfy the Descriptor interface.
func (vd *EnumValueDescriptor) GetParent() Descriptor {
	return vd.parent
}

// GetEnum returns the enum in which this enum value is defined.
func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
	return vd.parent
}

// GetFile returns the descriptor for the file in which this enum value is
// defined.
func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
	return vd.file
}

// GetOptions returns the enum value's options. Most usages will be more
// interested in GetEnumValueOptions, which has a concrete return type. This
// generic version is present to satisfy the Descriptor interface.
func (vd *EnumValueDescriptor) GetOptions() proto.Message {
	return vd.proto.GetOptions()
}

// GetEnumValueOptions returns the enum value's options. It is the same value
// returned by GetOptions, but concretely typed.
func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
	return vd.proto.GetOptions()
}

// GetSourceInfo returns source info for the enum value, if present in the
// descriptor. Not all descriptors will contain source info. If non-nil, the
// returned info contains information about the location in the file where the
// enum value was defined and also contains comments associated with the enum
// value definition.
func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
}

// AsProto returns the underlying descriptor proto. Most usages will be more
// interested in AsEnumValueDescriptorProto, which has a concrete return type.
// This generic version is present to satisfy the Descriptor interface.
func (vd *EnumValueDescriptor) AsProto() proto.Message {
	return vd.proto
}

// AsEnumValueDescriptorProto returns the underlying descriptor proto.
func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
	return vd.proto
}

// String returns the underlying descriptor proto, in compact text format.
func (vd *EnumValueDescriptor) String() string {
	return vd.proto.String()
}
+
// ServiceDescriptor describes an RPC service declared in a proto file.
type ServiceDescriptor struct {
	proto          *dpb.ServiceDescriptorProto // underlying descriptor proto
	file           *FileDescriptor             // file in which this service is declared
	methods        []*MethodDescriptor         // the service's methods, in declaration order
	fqn            string                      // fully qualified name
	sourceInfoPath []int32                     // path used to look up source code info
}
+
// createServiceDescriptor builds a ServiceDescriptor for the given service
// proto, creating descriptors for all of its methods and recording them in
// symbols, keyed by fully qualified name. The new descriptor and its fully
// qualified name are returned.
func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
	serviceName := merge(enclosing, sd.GetName())
	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
	for _, m := range sd.GetMethod() {
		md, n := createMethodDescriptor(fd, ret, serviceName, m)
		symbols[n] = md
		ret.methods = append(ret.methods, md)
	}
	return ret, serviceName
}
+
+func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
+ sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ path = append(path, internal.Service_methodsTag)
+ for i, md := range sd.methods {
+ if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+ return sd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+ return sd.fqn
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+ return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+ return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+ return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+ return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+ return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+ return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+ return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+ return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+ fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+ if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+ return md
+ } else {
+ return nil
+ }
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+ proto *dpb.MethodDescriptorProto // underlying descriptor proto
+ parent *ServiceDescriptor // service in which this method is declared
+ file *FileDescriptor // file in which this method is declared
+ inType *MessageDescriptor // request type; nil until resolve is called
+ outType *MessageDescriptor // response type; nil until resolve is called
+ fqn string // fully qualified name of the method
+ sourceInfoPath []int32 // path used to look up source code info
+}
+
+// createMethodDescriptor constructs a MethodDescriptor for the given proto
+// and returns it along with the method's fully qualified name.
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
+ // request and response types get resolved later
+ methodName := merge(enclosing, md.GetName())
+ return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+}
+
+// resolve records this method's source-info path and resolves the references
+// to the method's request and response message types.
+func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
+ md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
+ return err
+ } else {
+ md.inType = desc.(*MessageDescriptor)
+ }
+ if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+ return err
+ } else {
+ md.outType = desc.(*MessageDescriptor)
+ }
+ return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+ return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+ return md.fqn
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+ return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+ return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+ return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+ return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+ return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+ return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+ return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+ return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+ return md.proto.GetServerStreaming()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+ return md.proto.GetClientStreaming()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+ return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+ return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+ proto *dpb.OneofDescriptorProto // underlying descriptor proto
+ parent *MessageDescriptor // message in which this one-of is declared
+ file *FileDescriptor // file in which this one-of is declared
+ choices []*FieldDescriptor // fields belonging to this one-of
+ fqn string // fully qualified name of the one-of
+ sourceInfoPath []int32 // path used to look up source code info
+}
+
+// createOneOfDescriptor constructs a OneOfDescriptor for the index-th one-of
+// of the parent message, linking in each of the parent's fields whose
+// OneofIndex refers to it. It returns the new descriptor along with the
+// one-of's fully qualified name.
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
+ oneOfName := merge(enclosing, od.GetName())
+ ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+ for _, f := range parent.fields {
+ oi := f.proto.OneofIndex
+ if oi != nil && *oi == int32(index) {
+ f.oneOf = ret
+ ret.choices = append(ret.choices, f)
+ }
+ }
+ return ret, oneOfName
+}
+
+// resolve records this one-of's source-info path.
+func (od *OneOfDescriptor) resolve(path []int32) {
+ od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+ return od.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+ return od.fqn
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+ return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+ return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+ return od.file
+}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+ return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+ return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+ return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+ return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+ return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+ return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+ return od.choices
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared. It maps a (possibly partially qualified) name to the
+// descriptor it denotes in that scope, or nil if the name is not found.
+type scope func(string) Descriptor
+
+// fileScope returns a scope that searches for symbols declared at the
+// top level of the given file (or of files sharing its package hierarchy).
+func fileScope(fd *FileDescriptor) scope {
+ // we search symbols in this file, but also symbols in other files that have
+ // the same package as this file or a "parent" package (in protobuf,
+ // packages are a hierarchy like C++ namespaces)
+ prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
+ return func(name string) Descriptor {
+ for _, prefix := range prefixes {
+ n := merge(prefix, name)
+ d := findSymbol(fd, n, false)
+ if d != nil {
+ return d
+ }
+ }
+ return nil
+ }
+}
+
+// messageScope returns a scope that searches for symbols declared directly
+// inside the given message.
+func messageScope(md *MessageDescriptor) scope {
+ return func(name string) Descriptor {
+ n := merge(md.fqn, name)
+ if d, ok := md.file.symbols[n]; ok {
+ return d
+ }
+ return nil
+ }
+}
+
+// resolve resolves the given symbol name, in the context of the given file and
+// lexical scopes, to a descriptor. An error is returned if the name cannot be
+// resolved.
+func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
+ if strings.HasPrefix(name, ".") {
+ // already fully-qualified
+ d := findSymbol(fd, name[1:], false)
+ if d != nil {
+ return d, nil
+ }
+ } else {
+ // unqualified, so we look in the enclosing (last) scope first and move
+ // towards outermost (first) scope, trying to resolve the symbol
+ for i := len(scopes) - 1; i >= 0; i-- {
+ d := scopes[i](name)
+ if d != nil {
+ return d, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
+}
+
+// findSymbol searches for the named symbol in the given file and, if not found
+// there, in the file's dependencies. When public is true, only transitive
+// public imports are searched; otherwise all direct imports are searched.
+func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
+ d := fd.symbols[name]
+ if d != nil {
+ return d
+ }
+
+ // When public = false, we are searching only directly imported symbols. But we
+ // also need to search transitive public imports due to semantics of public imports.
+ var deps []*FileDescriptor
+ if public {
+ deps = fd.publicDeps
+ } else {
+ deps = fd.deps
+ }
+ for _, dep := range deps {
+ d = findSymbol(dep, name, true)
+ if d != nil {
+ return d
+ }
+ }
+
+ return nil
+}
+
+// merge joins the two name components with a dot, omitting the dot when the
+// first component is empty.
+func merge(a, b string) string {
+ if a == "" {
+ return b
+ } else {
+ return a + "." + b
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..cd7348e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,31 @@
+//+build appengine gopherjs purego
+// NB: other environments where unsafe is inappropriate should use "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+// jsonNameMap and memoizedDefault are empty placeholder types: without
+// unsafe, no memoization is done, so there is no state to store.
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: With allowed use of unsafe, we use it to atomically define an index
+ // via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index
+ // and do a linear scan of fields each time.
+ for _, f := range md.fields {
+ jn := f.proto.GetJsonName()
+ if jn == "" {
+ // fall back to the declared field name when no JSON name is set
+ jn = f.proto.GetName()
+ }
+ if jn == jsonName {
+ return f
+ }
+ }
+ return nil
+}
+
+// getDefaultValue computes the field's default value. Without unsafe, the
+// value is not memoized, so it is re-computed on every call.
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..19b808d
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,60 @@
+//+build !appengine,!gopherjs,!purego
+// NB: other environments where unsafe is inappropriate should use "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{} // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: We don't want to eagerly index JSON names because many programs won't use it.
+ // So we want to do it lazily, but also make sure the result is thread-safe. So we
+ // atomically load/store the map as if it were a normal pointer. We don't use other
+ // mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+ // do this lazily because those types cannot be copied, and we'd rather not induce
+ // 'go vet' errors in programs that use descriptors and try to copy them.
+ // If multiple goroutines try to access the index at the same time, before it is
+ // built, they will all end up computing the index redundantly. Future reads of
+ // the index will use whatever was the "last one stored" by those racing goroutines.
+ // Since building the index is deterministic, this is fine: all indices computed
+ // will be the same.
+ addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+ jsonNames := atomic.LoadPointer(addrOfJsonNames)
+ var index map[string]*FieldDescriptor
+ if jsonNames == nil {
+ // slow path: compute the index
+ index = map[string]*FieldDescriptor{}
+ for _, f := range md.fields {
+ jn := f.proto.GetJsonName()
+ if jn == "" {
+ jn = f.proto.GetName()
+ }
+ index[jn] = f
+ }
+ atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+ } else {
+ // fast path: reuse the previously stored index
+ *(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+ }
+ return index[jsonName]
+}
+
+// getDefaultValue returns the field's default value, computing it on first
+// use and memoizing the result via an atomic pointer store.
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+ def := atomic.LoadPointer(addrOfDef)
+ if def != nil {
+ return *(*interface{})(def)
+ }
+ // slow path: compute the default, potentially involves decoding value
+ d := fd.determineDefault()
+ atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+ return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..1740dce
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,41 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through rich descriptor's
+// methods.
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..ab93032
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,313 @@
+package desc
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+var (
+ // globalImportPathConf maps alternate import paths to the actual paths
+ // under which files are registered. Lazily allocated; guarded by
+ // globalImportPathMu.
+ globalImportPathConf map[string]string
+ globalImportPathMu sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+func RegisterImportPath(registerPath, importPath string) {
+ if len(importPath) == 0 {
+ panic("import path cannot be empty")
+ }
+ desc := proto.FileDescriptor(registerPath)
+ if len(desc) == 0 {
+ panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+ }
+ globalImportPathMu.Lock()
+ defer globalImportPathMu.Unlock()
+ if reg := globalImportPathConf[importPath]; reg != "" {
+ panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+ }
+ if globalImportPathConf == nil {
+ // lazily allocate the map on first registration
+ globalImportPathConf = map[string]string{}
+ }
+ globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
+func ResolveImport(importPath string) string {
+ importPath = clean(importPath)
+ globalImportPathMu.RLock()
+ defer globalImportPathMu.RUnlock()
+ reg := globalImportPathConf[importPath]
+ if reg == "" {
+ return importPath
+ }
+ return reg
+}
+
+// ImportResolver lets you work-around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+// import "foo/bar.proto";
+// However, when protoc is invoked, the command-line args looks like so:
+// protoc -Ifoo/ --go_out=foo/ bar.proto
+// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+// Because the path given to protoc is just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+// fd, err := desc.LoadFileDescriptor("baz.proto")
+// // err will be non-nil, complaining that there is no such file
+// // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+// var r desc.ImportResolver
+// r.RegisterImportPath("bar.proto", "foo/bar.proto")
+// fd, err := r.LoadFileDescriptor("baz.proto")
+// // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path then how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+type ImportResolver struct {
+ // children maps the first path element of a source context to the child
+ // resolver that holds rules for the remainder of that context's path.
+ children map[string]*ImportResolver
+ // importPaths maps import paths to their registered alternates for this
+ // resolver's context.
+ importPaths map[string]string
+
+ // By default, an ImportResolver will fallback to consulting any paths
+ // registered via the top-level RegisterImportPath function. Setting this
+ // field to true will cause the ImportResolver to skip that fallback and
+ // only examine its own locally registered paths.
+ SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+ if r != nil {
+ res := r.resolveImport(clean(source), clean(importPath))
+ if res != "" {
+ return res
+ }
+ if r.SkipFallbackRules {
+ return importPath
+ }
+ }
+ return ResolveImport(importPath)
+}
+
+// resolveImport searches for a registered alternate for importPath, preferring
+// rules registered for the deepest matching source context. It returns the
+// empty string when no rule matches.
+// NOTE(review): source is split on filepath.Separator; proto import paths use
+// '/', so on Windows (where the separator is '\') this may not descend into
+// child contexts as expected -- confirm intended behavior.
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+ if source == "" {
+ return r.importPaths[importPath]
+ }
+ var car, cdr string
+ idx := strings.IndexRune(source, filepath.Separator)
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch != nil {
+ // a more specific (deeper) rule wins over a rule at this level
+ if reg := ch.resolveImport(cdr, importPath); reg != "" {
+ return reg
+ }
+ }
+ return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fallback to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+ r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must be the same
+// relative path of the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fallback to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+ importPath = clean(importPath)
+ if len(importPath) == 0 {
+ panic("import path cannot be empty")
+ }
+ registerPath = clean(registerPath)
+ if len(registerPath) == 0 {
+ panic("registered path cannot be empty")
+ }
+ r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+// registerImportPathFrom recursively walks the source context path, creating
+// child resolvers as needed, and records the mapping at the leaf resolver.
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+ if source == "" {
+ if r.importPaths == nil {
+ r.importPaths = map[string]string{}
+ } else if reg := r.importPaths[importPath]; reg != "" {
+ panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+ }
+ r.importPaths[importPath] = registerPath
+ return
+ }
+ var car, cdr string
+ idx := strings.IndexRune(source, filepath.Separator)
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch == nil {
+ if r.children == nil {
+ r.children = map[string]*ImportResolver{}
+ }
+ ch = &ImportResolver{}
+ r.children[car] = ch
+ }
+ ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+ return loadFileDescriptor(filePath, r)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+ return loadMessageDescriptor(msgName, r)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForMessage(msg, r)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+ return loadMessageDescriptorForType(msgType, r)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForEnum(enum, r)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+ return loadEnumDescriptorForType(enumType, r)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+ return loadFieldDescriptorForExtension(ext, r)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+ return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+ return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+ return createFileDescriptorFromSet(fds, r)
+}
+
+// CreateFileDescriptorsFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+ return createFileDescriptorsFromSet(fds, r)
+}
+
+// dotPrefix is the "./" prefix (using the OS path separator) that clean strips.
+const dotPrefix = "." + string(filepath.Separator)
+
+// clean normalizes the given path: empty and "." paths become "", and any
+// leading "./" prefix is removed from the cleaned result.
+func clean(path string) string {
+ if path == "" {
+ return ""
+ }
+ path = filepath.Clean(path)
+ if path == "." {
+ return ""
+ }
+ return strings.TrimPrefix(path, dotPrefix)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..b4150b8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,107 @@
+package internal
+
+import (
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
// SourceInfoMap is a map of paths in a descriptor to the corresponding source
// code info. Keys are produced by asMapKey from the int32 path slices, and a
// single path may map to multiple locations.
type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path. If there are
+// multiple locations for the same path, the first one is returned.
+func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+ v := m[asMapKey(path)]
+ if len(v) > 0 {
+ return v[0]
+ }
+ return nil
+}
+
// GetAll returns all source code info for the given path.
func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
	// Unlike Get, this returns every recorded location, not just the first.
	return m[asMapKey(path)]
}
+
+// Add stores the given source code info for the given path.
+func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+ m[asMapKey(path)] = append(m[asMapKey(path)], loc)
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+ k := asMapKey(path)
+ if _, ok := m[k]; ok {
+ return false
+ }
+ m[k] = []*dpb.SourceCodeInfo_Location{loc}
+ return true
+}
+
// asMapKey encodes a path of int32s into a string so it can serve as a map
// key; each element occupies four bytes, little-endian.
func asMapKey(slice []int32) string {
	// NB: arrays should be usable as map keys, but this does not
	// work due to a bug: https://github.com/golang/go/issues/22605
	b := make([]byte, 0, len(slice)*4)
	for _, s := range slice {
		b = append(b, byte(s), byte(s>>8), byte(s>>16), byte(s>>24))
	}
	return string(b)
}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+ res := SourceInfoMap{}
+ PopulateSourceInfoMap(fd, res)
+ return res
+}
+
// PopulateSourceInfoMap populates the given SourceInfoMap with information from
// the given file descriptor.
func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
	// Index every location by its path; a path can repeat, so Add appends.
	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
		m.Add(l.Path, l)
	}
}
+
// NB: This wonkiness allows desc.Descriptor impl to implement an interface that
// is only usable from this package, by embedding a SourceInfoComputeFunc that
// implements the actual logic (which must live in desc package to avoid a
// dependency cycle).

// SourceInfoComputer is a single method which will be invoked to recompute
// source info. This is needed for the protoparse package, which needs to link
// descriptors without source info in order to interpret options, but then needs
// to re-compute source info after that interpretation so that final linked
// descriptors expose the right info.
type SourceInfoComputer interface {
	recomputeSourceInfo()
}

// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
// be aliased in the desc package to an unexported name so it is not marked as
// an exported field in reflection and not present in Go docs.
type SourceInfoComputeFunc func()

// recomputeSourceInfo implements SourceInfoComputer by invoking the function
// itself.
func (f SourceInfoComputeFunc) recomputeSourceInfo() {
	f()
}

// RecomputeSourceInfo is used to initiate recomputation of source info. This
// is used by the protoparse package, after it interprets options.
func RecomputeSourceInfo(c SourceInfoComputer) {
	c.recomputeSourceInfo()
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..139c9cd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,270 @@
+package internal
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ // MaxTag is the maximum allowed tag number for a field.
+ MaxTag = 536870911 // 2^29 - 1
+
+ // SpecialReservedStart is the first tag in a range that is reserved and not
+ // allowed for use in message definitions.
+ SpecialReservedStart = 19000
+ // SpecialReservedEnd is the last tag in a range that is reserved and not
+ // allowed for use in message definitions.
+ SpecialReservedEnd = 19999
+
+ // NB: It would be nice to use constants from generated code instead of
+ // hard-coding these here. But code-gen does not emit these as constants
+ // anywhere. The only places they appear in generated code are struct tags
+ // on fields of the generated descriptor protos.
+
+ // File_packageTag is the tag number of the package element in a file
+ // descriptor proto.
+ File_packageTag = 2
+ // File_dependencyTag is the tag number of the dependencies element in a
+ // file descriptor proto.
+ File_dependencyTag = 3
+ // File_messagesTag is the tag number of the messages element in a file
+ // descriptor proto.
+ File_messagesTag = 4
+ // File_enumsTag is the tag number of the enums element in a file descriptor
+ // proto.
+ File_enumsTag = 5
+ // File_servicesTag is the tag number of the services element in a file
+ // descriptor proto.
+ File_servicesTag = 6
+ // File_extensionsTag is the tag number of the extensions element in a file
+ // descriptor proto.
+ File_extensionsTag = 7
+ // File_optionsTag is the tag number of the options element in a file
+ // descriptor proto.
+ File_optionsTag = 8
+ // File_syntaxTag is the tag number of the syntax element in a file
+ // descriptor proto.
+ File_syntaxTag = 12
+ // Message_nameTag is the tag number of the name element in a message
+ // descriptor proto.
+ Message_nameTag = 1
+ // Message_fieldsTag is the tag number of the fields element in a message
+ // descriptor proto.
+ Message_fieldsTag = 2
+ // Message_nestedMessagesTag is the tag number of the nested messages
+ // element in a message descriptor proto.
+ Message_nestedMessagesTag = 3
+ // Message_enumsTag is the tag number of the enums element in a message
+ // descriptor proto.
+ Message_enumsTag = 4
+ // Message_extensionRangeTag is the tag number of the extension ranges
+ // element in a message descriptor proto.
+ Message_extensionRangeTag = 5
+ // Message_extensionsTag is the tag number of the extensions element in a
+ // message descriptor proto.
+ Message_extensionsTag = 6
+ // Message_optionsTag is the tag number of the options element in a message
+ // descriptor proto.
+ Message_optionsTag = 7
+ // Message_oneOfsTag is the tag number of the one-ofs element in a message
+ // descriptor proto.
+ Message_oneOfsTag = 8
+ // Message_reservedRangeTag is the tag number of the reserved ranges element
+ // in a message descriptor proto.
+ Message_reservedRangeTag = 9
+ // Message_reservedNameTag is the tag number of the reserved names element
+ // in a message descriptor proto.
+ Message_reservedNameTag = 10
+ // ExtensionRange_startTag is the tag number of the start index in an
+ // extension range proto.
+ ExtensionRange_startTag = 1
+ // ExtensionRange_endTag is the tag number of the end index in an
+ // extension range proto.
+ ExtensionRange_endTag = 2
+ // ExtensionRange_optionsTag is the tag number of the options element in an
+ // extension range proto.
+ ExtensionRange_optionsTag = 3
+ // ReservedRange_startTag is the tag number of the start index in a reserved
+ // range proto.
+ ReservedRange_startTag = 1
+ // ReservedRange_endTag is the tag number of the end index in a reserved
+ // range proto.
+ ReservedRange_endTag = 2
+ // Field_nameTag is the tag number of the name element in a field descriptor
+ // proto.
+ Field_nameTag = 1
+ // Field_extendeeTag is the tag number of the extendee element in a field
+ // descriptor proto.
+ Field_extendeeTag = 2
+ // Field_numberTag is the tag number of the number element in a field
+ // descriptor proto.
+ Field_numberTag = 3
+ // Field_labelTag is the tag number of the label element in a field
+ // descriptor proto.
+ Field_labelTag = 4
+ // Field_typeTag is the tag number of the type element in a field descriptor
+ // proto.
+ Field_typeTag = 5
+ // Field_typeNameTag is the tag number of the type name element in a field
+ // descriptor proto.
+ Field_typeNameTag = 6
+ // Field_defaultTag is the tag number of the default value element in a
+ // field descriptor proto.
+ Field_defaultTag = 7
+ // Field_optionsTag is the tag number of the options element in a field
+ // descriptor proto.
+ Field_optionsTag = 8
+ // Field_jsonNameTag is the tag number of the JSON name element in a field
+ // descriptor proto.
+ Field_jsonNameTag = 10
+ // OneOf_nameTag is the tag number of the name element in a one-of
+ // descriptor proto.
+ OneOf_nameTag = 1
+ // OneOf_optionsTag is the tag number of the options element in a one-of
+ // descriptor proto.
+ OneOf_optionsTag = 2
+ // Enum_nameTag is the tag number of the name element in an enum descriptor
+ // proto.
+ Enum_nameTag = 1
+ // Enum_valuesTag is the tag number of the values element in an enum
+ // descriptor proto.
+ Enum_valuesTag = 2
+ // Enum_optionsTag is the tag number of the options element in an enum
+ // descriptor proto.
+ Enum_optionsTag = 3
+ // Enum_reservedRangeTag is the tag number of the reserved ranges element in
+ // an enum descriptor proto.
+ Enum_reservedRangeTag = 4
+ // Enum_reservedNameTag is the tag number of the reserved names element in
+ // an enum descriptor proto.
+ Enum_reservedNameTag = 5
+ // EnumVal_nameTag is the tag number of the name element in an enum value
+ // descriptor proto.
+ EnumVal_nameTag = 1
+ // EnumVal_numberTag is the tag number of the number element in an enum
+ // value descriptor proto.
+ EnumVal_numberTag = 2
+ // EnumVal_optionsTag is the tag number of the options element in an enum
+ // value descriptor proto.
+ EnumVal_optionsTag = 3
+ // Service_nameTag is the tag number of the name element in a service
+ // descriptor proto.
+ Service_nameTag = 1
+ // Service_methodsTag is the tag number of the methods element in a service
+ // descriptor proto.
+ Service_methodsTag = 2
+ // Service_optionsTag is the tag number of the options element in a service
+ // descriptor proto.
+ Service_optionsTag = 3
+ // Method_nameTag is the tag number of the name element in a method
+ // descriptor proto.
+ Method_nameTag = 1
+ // Method_inputTag is the tag number of the input type element in a method
+ // descriptor proto.
+ Method_inputTag = 2
+ // Method_outputTag is the tag number of the output type element in a method
+ // descriptor proto.
+ Method_outputTag = 3
+ // Method_optionsTag is the tag number of the options element in a method
+ // descriptor proto.
+ Method_optionsTag = 4
+ // Method_inputStreamTag is the tag number of the input stream flag in a
+ // method descriptor proto.
+ Method_inputStreamTag = 5
+ // Method_outputStreamTag is the tag number of the output stream flag in a
+ // method descriptor proto.
+ Method_outputStreamTag = 6
+
+ // UninterpretedOptionsTag is the tag number of the uninterpreted options
+ // element. All *Options messages use the same tag for the field that stores
+ // uninterpreted options.
+ UninterpretedOptionsTag = 999
+
+ // Uninterpreted_nameTag is the tag number of the name element in an
+ // uninterpreted options proto.
+ Uninterpreted_nameTag = 2
+ // Uninterpreted_identTag is the tag number of the identifier value in an
+ // uninterpreted options proto.
+ Uninterpreted_identTag = 3
+ // Uninterpreted_posIntTag is the tag number of the positive int value in an
+ // uninterpreted options proto.
+ Uninterpreted_posIntTag = 4
+ // Uninterpreted_negIntTag is the tag number of the negative int value in an
+ // uninterpreted options proto.
+ Uninterpreted_negIntTag = 5
+ // Uninterpreted_doubleTag is the tag number of the double value in an
+ // uninterpreted options proto.
+ Uninterpreted_doubleTag = 6
+ // Uninterpreted_stringTag is the tag number of the string value in an
+ // uninterpreted options proto.
+ Uninterpreted_stringTag = 7
+ // Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+ // uninterpreted options proto.
+ Uninterpreted_aggregateTag = 8
+ // UninterpretedName_nameTag is the tag number of the name element in an
+ // uninterpreted option name proto.
+ UninterpretedName_nameTag = 1
+)
+
// JsonName returns the default JSON name for a field with the given name:
// underscores are dropped and the letter following each underscore is
// upper-cased, so "foo_bar" becomes "fooBar".
func JsonName(name string) string {
	out := make([]rune, 0, len(name))
	upperNext := false
	for i, r := range name {
		if r == '_' {
			upperNext = true
			continue
		}
		// The very first rune is never upper-cased, matching protoc.
		if upperNext && i > 0 {
			r = unicode.ToUpper(r)
		}
		upperNext = false
		out = append(out, r)
	}
	return string(out)
}
+
// InitCap returns the given field name, but with the first letter capitalized.
// An empty name is returned unchanged; previously it produced a spurious
// replacement character, since utf8.DecodeRuneInString yields RuneError for "".
func InitCap(name string) string {
	if name == "" {
		return ""
	}
	r, sz := utf8.DecodeRuneInString(name)
	return string(unicode.ToUpper(r)) + name[sz:]
}
+
// CreatePrefixList returns a list of package prefixes to search when resolving
// a symbol name. If the given package is blank, it returns only the empty
// string. If the given package contains only one token, e.g. "foo", it returns
// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
// successively shorter prefixes of the package and then the empty string. For
// example, for a package named "foo.bar.baz" it will return the following list:
// ["foo.bar.baz", "foo.bar", "foo", ""]
func CreatePrefixList(pkg string) []string {
	if pkg == "" {
		return []string{""}
	}
	// Walk backwards so prefixes come out longest-first: the full package
	// is always first, and the empty string always last.
	prefixes := []string{pkg}
	for i := len(pkg) - 1; i >= 0; i-- {
		if pkg[i] == '.' {
			prefixes = append(prefixes, pkg[:i])
		}
	}
	return append(prefixes, "")
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..4a05830
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,341 @@
+package desc
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/internal"
+)
+
var (
	// cacheMu guards all three caches below.
	cacheMu sync.RWMutex
	// filesCache maps file name to its processed descriptor.
	filesCache = map[string]*FileDescriptor{}
	// messagesCache maps a message's fully-qualified name to its descriptor.
	messagesCache = map[string]*MessageDescriptor{}
	// enumCache maps an enum's (non-pointer) Go type to its descriptor; see
	// the NB comment near the enum-loading functions for why it is keyed by
	// reflect.Type instead of by name.
	enumCache = map[reflect.Type]*EnumDescriptor{}
)
+
// LoadFileDescriptor creates a file descriptor using the bytes returned by
// proto.FileDescriptor. Descriptors are cached so that they do not need to be
// re-processed if the same file is fetched again later.
func LoadFileDescriptor(file string) (*FileDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadFileDescriptor(file, nil)
}
+
// loadFileDescriptor returns the (possibly cached) descriptor for the named
// file, linking it with the given resolver's alternate paths if needed.
func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
	// Fast path: check the cache under the read lock.
	f := getFileFromCache(file)
	if f != nil {
		return f, nil
	}
	// Slow path: take the write lock; the locked helper re-checks the cache
	// in case another goroutine populated it in the interim.
	cacheMu.Lock()
	defer cacheMu.Unlock()
	return loadFileDescriptorLocked(file, r)
}
+
// loadFileDescriptorLocked does the work of loadFileDescriptor; it must be
// called with cacheMu held for writing.
func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
	// Re-check the cache: the caller only held the read lock when it first
	// looked, so another goroutine may have added the entry since.
	f := filesCache[file]
	if f != nil {
		return f, nil
	}
	// Fetch and decode the registered descriptor proto for this file.
	fd, err := internal.LoadFileDescriptor(file)
	if err != nil {
		return nil, err
	}

	// Link it (and, recursively, its dependencies) into a rich descriptor.
	f, err = toFileDescriptorLocked(fd, r)
	if err != nil {
		return nil, err
	}
	putCacheLocked(file, f)
	return f, nil
}
+
// toFileDescriptorLocked links the given descriptor proto into a rich
// FileDescriptor, loading all of its dependencies first. It must be called
// with cacheMu held for writing.
func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
	deps := make([]*FileDescriptor, len(fd.GetDependency()))
	for i, dep := range fd.GetDependency() {
		resolvedDep := r.ResolveImport(fd.GetName(), dep)
		var err error
		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
			// The resolver's alternate path isn't registered; try the
			// import path as originally written.
			deps[i], err = loadFileDescriptorLocked(dep, r)
		}
		if err != nil {
			return nil, err
		}
	}
	return CreateFileDescriptor(fd, deps...)
}
+
// getFileFromCache returns the cached descriptor for the given file name, or
// nil if it has not been loaded yet. It acquires the read lock.
func getFileFromCache(file string) *FileDescriptor {
	cacheMu.RLock()
	defer cacheMu.RUnlock()
	return filesCache[file]
}
+
// putCacheLocked caches the given file descriptor and, transitively, all of
// the message descriptors it contains. Must be called with cacheMu held for
// writing.
func putCacheLocked(filename string, fd *FileDescriptor) {
	filesCache[filename] = fd
	putMessageCacheLocked(fd.messages)
}
+
+func putMessageCacheLocked(mds []*MessageDescriptor) {
+ for _, md := range mds {
+ messagesCache[md.fqn] = md
+ putMessageCacheLocked(md.nested)
+ }
+}
+
// protoMessage is the interface implemented by generated messages, which all
// have a Descriptor() method in addition to the methods of proto.Message.
type protoMessage interface {
	proto.Message
	Descriptor() ([]byte, []int)
}
+
// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
// Message.Descriptor() for the given message type. If the given type is not recognized,
// then a nil descriptor is returned.
func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadMessageDescriptor(message, nil)
}
+
// loadMessageDescriptor resolves the named message via the generated-code
// registry and returns its descriptor, consulting the cache first.
func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
	// Fast path: descriptor already cached.
	m := getMessageFromCache(message)
	if m != nil {
		return m, nil
	}

	// Look up the generated Go type registered for this message name; an
	// unknown name yields (nil, nil), not an error.
	pt := proto.MessageType(message)
	if pt == nil {
		return nil, nil
	}
	msg, err := messageFromType(pt)
	if err != nil {
		return nil, err
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	return loadMessageDescriptorForTypeLocked(message, msg, r)
}
+
// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
// by message.Descriptor() for the given message type. If the given type is not recognized,
// then a nil descriptor is returned.
func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadMessageDescriptorForType(messageType, nil)
}
+
// loadMessageDescriptorForType converts the reflect.Type to a zero message
// value and loads that message's descriptor.
func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
	m, err := messageFromType(messageType)
	if err != nil {
		return nil, err
	}
	return loadMessageDescriptorForMessage(m, r)
}
+
// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
// returned by message.Descriptor(). If the given type is not recognized, then a nil
// descriptor is returned.
func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadMessageDescriptorForMessage(message, nil)
}
+
+func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
+ // efficiently handle dynamic messages
+ type descriptorable interface {
+ GetMessageDescriptor() *MessageDescriptor
+ }
+ if d, ok := message.(descriptorable); ok {
+ return d.GetMessageDescriptor(), nil
+ }
+
+ name := proto.MessageName(message)
+ if name == "" {
+ return nil, nil
+ }
+ m := getMessageFromCache(name)
+ if m != nil {
+ return m, nil
+ }
+
+ cacheMu.Lock()
+ defer cacheMu.Unlock()
+ return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+}
+
+func messageFromType(mt reflect.Type) (protoMessage, error) {
+ if mt.Kind() != reflect.Ptr {
+ mt = reflect.PtrTo(mt)
+ }
+ m, ok := reflect.Zero(mt).Interface().(protoMessage)
+ if !ok {
+ return nil, fmt.Errorf("failed to create message from type: %v", mt)
+ }
+ return m, nil
+}
+
// loadMessageDescriptorForTypeLocked builds (or fetches from cache) the
// descriptor for the named message. Must be called with cacheMu held for
// writing.
func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
	// Re-check the cache now that the write lock is held.
	m := messagesCache[name]
	if m != nil {
		return m, nil
	}

	// Decode the compressed file descriptor proto embedded in generated code.
	fdb, _ := message.Descriptor()
	fd, err := internal.DecodeFileDescriptor(name, fdb)
	if err != nil {
		return nil, err
	}

	f, err := toFileDescriptorLocked(fd, r)
	if err != nil {
		return nil, err
	}
	putCacheLocked(fd.GetName(), f)
	// NOTE(review): assumes the symbol is present and is a message; a corrupt
	// descriptor would make this type assertion panic.
	return f.FindSymbol(name).(*MessageDescriptor), nil
}
+
// getMessageFromCache returns the cached descriptor for the named message, or
// nil if it has not been loaded yet. It acquires the read lock.
func getMessageFromCache(message string) *MessageDescriptor {
	cacheMu.RLock()
	defer cacheMu.RUnlock()
	return messagesCache[message]
}
+
// protoEnum is the interface implemented by all generated enums.
type protoEnum interface {
	EnumDescriptor() ([]byte, []int)
}

// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
// it is not useful since protoc-gen-go does not expose the name anywhere in generated
// code or register it in a way that is accessible for reflection code. This also
// means we have to cache enum descriptors differently -- we can only cache them as
// they are requested, as opposed to caching all enum types whenever a file descriptor
// is cached. This is because we need to know the generated type of the enums, and we
// don't know that at the time of caching file descriptors.
+
// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
// by enum.EnumDescriptor() for the given enum type.
func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadEnumDescriptorForType(enumType, nil)
}
+
// loadEnumDescriptorForType returns the (possibly cached) descriptor for the
// enum with the given Go type.
func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
	// we cache descriptors using non-pointer type
	if enumType.Kind() == reflect.Ptr {
		enumType = enumType.Elem()
	}
	e := getEnumFromCache(enumType)
	if e != nil {
		return e, nil
	}
	// Materialize a zero value of the enum so we can ask for its descriptor.
	enum, err := enumFromType(enumType)
	if err != nil {
		return nil, err
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
}
+
// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
// returned by enum.EnumDescriptor().
func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadEnumDescriptorForEnum(enum, nil)
}
+
// loadEnumDescriptorForEnum returns the (possibly cached) descriptor for the
// given enum value.
func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
	et := reflect.TypeOf(enum)
	// we cache descriptors using non-pointer type
	if et.Kind() == reflect.Ptr {
		et = et.Elem()
		// Replace the pointer value with a zero value of the element type.
		enum = reflect.Zero(et).Interface().(protoEnum)
	}
	e := getEnumFromCache(et)
	if e != nil {
		return e, nil
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	return loadEnumDescriptorForTypeLocked(et, enum, r)
}
+
+func enumFromType(et reflect.Type) (protoEnum, error) {
+ if et.Kind() != reflect.Int32 {
+ et = reflect.PtrTo(et)
+ }
+ e, ok := reflect.Zero(et).Interface().(protoEnum)
+ if !ok {
+ return nil, fmt.Errorf("failed to create enum from type: %v", et)
+ }
+ return e, nil
+}
+
// loadEnumDescriptorForTypeLocked builds (or fetches from cache) the enum's
// descriptor. Must be called with cacheMu held for writing.
func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
	// Re-check the cache now that the write lock is held.
	e := enumCache[et]
	if e != nil {
		return e, nil
	}

	// path locates the enum within its file's descriptor proto (see findEnum).
	fdb, path := enum.EnumDescriptor()
	name := fmt.Sprintf("%v", et)
	fd, err := internal.DecodeFileDescriptor(name, fdb)
	if err != nil {
		return nil, err
	}
	// see if we already have cached "rich" descriptor
	f, ok := filesCache[fd.GetName()]
	if !ok {
		f, err = toFileDescriptorLocked(fd, r)
		if err != nil {
			return nil, err
		}
		putCacheLocked(fd.GetName(), f)
	}

	ed := findEnum(f, path)
	enumCache[et] = ed
	return ed, nil
}
+
// getEnumFromCache returns the cached descriptor for the given enum type, or
// nil if it has not been loaded yet. It acquires the read lock.
func getEnumFromCache(et reflect.Type) *EnumDescriptor {
	cacheMu.RLock()
	defer cacheMu.RUnlock()
	return enumCache[et]
}
+
// findEnum walks the given path to locate an enum descriptor within the file.
// All but the last path element index into (nested) messages; the final
// element indexes into that scope's enums.
func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
	if len(path) == 1 {
		// Top-level enum in the file.
		return fd.GetEnumTypes()[path[0]]
	}
	md := fd.GetMessageTypes()[path[0]]
	for _, i := range path[1 : len(path)-1] {
		md = md.GetNestedMessageTypes()[i]
	}
	return md.GetNestedEnumTypes()[path[len(path)-1]]
}
+
// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
// extension description.
func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
	// nil resolver: no alternate import paths are applied during linking.
	return loadFieldDescriptorForExtension(ext, nil)
}
+
// loadFieldDescriptorForExtension loads the file containing the extension and
// validates that the symbol found there matches the given ExtensionDesc.
func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
	file, err := loadFileDescriptor(ext.Filename, r)
	if err != nil {
		return nil, err
	}
	field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
	// make sure descriptor agrees with attributes of the ExtensionDesc
	if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
		field.GetNumber() != ext.Field {
		return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
	}
	return field, nil
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
new file mode 100644
index 0000000..2652053
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
@@ -0,0 +1 @@
+y.output
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
new file mode 100644
index 0000000..e1eb4df
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
@@ -0,0 +1,1056 @@
+package protoparse
+
+// This file defines all of the nodes in the proto AST.
+
// SourcePos identifies a location in a proto source file.
type SourcePos struct {
	Filename  string
	Line, Col int
	Offset    int
}

// unknownPos returns a placeholder position carrying only the file name, for
// nodes that have no usable location information.
func unknownPos(filename string) *SourcePos {
	return &SourcePos{Filename: filename}
}
+
// node is the interface implemented by all nodes in the AST
type node interface {
	start() *SourcePos
	end() *SourcePos
	leadingComments() []comment
	trailingComments() []comment
}

// terminalNode is implemented by leaf (token) nodes, which own their attached
// comments directly and can transfer them via pop/push.
type terminalNode interface {
	node
	popLeadingComment() comment
	pushTrailingComment(comment)
}

// Compile-time checks that the terminal node types satisfy terminalNode.
var _ terminalNode = (*basicNode)(nil)
var _ terminalNode = (*stringLiteralNode)(nil)
var _ terminalNode = (*intLiteralNode)(nil)
var _ terminalNode = (*floatLiteralNode)(nil)
var _ terminalNode = (*identNode)(nil)
+
// fileDecl is implemented by nodes that represent a whole source file.
type fileDecl interface {
	node
	getSyntax() node
}

var _ fileDecl = (*fileNode)(nil)
var _ fileDecl = (*noSourceNode)(nil)

// optionDecl is implemented by nodes that declare an option.
type optionDecl interface {
	node
	getName() node
	getValue() valueNode
}

var _ optionDecl = (*optionNode)(nil)
var _ optionDecl = (*noSourceNode)(nil)

// fieldDecl is implemented by nodes that declare a field: normal fields,
// groups, map fields, and synthetic map fields.
type fieldDecl interface {
	node
	fieldLabel() node
	fieldName() node
	fieldType() node
	fieldTag() node
	fieldExtendee() node
	getGroupKeyword() node
}

var _ fieldDecl = (*fieldNode)(nil)
var _ fieldDecl = (*groupNode)(nil)
var _ fieldDecl = (*mapFieldNode)(nil)
var _ fieldDecl = (*syntheticMapField)(nil)
var _ fieldDecl = (*noSourceNode)(nil)

// rangeDecl is implemented by nodes that declare a numeric range.
type rangeDecl interface {
	node
	rangeStart() node
	rangeEnd() node
}

var _ rangeDecl = (*rangeNode)(nil)
var _ rangeDecl = (*noSourceNode)(nil)

// enumValueDecl is implemented by nodes that declare an enum value.
type enumValueDecl interface {
	node
	getName() node
	getNumber() node
}

var _ enumValueDecl = (*enumValueNode)(nil)
var _ enumValueDecl = (*noSourceNode)(nil)

// msgDecl is implemented by nodes that declare a message scope.
type msgDecl interface {
	node
	messageName() node
}

var _ msgDecl = (*messageNode)(nil)
var _ msgDecl = (*groupNode)(nil)
var _ msgDecl = (*mapFieldNode)(nil)
var _ msgDecl = (*noSourceNode)(nil)

// methodDecl is implemented by nodes that declare an RPC method.
type methodDecl interface {
	node
	getInputType() node
	getOutputType() node
}

var _ methodDecl = (*methodNode)(nil)
var _ methodDecl = (*noSourceNode)(nil)
+
// posRange is a span of source, from start position to end position.
type posRange struct {
	start, end SourcePos
}

// basicNode is the base for terminal (leaf) nodes: it carries the node's
// source span plus any comments attached before and after it.
type basicNode struct {
	posRange
	leading  []comment
	trailing []comment
}

func (n *basicNode) start() *SourcePos {
	return &n.posRange.start
}

func (n *basicNode) end() *SourcePos {
	return &n.posRange.end
}

func (n *basicNode) leadingComments() []comment {
	return n.leading
}

func (n *basicNode) trailingComments() []comment {
	return n.trailing
}

// popLeadingComment removes and returns the first leading comment.
func (n *basicNode) popLeadingComment() comment {
	c := n.leading[0]
	n.leading = n.leading[1:]
	return c
}

// pushTrailingComment appends a comment to the trailing comments.
func (n *basicNode) pushTrailingComment(c comment) {
	n.trailing = append(n.trailing, c)
}

// comment is a single source comment with its location and text.
type comment struct {
	posRange
	text string
}
+
// basicCompositeNode is the base for non-leaf nodes; its span and comments
// are derived from its first and last child nodes.
type basicCompositeNode struct {
	first node
	last  node
}

func (n *basicCompositeNode) start() *SourcePos {
	return n.first.start()
}

func (n *basicCompositeNode) end() *SourcePos {
	return n.last.end()
}

func (n *basicCompositeNode) leadingComments() []comment {
	return n.first.leadingComments()
}

func (n *basicCompositeNode) trailingComments() []comment {
	return n.last.trailingComments()
}

// setRange records the first and last child, which define this node's extent.
func (n *basicCompositeNode) setRange(first, last node) {
	n.first = first
	n.last = last
}
+
// fileNode is the root of the AST for a single proto source file.
type fileNode struct {
	basicCompositeNode
	syntax *syntaxNode
	decls  []*fileElement

	// This field is populated after parsing, to make it easier to find
	// source locations by import name for constructing link errors.
	imports []*importNode
}

func (n *fileNode) getSyntax() node {
	return n.syntax
}
+
// fileElement is one top-level declaration in a file.
type fileElement struct {
	// a discriminated union: only one field will be set
	imp     *importNode
	pkg     *packageNode
	option  *optionNode
	message *messageNode
	enum    *enumNode
	extend  *extendNode
	service *serviceNode
	empty   *basicNode
}

// The node methods below all delegate to whichever variant of the union
// is set.

func (n *fileElement) start() *SourcePos {
	return n.get().start()
}

func (n *fileElement) end() *SourcePos {
	return n.get().end()
}

func (n *fileElement) leadingComments() []comment {
	return n.get().leadingComments()
}

func (n *fileElement) trailingComments() []comment {
	return n.get().trailingComments()
}
+
+func (n *fileElement) get() node {
+ switch {
+ case n.imp != nil:
+ return n.imp
+ case n.pkg != nil:
+ return n.pkg
+ case n.option != nil:
+ return n.option
+ case n.message != nil:
+ return n.message
+ case n.enum != nil:
+ return n.enum
+ case n.extend != nil:
+ return n.extend
+ case n.service != nil:
+ return n.service
+ default:
+ return n.empty
+ }
+}
+
// syntaxNode represents a file's syntax declaration.
type syntaxNode struct {
	basicCompositeNode
	syntax *compoundStringNode
}

// importNode represents an import declaration, which may be public or weak.
type importNode struct {
	basicCompositeNode
	name   *compoundStringNode
	public bool
	weak   bool
}

// packageNode represents a package declaration.
type packageNode struct {
	basicCompositeNode
	name *compoundIdentNode
}
+
// identifier distinguishes identifier values from plain strings in the
// results of the value() methods below.
type identifier string

// identNode is a single identifier token.
type identNode struct {
	basicNode
	val string
}

func (n *identNode) value() interface{} {
	return identifier(n.val)
}

// compoundIdentNode is an identifier built from multiple tokens.
type compoundIdentNode struct {
	basicCompositeNode
	val string
}

func (n *compoundIdentNode) value() interface{} {
	return identifier(n.val)
}
+
// compactOptionsNode represents a bracketed list of options.
type compactOptionsNode struct {
	basicCompositeNode
	decls []*optionNode
}

// Elements returns the contained options; it is safe to call on a nil
// receiver, which yields nil.
func (n *compactOptionsNode) Elements() []*optionNode {
	if n == nil {
		return nil
	}
	return n.decls
}

// optionNode represents a single option name/value pair.
type optionNode struct {
	basicCompositeNode
	name *optionNameNode
	val  valueNode
}

func (n *optionNode) getName() node {
	return n.name
}

func (n *optionNode) getValue() valueNode {
	return n.val
}
+
// optionNameNode is the (possibly multi-part) name of an option.
type optionNameNode struct {
	basicCompositeNode
	parts []*optionNamePartNode
}

// optionNamePartNode is one component of an option name. For non-extension
// parts, the part is a sub-slice of a larger identifier token, described by
// offset and length within it; st and en then hold the computed positions of
// just that sub-slice.
type optionNamePartNode struct {
	basicCompositeNode
	text        *compoundIdentNode
	offset      int
	length      int
	isExtension bool
	st, en      *SourcePos
}

func (n *optionNamePartNode) start() *SourcePos {
	if n.isExtension {
		return n.basicCompositeNode.start()
	}
	return n.st
}

func (n *optionNamePartNode) end() *SourcePos {
	if n.isExtension {
		return n.basicCompositeNode.end()
	}
	return n.en
}

// setRange also computes st and en for non-extension parts by offsetting the
// columns of the underlying token's start position.
func (n *optionNamePartNode) setRange(first, last node) {
	n.basicCompositeNode.setRange(first, last)
	if !n.isExtension {
		st := *first.start()
		st.Col += n.offset
		n.st = &st
		en := st
		en.Col += n.length
		n.en = &en
	}
}
+
// valueNode is implemented by AST nodes that represent a literal or
// identifier value; value() returns the corresponding Go value.
type valueNode interface {
	node
	value() interface{}
}

// Compile-time checks that all value node types satisfy valueNode.
var _ valueNode = (*identNode)(nil)
var _ valueNode = (*compoundIdentNode)(nil)
var _ valueNode = (*stringLiteralNode)(nil)
var _ valueNode = (*compoundStringNode)(nil)
var _ valueNode = (*intLiteralNode)(nil)
var _ valueNode = (*compoundIntNode)(nil)
var _ valueNode = (*compoundUintNode)(nil)
var _ valueNode = (*floatLiteralNode)(nil)
var _ valueNode = (*compoundFloatNode)(nil)
var _ valueNode = (*boolLiteralNode)(nil)
var _ valueNode = (*sliceLiteralNode)(nil)
var _ valueNode = (*aggregateLiteralNode)(nil)
var _ valueNode = (*noSourceNode)(nil)
+
+type stringLiteralNode struct {
+ basicNode
+ val string
+}
+
+func (n *stringLiteralNode) value() interface{} {
+ return n.val
+}
+
+type compoundStringNode struct {
+ basicCompositeNode
+ val string
+}
+
+func (n *compoundStringNode) value() interface{} {
+ return n.val
+}
+
+type intLiteralNode struct {
+ basicNode
+ val uint64
+}
+
+func (n *intLiteralNode) value() interface{} {
+ return n.val
+}
+
+type compoundUintNode struct {
+ basicCompositeNode
+ val uint64
+}
+
+func (n *compoundUintNode) value() interface{} {
+ return n.val
+}
+
+type compoundIntNode struct {
+ basicCompositeNode
+ val int64
+}
+
+func (n *compoundIntNode) value() interface{} {
+ return n.val
+}
+
+type floatLiteralNode struct {
+ basicNode
+ val float64
+}
+
+func (n *floatLiteralNode) value() interface{} {
+ return n.val
+}
+
+type compoundFloatNode struct {
+ basicCompositeNode
+ val float64
+}
+
+func (n *compoundFloatNode) value() interface{} {
+ return n.val
+}
+
+type boolLiteralNode struct {
+ *identNode
+ val bool
+}
+
+func (n *boolLiteralNode) value() interface{} {
+ return n.val
+}
+
+type sliceLiteralNode struct {
+ basicCompositeNode
+ elements []valueNode
+}
+
+func (n *sliceLiteralNode) value() interface{} {
+ return n.elements
+}
+
+type aggregateLiteralNode struct {
+ basicCompositeNode
+ elements []*aggregateEntryNode
+}
+
+func (n *aggregateLiteralNode) value() interface{} {
+ return n.elements
+}
+
+type aggregateEntryNode struct {
+ basicCompositeNode
+ name *aggregateNameNode
+ val valueNode
+}
+
+type aggregateNameNode struct {
+ basicCompositeNode
+ name *compoundIdentNode
+ isExtension bool
+}
+
+func (a *aggregateNameNode) value() string {
+ if a.isExtension {
+ return "[" + a.name.val + "]"
+ } else {
+ return a.name.val
+ }
+}
+
+type fieldNode struct {
+ basicCompositeNode
+ label fieldLabel
+ fldType *compoundIdentNode
+ name *identNode
+ tag *intLiteralNode
+ options *compactOptionsNode
+
+ // This field is populated after parsing, to allow lookup of extendee source
+ // locations when field extendees cannot be linked. (Otherwise, this is just
+ // stored as a string in the field descriptors defined inside the extend
+ // block).
+ extendee *extendNode
+}
+
+func (n *fieldNode) fieldLabel() node {
+ // proto3 fields and fields inside one-ofs will not have a label and we need
+ // this check in order to return a nil node -- otherwise we'd return a
+ // non-nil node that has a nil pointer value in it :/
+ if n.label.identNode == nil {
+ return nil
+ }
+ return n.label.identNode
+}
+
+func (n *fieldNode) fieldName() node {
+ return n.name
+}
+
+func (n *fieldNode) fieldType() node {
+ return n.fldType
+}
+
+func (n *fieldNode) fieldTag() node {
+ return n.tag
+}
+
+func (n *fieldNode) fieldExtendee() node {
+ if n.extendee != nil {
+ return n.extendee.extendee
+ }
+ return nil
+}
+
+func (n *fieldNode) getGroupKeyword() node {
+ return nil
+}
+
+type fieldLabel struct {
+ *identNode
+ repeated bool
+ required bool
+}
+
+type groupNode struct {
+ basicCompositeNode
+ groupKeyword *identNode
+ label fieldLabel
+ name *identNode
+ tag *intLiteralNode
+ decls []*messageElement
+
+ // This field is populated after parsing, to allow lookup of extendee source
+ // locations when field extendees cannot be linked. (Otherwise, this is just
+ // stored as a string in the field descriptors defined inside the extend
+ // block).
+ extendee *extendNode
+}
+
+func (n *groupNode) fieldLabel() node {
+ if n.label.identNode == nil {
+ // return nil interface to indicate absence, not a typed nil
+ return nil
+ }
+ return n.label.identNode
+}
+
+func (n *groupNode) fieldName() node {
+ return n.name
+}
+
+func (n *groupNode) fieldType() node {
+ return n.groupKeyword
+}
+
+func (n *groupNode) fieldTag() node {
+ return n.tag
+}
+
+func (n *groupNode) fieldExtendee() node {
+ if n.extendee != nil {
+ return n.extendee.extendee
+ }
+ return nil
+}
+
+func (n *groupNode) getGroupKeyword() node {
+ return n.groupKeyword
+}
+
+func (n *groupNode) messageName() node {
+ return n.name
+}
+
+type oneOfNode struct {
+ basicCompositeNode
+ name *identNode
+ decls []*oneOfElement
+}
+
+type oneOfElement struct {
+ // a discriminated union: only one field will be set
+ option *optionNode
+ field *fieldNode
+ group *groupNode
+ empty *basicNode
+}
+
+func (n *oneOfElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *oneOfElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *oneOfElement) leadingComments() []comment {
+ return n.get().leadingComments()
+}
+
+func (n *oneOfElement) trailingComments() []comment {
+ return n.get().trailingComments()
+}
+
+func (n *oneOfElement) get() node {
+ switch {
+ case n.option != nil:
+ return n.option
+ case n.field != nil:
+ return n.field
+ default:
+ return n.empty
+ }
+}
+
+type mapTypeNode struct {
+ basicCompositeNode
+ mapKeyword *identNode
+ keyType *identNode
+ valueType *compoundIdentNode
+}
+
+type mapFieldNode struct {
+ basicCompositeNode
+ mapType *mapTypeNode
+ name *identNode
+ tag *intLiteralNode
+ options *compactOptionsNode
+}
+
+func (n *mapFieldNode) fieldLabel() node {
+ return nil
+}
+
+func (n *mapFieldNode) fieldName() node {
+ return n.name
+}
+
+func (n *mapFieldNode) fieldType() node {
+ return n.mapType
+}
+
+func (n *mapFieldNode) fieldTag() node {
+ return n.tag
+}
+
+func (n *mapFieldNode) fieldExtendee() node {
+ return nil
+}
+
+func (n *mapFieldNode) getGroupKeyword() node {
+ return nil
+}
+
+func (n *mapFieldNode) messageName() node {
+ return n.name
+}
+
+func (n *mapFieldNode) keyField() *syntheticMapField {
+ k := n.mapType.keyType
+ t := &compoundIdentNode{val: k.val}
+ t.setRange(k, k)
+ return newSyntheticMapField(t, 1)
+}
+
+func (n *mapFieldNode) valueField() *syntheticMapField {
+ return newSyntheticMapField(n.mapType.valueType, 2)
+}
+
+func newSyntheticMapField(ident *compoundIdentNode, tagNum uint64) *syntheticMapField {
+ tag := &intLiteralNode{
+ basicNode: basicNode{
+ posRange: posRange{start: *ident.start(), end: *ident.end()},
+ },
+ val: tagNum,
+ }
+ return &syntheticMapField{ident: ident, tag: tag}
+}
+
+type syntheticMapField struct {
+ ident *compoundIdentNode
+ tag *intLiteralNode
+}
+
+func (n *syntheticMapField) start() *SourcePos {
+ return n.ident.start()
+}
+
+func (n *syntheticMapField) end() *SourcePos {
+ return n.ident.end()
+}
+
+func (n *syntheticMapField) leadingComments() []comment {
+ return nil
+}
+
+func (n *syntheticMapField) trailingComments() []comment {
+ return nil
+}
+
+func (n *syntheticMapField) fieldLabel() node {
+ return n.ident
+}
+
+func (n *syntheticMapField) fieldName() node {
+ return n.ident
+}
+
+func (n *syntheticMapField) fieldType() node {
+ return n.ident
+}
+
+func (n *syntheticMapField) fieldTag() node {
+ return n.tag
+}
+
+func (n *syntheticMapField) fieldExtendee() node {
+ return nil
+}
+
+func (n *syntheticMapField) getGroupKeyword() node {
+ return nil
+}
+
+type extensionRangeNode struct {
+ basicCompositeNode
+ ranges []*rangeNode
+ options *compactOptionsNode
+}
+
+type rangeNode struct {
+ basicCompositeNode
+ stNode, enNode node
+ st, en int32
+}
+
+func (n *rangeNode) rangeStart() node {
+ return n.stNode
+}
+
+func (n *rangeNode) rangeEnd() node {
+ return n.enNode
+}
+
+type reservedNode struct {
+ basicCompositeNode
+ ranges []*rangeNode
+ names []*compoundStringNode
+}
+
+type enumNode struct {
+ basicCompositeNode
+ name *identNode
+ decls []*enumElement
+}
+
+type enumElement struct {
+ // a discriminated union: only one field will be set
+ option *optionNode
+ value *enumValueNode
+ reserved *reservedNode
+ empty *basicNode
+}
+
+func (n *enumElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *enumElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *enumElement) leadingComments() []comment {
+ return n.get().leadingComments()
+}
+
+func (n *enumElement) trailingComments() []comment {
+ return n.get().trailingComments()
+}
+
+func (n *enumElement) get() node {
+ switch {
+ case n.option != nil:
+ return n.option
+ case n.value != nil:
+ return n.value
+ default:
+ return n.empty
+ }
+}
+
+type enumValueNode struct {
+ basicCompositeNode
+ name *identNode
+ options *compactOptionsNode
+ number *compoundIntNode
+}
+
+func (n *enumValueNode) getName() node {
+ return n.name
+}
+
+func (n *enumValueNode) getNumber() node {
+ return n.number
+}
+
+type messageNode struct {
+ basicCompositeNode
+ name *identNode
+ decls []*messageElement
+}
+
+func (n *messageNode) messageName() node {
+ return n.name
+}
+
+type messageElement struct {
+ // a discriminated union: only one field will be set
+ option *optionNode
+ field *fieldNode
+ mapField *mapFieldNode
+ oneOf *oneOfNode
+ group *groupNode
+ nested *messageNode
+ enum *enumNode
+ extend *extendNode
+ extensionRange *extensionRangeNode
+ reserved *reservedNode
+ empty *basicNode
+}
+
+func (n *messageElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *messageElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *messageElement) leadingComments() []comment {
+ return n.get().leadingComments()
+}
+
+func (n *messageElement) trailingComments() []comment {
+ return n.get().trailingComments()
+}
+
+func (n *messageElement) get() node {
+ switch {
+ case n.option != nil:
+ return n.option
+ case n.field != nil:
+ return n.field
+ case n.mapField != nil:
+ return n.mapField
+ case n.oneOf != nil:
+ return n.oneOf
+ case n.group != nil:
+ return n.group
+ case n.nested != nil:
+ return n.nested
+ case n.enum != nil:
+ return n.enum
+ case n.extend != nil:
+ return n.extend
+ case n.extensionRange != nil:
+ return n.extensionRange
+ case n.reserved != nil:
+ return n.reserved
+ default:
+ return n.empty
+ }
+}
+
+type extendNode struct {
+ basicCompositeNode
+ extendee *compoundIdentNode
+ decls []*extendElement
+}
+
+type extendElement struct {
+ // a discriminated union: only one field will be set
+ field *fieldNode
+ group *groupNode
+ empty *basicNode
+}
+
+func (n *extendElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *extendElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *extendElement) leadingComments() []comment {
+ return n.get().leadingComments()
+}
+
+func (n *extendElement) trailingComments() []comment {
+ return n.get().trailingComments()
+}
+
+func (n *extendElement) get() node {
+ switch {
+ case n.field != nil:
+ return n.field
+ case n.group != nil:
+ return n.group
+ default:
+ return n.empty
+ }
+}
+
+type serviceNode struct {
+ basicCompositeNode
+ name *identNode
+ decls []*serviceElement
+}
+
+type serviceElement struct {
+ // a discriminated union: only one field will be set
+ option *optionNode
+ rpc *methodNode
+ empty *basicNode
+}
+
+func (n *serviceElement) start() *SourcePos {
+ return n.get().start()
+}
+
+func (n *serviceElement) end() *SourcePos {
+ return n.get().end()
+}
+
+func (n *serviceElement) leadingComments() []comment {
+ return n.get().leadingComments()
+}
+
+func (n *serviceElement) trailingComments() []comment {
+ return n.get().trailingComments()
+}
+
+func (n *serviceElement) get() node {
+ switch {
+ case n.option != nil:
+ return n.option
+ case n.rpc != nil:
+ return n.rpc
+ default:
+ return n.empty
+ }
+}
+
+type methodNode struct {
+ basicCompositeNode
+ name *identNode
+ input *rpcTypeNode
+ output *rpcTypeNode
+ options []*optionNode
+}
+
+func (n *methodNode) getInputType() node {
+ return n.input.msgType
+}
+
+func (n *methodNode) getOutputType() node {
+ return n.output.msgType
+}
+
+type rpcTypeNode struct {
+ basicCompositeNode
+ msgType *compoundIdentNode
+ streamKeyword node
+}
+
+type noSourceNode struct {
+ pos *SourcePos
+}
+
+func (n noSourceNode) start() *SourcePos {
+ return n.pos
+}
+
+func (n noSourceNode) end() *SourcePos {
+ return n.pos
+}
+
+func (n noSourceNode) leadingComments() []comment {
+ return nil
+}
+
+func (n noSourceNode) trailingComments() []comment {
+ return nil
+}
+
+func (n noSourceNode) getSyntax() node {
+ return n
+}
+
+func (n noSourceNode) getName() node {
+ return n
+}
+
+func (n noSourceNode) getValue() valueNode {
+ return n
+}
+
+func (n noSourceNode) fieldLabel() node {
+ return n
+}
+
+func (n noSourceNode) fieldName() node {
+ return n
+}
+
+func (n noSourceNode) fieldType() node {
+ return n
+}
+
+func (n noSourceNode) fieldTag() node {
+ return n
+}
+
+func (n noSourceNode) fieldExtendee() node {
+ return n
+}
+
+func (n noSourceNode) getGroupKeyword() node {
+ return n
+}
+
+func (n noSourceNode) rangeStart() node {
+ return n
+}
+
+func (n noSourceNode) rangeEnd() node {
+ return n
+}
+
+func (n noSourceNode) getNumber() node {
+ return n
+}
+
+func (n noSourceNode) messageName() node {
+ return n
+}
+
+func (n noSourceNode) getInputType() node {
+ return n
+}
+
+func (n noSourceNode) getOutputType() node {
+ return n
+}
+
+func (n noSourceNode) value() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
new file mode 100644
index 0000000..c6446d3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
@@ -0,0 +1,10 @@
+// Package protoparse provides functionality for parsing *.proto source files
+// into descriptors that can be used with other protoreflect packages, like
+// dynamic messages and dynamic GRPC clients.
+//
+// This package links in other packages that include compiled descriptors for
+// the various "google/protobuf/*.proto" files that are included with protoc.
+// That way, like when invoking protoc, programs need not supply copies of these
+// "builtin" files. Though if copies of the files are provided, they will be
+// used instead of the builtin descriptors.
+package protoparse
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go
new file mode 100644
index 0000000..07a6214
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go
@@ -0,0 +1,103 @@
+package protoparse
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrInvalidSource is a sentinel error that is returned by calls to
+// Parser.ParseFiles and Parser.ParseFilesButDoNotLink in the event that syntax
+// or link errors are encountered, but the parser's configured ErrorReporter
+// always returns nil.
+var ErrInvalidSource = errors.New("parse failed: invalid proto source")
+
+// ErrorReporter is responsible for reporting the given error. If the reporter
+// returns a non-nil error, parsing/linking will abort with that error. If the
+// reporter returns nil, parsing will continue, allowing the parser to try to
+// report as many syntax and/or link errors as it can find.
+type ErrorReporter func(err ErrorWithPos) error
+
+func defaultErrorReporter(err ErrorWithPos) error {
+ // abort parsing after first error encountered
+ return err
+}
+
+type errorHandler struct {
+ reporter ErrorReporter
+ errsReported bool
+ err error
+}
+
+func newErrorHandler(reporter ErrorReporter) *errorHandler {
+ if reporter == nil {
+ reporter = defaultErrorReporter
+ }
+ return &errorHandler{
+ reporter: reporter,
+ }
+}
+
+func (h *errorHandler) handleError(err error) error {
+ if h.err != nil {
+ return h.err
+ }
+ if ewp, ok := err.(ErrorWithPos); ok {
+ h.errsReported = true
+ err = h.reporter(ewp)
+ }
+ h.err = err
+ return err
+}
+
+func (h *errorHandler) getError() error {
+ if h.errsReported && h.err == nil {
+ return ErrInvalidSource
+ }
+ return h.err
+}
+
+// ErrorWithPos is an error about a proto source file that includes information
+// about the location in the file that caused the error.
+//
+// The value of Error() will contain both the SourcePos and Underlying error.
+// The value of Unwrap() will only be the Underlying error.
+type ErrorWithPos interface {
+ error
+ GetPosition() SourcePos
+ Unwrap() error
+}
+
+// ErrorWithSourcePos is an error about a proto source file that includes
+// information about the location in the file that caused the error.
+//
+// Errors that include source location information *might* be of this type.
+// However, calling code that is trying to examine errors with location info
+// should instead look for instances of the ErrorWithPos interface, which
+// will find other kinds of errors. This type is only exported for backwards
+// compatibility.
+type ErrorWithSourcePos struct {
+ Underlying error
+ Pos *SourcePos
+}
+
+// Error implements the error interface
+func (e ErrorWithSourcePos) Error() string {
+ if e.Pos.Line <= 0 || e.Pos.Col <= 0 {
+ return fmt.Sprintf("%s: %v", e.Pos.Filename, e.Underlying)
+ }
+ return fmt.Sprintf("%s:%d:%d: %v", e.Pos.Filename, e.Pos.Line, e.Pos.Col, e.Underlying)
+}
+
+// GetPosition implements the ErrorWithPos interface, supplying a location in
+// proto source that caused the error.
+func (e ErrorWithSourcePos) GetPosition() SourcePos {
+ return *e.Pos
+}
+
+// Unwrap implements the ErrorWithPos interface, supplying the underlying
+// error. This error will not include location information.
+func (e ErrorWithSourcePos) Unwrap() error {
+ return e.Underlying
+}
+
+var _ ErrorWithPos = ErrorWithSourcePos{}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
new file mode 100644
index 0000000..b0b5f5b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
@@ -0,0 +1,758 @@
+package protoparse
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type runeReader struct {
+ rr *bufio.Reader
+ unread []rune
+ err error
+}
+
+func (rr *runeReader) readRune() (r rune, size int, err error) {
+ if rr.err != nil {
+ return 0, 0, rr.err
+ }
+ if len(rr.unread) > 0 {
+ r := rr.unread[len(rr.unread)-1]
+ rr.unread = rr.unread[:len(rr.unread)-1]
+ return r, utf8.RuneLen(r), nil
+ }
+ r, sz, err := rr.rr.ReadRune()
+ if err != nil {
+ rr.err = err
+ }
+ return r, sz, err
+}
+
+func (rr *runeReader) unreadRune(r rune) {
+ rr.unread = append(rr.unread, r)
+}
+
+func lexError(l protoLexer, pos *SourcePos, err string) {
+ pl := l.(*protoLex)
+ _ = pl.errs.handleError(ErrorWithSourcePos{Underlying: errors.New(err), Pos: pos})
+}
+
+type protoLex struct {
+ filename string
+ input *runeReader
+ errs *errorHandler
+ res *fileNode
+
+ lineNo int
+ colNo int
+ offset int
+
+ prevSym terminalNode
+
+ prevLineNo int
+ prevColNo int
+ prevOffset int
+ comments []comment
+}
+
+func newTestLexer(in io.Reader) *protoLex {
+ return newLexer(in, "test.proto", newErrorHandler(nil))
+}
+
+func newLexer(in io.Reader, filename string, errs *errorHandler) *protoLex {
+ return &protoLex{
+ input: &runeReader{rr: bufio.NewReader(in)},
+ filename: filename,
+ errs: errs,
+ }
+}
+
+var keywords = map[string]int{
+ "syntax": _SYNTAX,
+ "import": _IMPORT,
+ "weak": _WEAK,
+ "public": _PUBLIC,
+ "package": _PACKAGE,
+ "option": _OPTION,
+ "true": _TRUE,
+ "false": _FALSE,
+ "inf": _INF,
+ "nan": _NAN,
+ "repeated": _REPEATED,
+ "optional": _OPTIONAL,
+ "required": _REQUIRED,
+ "double": _DOUBLE,
+ "float": _FLOAT,
+ "int32": _INT32,
+ "int64": _INT64,
+ "uint32": _UINT32,
+ "uint64": _UINT64,
+ "sint32": _SINT32,
+ "sint64": _SINT64,
+ "fixed32": _FIXED32,
+ "fixed64": _FIXED64,
+ "sfixed32": _SFIXED32,
+ "sfixed64": _SFIXED64,
+ "bool": _BOOL,
+ "string": _STRING,
+ "bytes": _BYTES,
+ "group": _GROUP,
+ "oneof": _ONEOF,
+ "map": _MAP,
+ "extensions": _EXTENSIONS,
+ "to": _TO,
+ "max": _MAX,
+ "reserved": _RESERVED,
+ "enum": _ENUM,
+ "message": _MESSAGE,
+ "extend": _EXTEND,
+ "service": _SERVICE,
+ "rpc": _RPC,
+ "stream": _STREAM,
+ "returns": _RETURNS,
+}
+
+func (l *protoLex) cur() SourcePos {
+ return SourcePos{
+ Filename: l.filename,
+ Offset: l.offset,
+ Line: l.lineNo + 1,
+ Col: l.colNo + 1,
+ }
+}
+
+func (l *protoLex) adjustPos(consumedChars ...rune) {
+ for _, c := range consumedChars {
+ switch c {
+ case '\n':
+ // new line, back to first column
+ l.colNo = 0
+ l.lineNo++
+ case '\r':
+ // no adjustment
+ case '\t':
+ // advance to next tab stop
+ mod := l.colNo % 8
+ l.colNo += 8 - mod
+ default:
+ l.colNo++
+ }
+ }
+}
+
+func (l *protoLex) prev() *SourcePos {
+ if l.prevSym == nil {
+ return &SourcePos{
+ Filename: l.filename,
+ Offset: 0,
+ Line: 1,
+ Col: 1,
+ }
+ }
+ return l.prevSym.start()
+}
+
+func (l *protoLex) Lex(lval *protoSymType) int {
+ if l.errs.err != nil {
+ // if error reporter already returned non-nil error,
+ // we can skip the rest of the input
+ return 0
+ }
+
+ l.prevLineNo = l.lineNo
+ l.prevColNo = l.colNo
+ l.prevOffset = l.offset
+ l.comments = nil
+
+ for {
+ c, n, err := l.input.readRune()
+ if err == io.EOF {
+ // we're not actually returning a rune, but this will associate
+ // accumulated comments as a trailing comment on last symbol
+ // (if appropriate)
+ l.setRune(lval)
+ return 0
+ } else if err != nil {
+ // we don't call setError because we don't want it wrapped
+ // with a source position because it's I/O, not syntax
+ lval.err = err
+ _ = l.errs.handleError(err)
+ return _ERROR
+ }
+
+ l.prevLineNo = l.lineNo
+ l.prevColNo = l.colNo
+ l.prevOffset = l.offset
+
+ l.offset += n
+ l.adjustPos(c)
+ if strings.ContainsRune("\n\r\t ", c) {
+ continue
+ }
+
+ if c == '.' {
+ // decimal literals could start with a dot
+ cn, _, err := l.input.readRune()
+ if err != nil {
+ l.setRune(lval)
+ return int(c)
+ }
+ if cn >= '0' && cn <= '9' {
+ l.adjustPos(cn)
+ token := []rune{c, cn}
+ token = l.readNumber(token, false, true)
+ f, err := strconv.ParseFloat(string(token), 64)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setFloat(lval, f)
+ return _FLOAT_LIT
+ }
+ l.input.unreadRune(cn)
+ l.setRune(lval)
+ return int(c)
+ }
+
+ if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
+ // identifier
+ token := []rune{c}
+ token = l.readIdentifier(token)
+ str := string(token)
+ if t, ok := keywords[str]; ok {
+ l.setIdent(lval, str)
+ return t
+ }
+ l.setIdent(lval, str)
+ return _NAME
+ }
+
+ if c >= '0' && c <= '9' {
+ // integer or float literal
+ if c == '0' {
+ cn, _, err := l.input.readRune()
+ if err != nil {
+ l.setInt(lval, 0)
+ return _INT_LIT
+ }
+ if cn == 'x' || cn == 'X' {
+ cnn, _, err := l.input.readRune()
+ if err != nil {
+ l.input.unreadRune(cn)
+ l.setInt(lval, 0)
+ return _INT_LIT
+ }
+ if (cnn >= '0' && cnn <= '9') || (cnn >= 'a' && cnn <= 'f') || (cnn >= 'A' && cnn <= 'F') {
+ // hexadecimal!
+ l.adjustPos(cn, cnn)
+ token := []rune{cnn}
+ token = l.readHexNumber(token)
+ ui, err := strconv.ParseUint(string(token), 16, 64)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setInt(lval, ui)
+ return _INT_LIT
+ }
+ l.input.unreadRune(cnn)
+ l.input.unreadRune(cn)
+ l.setInt(lval, 0)
+ return _INT_LIT
+ } else {
+ l.input.unreadRune(cn)
+ }
+ }
+ token := []rune{c}
+ token = l.readNumber(token, true, true)
+ numstr := string(token)
+ if strings.Contains(numstr, ".") || strings.Contains(numstr, "e") || strings.Contains(numstr, "E") {
+ // floating point!
+ f, err := strconv.ParseFloat(numstr, 64)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setFloat(lval, f)
+ return _FLOAT_LIT
+ }
+ // integer! (decimal or octal)
+ ui, err := strconv.ParseUint(numstr, 0, 64)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setInt(lval, ui)
+ return _INT_LIT
+ }
+
+ if c == '\'' || c == '"' {
+ // string literal
+ str, err := l.readStringLiteral(c)
+ if err != nil {
+ l.setError(lval, err)
+ return _ERROR
+ }
+ l.setString(lval, str)
+ return _STRING_LIT
+ }
+
+ if c == '/' {
+ // comment
+ cn, _, err := l.input.readRune()
+ if err != nil {
+ l.setRune(lval)
+ return int(c)
+ }
+ if cn == '/' {
+ l.adjustPos(cn)
+ hitNewline, txt := l.skipToEndOfLineComment()
+ commentPos := l.posRange()
+ commentPos.end.Col++
+ if hitNewline {
+ // we don't do this inside of skipToEndOfLineComment
+ // because we want to know the length of previous
+ // line for calculation above
+ l.adjustPos('\n')
+ }
+ l.comments = append(l.comments, comment{posRange: commentPos, text: txt})
+ continue
+ }
+ if cn == '*' {
+ l.adjustPos(cn)
+ if txt, ok := l.skipToEndOfBlockComment(); !ok {
+ l.setError(lval, errors.New("block comment never terminates, unexpected EOF"))
+ return _ERROR
+ } else {
+ l.comments = append(l.comments, comment{posRange: l.posRange(), text: txt})
+ }
+ continue
+ }
+ l.input.unreadRune(cn)
+ }
+
+ l.setRune(lval)
+ return int(c)
+ }
+}
+
+func (l *protoLex) posRange() posRange {
+ return posRange{
+ start: SourcePos{
+ Filename: l.filename,
+ Offset: l.prevOffset,
+ Line: l.prevLineNo + 1,
+ Col: l.prevColNo + 1,
+ },
+ end: l.cur(),
+ }
+}
+
+func (l *protoLex) newBasicNode() basicNode {
+ return basicNode{
+ posRange: l.posRange(),
+ leading: l.comments,
+ }
+}
+
+func (l *protoLex) setPrev(n terminalNode) {
+ nStart := n.start().Line
+ if _, ok := n.(*basicNode); ok {
+ // if the node is a simple rune, don't attribute comments to it
+ // HACK: adjusting the start line makes leading comments appear
+ // detached so logic below will naturally associated trailing
+ // comment to previous symbol
+ nStart += 2
+ }
+ if l.prevSym != nil && len(n.leadingComments()) > 0 && l.prevSym.end().Line < nStart {
+ // we may need to re-attribute the first comment to
+ // instead be previous node's trailing comment
+ prevEnd := l.prevSym.end().Line
+ comments := n.leadingComments()
+ c := comments[0]
+ commentStart := c.start.Line
+ if commentStart == prevEnd {
+ // comment is on same line as previous symbol
+ n.popLeadingComment()
+ l.prevSym.pushTrailingComment(c)
+ } else if commentStart == prevEnd+1 {
+ // comment is right after previous symbol; see if it is detached
+ // and if so re-attribute
+ singleLineStyle := strings.HasPrefix(c.text, "//")
+ line := c.end.Line
+ groupEnd := -1
+ for i := 1; i < len(comments); i++ {
+ c := comments[i]
+ newGroup := false
+ if !singleLineStyle || c.start.Line > line+1 {
+ // we've found a gap between comments, which means the
+ // previous comments were detached
+ newGroup = true
+ } else {
+ line = c.end.Line
+ singleLineStyle = strings.HasPrefix(comments[i].text, "//")
+ if !singleLineStyle {
+ // we've found a switch from // comments to /*
+ // consider that a new group which means the
+ // previous comments were detached
+ newGroup = true
+ }
+ }
+ if newGroup {
+ groupEnd = i
+ break
+ }
+ }
+
+ if groupEnd == -1 {
+ // just one group of comments; we'll mark it as a trailing
+ // comment if it immediately follows previous symbol and is
+ // detached from current symbol
+ c1 := comments[0]
+ c2 := comments[len(comments)-1]
+ if c1.start.Line <= prevEnd+1 && c2.end.Line < nStart-1 {
+ groupEnd = len(comments)
+ }
+ }
+
+ for i := 0; i < groupEnd; i++ {
+ l.prevSym.pushTrailingComment(n.popLeadingComment())
+ }
+ }
+ }
+
+ l.prevSym = n
+}
+
+func (l *protoLex) setString(lval *protoSymType, val string) {
+ lval.s = &stringLiteralNode{basicNode: l.newBasicNode(), val: val}
+ l.setPrev(lval.s)
+}
+
+func (l *protoLex) setIdent(lval *protoSymType, val string) {
+ lval.id = &identNode{basicNode: l.newBasicNode(), val: val}
+ l.setPrev(lval.id)
+}
+
+func (l *protoLex) setInt(lval *protoSymType, val uint64) {
+ lval.i = &intLiteralNode{basicNode: l.newBasicNode(), val: val}
+ l.setPrev(lval.i)
+}
+
+func (l *protoLex) setFloat(lval *protoSymType, val float64) {
+ lval.f = &floatLiteralNode{basicNode: l.newBasicNode(), val: val}
+ l.setPrev(lval.f)
+}
+
+func (l *protoLex) setRune(lval *protoSymType) {
+ b := l.newBasicNode()
+ lval.b = &b
+ l.setPrev(lval.b)
+}
+
+func (l *protoLex) setError(lval *protoSymType, err error) {
+ lval.err = l.addSourceError(err)
+}
+
+func (l *protoLex) readNumber(sofar []rune, allowDot bool, allowExp bool) []rune {
+ token := sofar
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ break
+ }
+ if c == '.' {
+ if !allowDot {
+ l.input.unreadRune(c)
+ break
+ }
+ allowDot = false
+ } else if c == 'e' || c == 'E' {
+ if !allowExp {
+ l.input.unreadRune(c)
+ break
+ }
+ allowExp = false
+ cn, _, err := l.input.readRune()
+ if err != nil {
+ l.input.unreadRune(c)
+ break
+ }
+ if cn == '-' || cn == '+' {
+ cnn, _, err := l.input.readRune()
+ if err != nil {
+ l.input.unreadRune(cn)
+ l.input.unreadRune(c)
+ break
+ }
+ if cnn < '0' || cnn > '9' {
+ l.input.unreadRune(cnn)
+ l.input.unreadRune(cn)
+ l.input.unreadRune(c)
+ break
+ }
+ l.adjustPos(c)
+ token = append(token, c)
+ c, cn = cn, cnn
+ } else if cn < '0' || cn > '9' {
+ l.input.unreadRune(cn)
+ l.input.unreadRune(c)
+ break
+ }
+ l.adjustPos(c)
+ token = append(token, c)
+ c = cn
+ } else if c < '0' || c > '9' {
+ l.input.unreadRune(c)
+ break
+ }
+ l.adjustPos(c)
+ token = append(token, c)
+ }
+ return token
+}
+
+func (l *protoLex) readHexNumber(sofar []rune) []rune {
+ token := sofar
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ break
+ }
+ if (c < 'a' || c > 'f') && (c < 'A' || c > 'F') && (c < '0' || c > '9') {
+ l.input.unreadRune(c)
+ break
+ }
+ l.adjustPos(c)
+ token = append(token, c)
+ }
+ return token
+}
+
+func (l *protoLex) readIdentifier(sofar []rune) []rune {
+ token := sofar
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ break
+ }
+ if c != '_' && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') {
+ l.input.unreadRune(c)
+ break
+ }
+ l.adjustPos(c)
+ token = append(token, c)
+ }
+ return token
+}
+
+func (l *protoLex) readStringLiteral(quote rune) (string, error) {
+ var buf bytes.Buffer
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return "", err
+ }
+ if c == '\n' {
+ return "", errors.New("encountered end-of-line before end of string literal")
+ }
+ l.adjustPos(c)
+ if c == quote {
+ break
+ }
+ if c == 0 {
+ return "", errors.New("null character ('\\0') not allowed in string literal")
+ }
+ if c == '\\' {
+ // escape sequence
+ c, _, err = l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.adjustPos(c)
+ if c == 'x' || c == 'X' {
+ // hex escape
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.adjustPos(c)
+ c2, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var hex string
+ if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') {
+ l.input.unreadRune(c2)
+ hex = string(c)
+ } else {
+ l.adjustPos(c2)
+ hex = string([]rune{c, c2})
+ }
+ i, err := strconv.ParseInt(hex, 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid hex escape: \\x%q", hex)
+ }
+ buf.WriteByte(byte(i))
+
+ } else if c >= '0' && c <= '7' {
+ // octal escape
+ c2, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ var octal string
+ if c2 < '0' || c2 > '7' {
+ l.input.unreadRune(c2)
+ octal = string(c)
+ } else {
+ l.adjustPos(c2)
+ c3, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ if c3 < '0' || c3 > '7' {
+ l.input.unreadRune(c3)
+ octal = string([]rune{c, c2})
+ } else {
+ l.adjustPos(c3)
+ octal = string([]rune{c, c2, c3})
+ }
+ }
+ i, err := strconv.ParseInt(octal, 8, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid octal escape: \\%q", octal)
+ }
+ if i > 0xff {
+ return "", fmt.Errorf("octal escape is out range, must be between 0 and 377: \\%q", octal)
+ }
+ buf.WriteByte(byte(i))
+
+ } else if c == 'u' {
+ // short unicode escape
+ u := make([]rune, 4)
+ for i := range u {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.adjustPos(c)
+ u[i] = c
+ }
+ i, err := strconv.ParseInt(string(u), 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid unicode escape: \\u%q", string(u))
+ }
+ buf.WriteRune(rune(i))
+
+ } else if c == 'U' {
+ // long unicode escape
+ u := make([]rune, 8)
+ for i := range u {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", err
+ }
+ l.adjustPos(c)
+ u[i] = c
+ }
+ i, err := strconv.ParseInt(string(u), 16, 32)
+ if err != nil {
+ return "", fmt.Errorf("invalid unicode escape: \\U%q", string(u))
+ }
+ if i > 0x10ffff || i < 0 {
+ return "", fmt.Errorf("unicode escape is out of range, must be between 0 and 0x10ffff: \\U%q", string(u))
+ }
+ buf.WriteRune(rune(i))
+
+ } else if c == 'a' {
+ buf.WriteByte('\a')
+ } else if c == 'b' {
+ buf.WriteByte('\b')
+ } else if c == 'f' {
+ buf.WriteByte('\f')
+ } else if c == 'n' {
+ buf.WriteByte('\n')
+ } else if c == 'r' {
+ buf.WriteByte('\r')
+ } else if c == 't' {
+ buf.WriteByte('\t')
+ } else if c == 'v' {
+ buf.WriteByte('\v')
+ } else if c == '\\' {
+ buf.WriteByte('\\')
+ } else if c == '\'' {
+ buf.WriteByte('\'')
+ } else if c == '"' {
+ buf.WriteByte('"')
+ } else if c == '?' {
+ buf.WriteByte('?')
+ } else {
+ return "", fmt.Errorf("invalid escape sequence: %q", "\\"+string(c))
+ }
+ } else {
+ buf.WriteRune(c)
+ }
+ }
+ return buf.String(), nil
+}
+
+func (l *protoLex) skipToEndOfLineComment() (bool, string) {
+ txt := []rune{'/', '/'}
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return false, string(txt)
+ }
+ if c == '\n' {
+ return true, string(append(txt, '\n'))
+ }
+ l.adjustPos(c)
+ txt = append(txt, c)
+ }
+}
+
+func (l *protoLex) skipToEndOfBlockComment() (string, bool) {
+ txt := []rune{'/', '*'}
+ for {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", false
+ }
+ l.adjustPos(c)
+ txt = append(txt, c)
+ if c == '*' {
+ c, _, err := l.input.readRune()
+ if err != nil {
+ return "", false
+ }
+ if c == '/' {
+ l.adjustPos(c)
+ txt = append(txt, c)
+ return string(txt), true
+ }
+ l.input.unreadRune(c)
+ }
+ }
+}
+
+func (l *protoLex) addSourceError(err error) ErrorWithPos {
+ ewp, ok := err.(ErrorWithPos)
+ if !ok {
+ ewp = ErrorWithSourcePos{Pos: l.prev(), Underlying: err}
+ }
+ _ = l.errs.handleError(ewp)
+ return ewp
+}
+
+func (l *protoLex) Error(s string) {
+ _ = l.addSourceError(errors.New(s))
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
new file mode 100644
index 0000000..a990f4f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
@@ -0,0 +1,731 @@
+package protoparse
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
// linker resolves all symbol and type references across a set of
// parsed files and turns the raw descriptor protos into rich,
// fully-linked descriptors.
type linker struct {
	// parse results keyed by filename
	files map[string]*parseResult
	// filenames in stable order, for deterministic iteration
	filenames []string
	// sink for errors encountered while linking
	errs *errorHandler
	// per-file symbol pool: fully-qualified name -> descriptor proto;
	// populated by createDescriptorPool
	descriptorPool map[*dpb.FileDescriptorProto]map[string]proto.Message
	// extendee fqn -> extension tag -> extension name; used to detect
	// duplicate extension tags (populated by resolveReferences)
	extensions map[string]map[int32]string
}
+
+func newLinker(files *parseResults, errs *errorHandler) *linker {
+ return &linker{files: files.resultsByFilename, filenames: files.filenames, errs: errs}
+}
+
// linkFiles runs the full linking pipeline: build a symbol pool,
// resolve all type references, create rich descriptors, then interpret
// any remaining uninterpreted options. It returns the linked
// descriptors keyed by filename, or the first fatal error.
func (l *linker) linkFiles() (map[string]*desc.FileDescriptor, error) {
	// First, we put all symbols into a single pool, which lets us ensure there
	// are no duplicate symbols and will also let us resolve and revise all type
	// references in next step.
	if err := l.createDescriptorPool(); err != nil {
		return nil, err
	}

	// After we've populated the pool, we can now try to resolve all type
	// references. All references must be checked for correct type, any fields
	// with enum types must be corrected (since we parse them as if they are
	// message references since we don't actually know message or enum until
	// link time), and references will be re-written to be fully-qualified
	// references (e.g. start with a dot ".").
	if err := l.resolveReferences(); err != nil {
		return nil, err
	}

	if err := l.errs.getError(); err != nil {
		// we won't be able to create real descriptors if we've encountered
		// errors up to this point, so bail at this point
		return nil, err
	}

	// Now we've validated the descriptors, so we can link them into rich
	// descriptors. This is a little redundant since that step does similar
	// checking of symbols. But, without breaking encapsulation (e.g. exporting
	// a lot of fields from desc package that are currently unexported) or
	// merging this into the same package, we can't really prevent it.
	linked, err := l.createdLinkedDescriptors()
	if err != nil {
		return nil, err
	}

	// Now that we have linked descriptors, we can interpret any uninterpreted
	// options that remain.
	for _, r := range l.files {
		fd := linked[r.fd.GetName()]
		if err := interpretFileOptions(r, richFileDescriptorish{FileDescriptor: fd}); err != nil {
			return nil, err
		}
	}

	return linked, nil
}
+
// createDescriptorPool builds a per-file symbol pool mapping each
// fully-qualified name to its descriptor proto, then re-checks all
// symbols in one combined pool so duplicates across files are also
// reported. A non-nil return means the error handler chose to abort.
func (l *linker) createDescriptorPool() error {
	l.descriptorPool = map[*dpb.FileDescriptorProto]map[string]proto.Message{}
	for _, filename := range l.filenames {
		r := l.files[filename]
		fd := r.fd
		pool := map[string]proto.Message{}
		l.descriptorPool[fd] = pool
		prefix := fd.GetPackage()
		if prefix != "" {
			prefix += "."
		}
		for _, md := range fd.MessageType {
			if err := addMessageToPool(r, pool, l.errs, prefix, md); err != nil {
				return err
			}
		}
		for _, fld := range fd.Extension {
			if err := addFieldToPool(r, pool, l.errs, prefix, fld); err != nil {
				return err
			}
		}
		for _, ed := range fd.EnumType {
			if err := addEnumToPool(r, pool, l.errs, prefix, ed); err != nil {
				return err
			}
		}
		for _, sd := range fd.Service {
			if err := addServiceToPool(r, pool, l.errs, prefix, sd); err != nil {
				return err
			}
		}
	}
	// try putting everything into a single pool, to ensure there are no duplicates
	// across files (e.g. same symbol, but declared in two different files)
	type entry struct {
		file string
		msg  proto.Message
	}
	pool := map[string]entry{}
	for _, filename := range l.filenames {
		f := l.files[filename].fd
		p := l.descriptorPool[f]
		keys := make([]string, 0, len(p))
		for k := range p {
			keys = append(keys, k)
		}
		sort.Strings(keys) // for deterministic error reporting
		for _, k := range keys {
			v := p[k]
			if e, ok := pool[k]; ok {
				desc1 := e.msg
				file1 := e.file
				desc2 := v
				file2 := f.GetName()
				// swap so the error is positioned in the lexically-later
				// file, citing the earlier one as the prior definition
				if file2 < file1 {
					file1, file2 = file2, file1
					desc1, desc2 = desc2, desc1
				}
				node := l.files[file2].nodes[desc2]
				if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s in %q", k, descriptorType(desc1), file1)}); err != nil {
					return err
				}
			}
			pool[k] = entry{file: f.GetName(), msg: v}
		}
	}

	return nil
}
+
+func addMessageToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, md *dpb.DescriptorProto) error {
+ fqn := prefix + md.GetName()
+ if err := addToPool(r, pool, errs, fqn, md); err != nil {
+ return err
+ }
+ prefix = fqn + "."
+ for _, fld := range md.Field {
+ if err := addFieldToPool(r, pool, errs, prefix, fld); err != nil {
+ return err
+ }
+ }
+ for _, fld := range md.Extension {
+ if err := addFieldToPool(r, pool, errs, prefix, fld); err != nil {
+ return err
+ }
+ }
+ for _, nmd := range md.NestedType {
+ if err := addMessageToPool(r, pool, errs, prefix, nmd); err != nil {
+ return err
+ }
+ }
+ for _, ed := range md.EnumType {
+ if err := addEnumToPool(r, pool, errs, prefix, ed); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addFieldToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, fld *dpb.FieldDescriptorProto) error {
+ fqn := prefix + fld.GetName()
+ return addToPool(r, pool, errs, fqn, fld)
+}
+
+func addEnumToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, ed *dpb.EnumDescriptorProto) error {
+ fqn := prefix + ed.GetName()
+ if err := addToPool(r, pool, errs, fqn, ed); err != nil {
+ return err
+ }
+ for _, evd := range ed.Value {
+ vfqn := fqn + "." + evd.GetName()
+ if err := addToPool(r, pool, errs, vfqn, evd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addServiceToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, sd *dpb.ServiceDescriptorProto) error {
+ fqn := prefix + sd.GetName()
+ if err := addToPool(r, pool, errs, fqn, sd); err != nil {
+ return err
+ }
+ for _, mtd := range sd.Method {
+ mfqn := fqn + "." + mtd.GetName()
+ if err := addToPool(r, pool, errs, mfqn, mtd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, fqn string, dsc proto.Message) error {
+ if d, ok := pool[fqn]; ok {
+ node := r.nodes[dsc]
+ if err := errs.handleError(ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s", fqn, descriptorType(d))}); err != nil {
+ return err
+ }
+ }
+ pool[fqn] = dsc
+ return nil
+}
+
+func descriptorType(m proto.Message) string {
+ switch m := m.(type) {
+ case *dpb.DescriptorProto:
+ return "message"
+ case *dpb.DescriptorProto_ExtensionRange:
+ return "extension range"
+ case *dpb.FieldDescriptorProto:
+ if m.GetExtendee() == "" {
+ return "field"
+ } else {
+ return "extension"
+ }
+ case *dpb.EnumDescriptorProto:
+ return "enum"
+ case *dpb.EnumValueDescriptorProto:
+ return "enum value"
+ case *dpb.ServiceDescriptorProto:
+ return "service"
+ case *dpb.MethodDescriptorProto:
+ return "method"
+ case *dpb.FileDescriptorProto:
+ return "file"
+ default:
+ // shouldn't be possible
+ return fmt.Sprintf("%T", m)
+ }
+}
+
// resolveReferences walks every file and resolves all type names and
// option extension names to fully-qualified form, reporting unknown or
// mis-typed references. It also initializes the extension-tag
// bookkeeping used to detect duplicate extension tags.
func (l *linker) resolveReferences() error {
	l.extensions = map[string]map[int32]string{}
	for _, filename := range l.filenames {
		r := l.files[filename]
		fd := r.fd
		prefix := fd.GetPackage()
		// the file scope is the outermost scope for every symbol lookup
		scopes := []scope{fileScope(fd, l)}
		if prefix != "" {
			prefix += "."
		}
		if fd.Options != nil {
			if err := l.resolveOptions(r, fd, "file", fd.GetName(), proto.MessageName(fd.Options), fd.Options.UninterpretedOption, scopes); err != nil {
				return err
			}
		}
		for _, md := range fd.MessageType {
			if err := l.resolveMessageTypes(r, fd, prefix, md, scopes); err != nil {
				return err
			}
		}
		for _, fld := range fd.Extension {
			if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
				return err
			}
		}
		for _, ed := range fd.EnumType {
			if err := l.resolveEnumTypes(r, fd, prefix, ed, scopes); err != nil {
				return err
			}
		}
		for _, sd := range fd.Service {
			if err := l.resolveServiceTypes(r, fd, prefix, sd, scopes); err != nil {
				return err
			}
		}
	}
	return nil
}
+
+func (l *linker) resolveEnumTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, ed *dpb.EnumDescriptorProto, scopes []scope) error {
+ enumFqn := prefix + ed.GetName()
+ if ed.Options != nil {
+ if err := l.resolveOptions(r, fd, "enum", enumFqn, proto.MessageName(ed.Options), ed.Options.UninterpretedOption, scopes); err != nil {
+ return err
+ }
+ }
+ for _, evd := range ed.Value {
+ if evd.Options != nil {
+ evFqn := enumFqn + "." + evd.GetName()
+ if err := l.resolveOptions(r, fd, "enum value", evFqn, proto.MessageName(evd.Options), evd.Options.UninterpretedOption, scopes); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
// resolveMessageTypes resolves everything declared inside message md:
// its options, nested messages and enums, fields, extensions, and
// extension-range options. A scope for the message itself is appended
// so that references inside it resolve relative to the message first.
func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, md *dpb.DescriptorProto, scopes []scope) error {
	fqn := prefix + md.GetName()
	scope := messageScope(fqn, isProto3(fd), l.descriptorPool[fd])
	scopes = append(scopes, scope)
	prefix = fqn + "."

	if md.Options != nil {
		if err := l.resolveOptions(r, fd, "message", fqn, proto.MessageName(md.Options), md.Options.UninterpretedOption, scopes); err != nil {
			return err
		}
	}

	for _, nmd := range md.NestedType {
		if err := l.resolveMessageTypes(r, fd, prefix, nmd, scopes); err != nil {
			return err
		}
	}
	for _, ned := range md.EnumType {
		if err := l.resolveEnumTypes(r, fd, prefix, ned, scopes); err != nil {
			return err
		}
	}
	for _, fld := range md.Field {
		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
			return err
		}
	}
	for _, fld := range md.Extension {
		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
			return err
		}
	}
	for _, er := range md.ExtensionRange {
		if er.Options != nil {
			// ranges are stored with an exclusive end; display as inclusive
			erName := fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)
			if err := l.resolveOptions(r, fd, "extension range", erName, proto.MessageName(er.Options), er.Options.UninterpretedOption, scopes); err != nil {
				return err
			}
		}
	}
	return nil
}
+
// resolveFieldTypes resolves the references declared by field fld: its
// extendee (for extensions, additionally validating the tag against
// the extended message's extension ranges and checking for duplicate
// tags), its options, and its type name. Because the parser cannot
// distinguish message from enum references, the type is corrected here
// once the referent is known. Resolved names are rewritten in
// fully-qualified form with a leading dot.
func (l *linker) resolveFieldTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto, scopes []scope) error {
	thisName := prefix + fld.GetName()
	scope := fmt.Sprintf("field %s", thisName)
	node := r.getFieldNode(fld)
	elemType := "field"
	if fld.GetExtendee() != "" {
		elemType = "extension"
		fqn, dsc, _ := l.resolve(fd, fld.GetExtendee(), isMessage, scopes)
		if dsc == nil {
			return l.errs.handleError(ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("unknown extendee type %s", fld.GetExtendee())})
		}
		extd, ok := dsc.(*dpb.DescriptorProto)
		if !ok {
			otherType := descriptorType(dsc)
			return l.errs.handleError(ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("extendee is invalid: %s is a %s, not a message", fqn, otherType)})
		}
		fld.Extendee = proto.String("." + fqn)
		// make sure the tag number is in range
		found := false
		tag := fld.GetNumber()
		for _, rng := range extd.ExtensionRange {
			if tag >= rng.GetStart() && tag < rng.GetEnd() {
				found = true
				break
			}
		}
		if !found {
			if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: tag %d is not in valid range for extended type %s", scope, tag, fqn)}); err != nil {
				return err
			}
		} else {
			// make sure tag is not a duplicate
			usedExtTags := l.extensions[fqn]
			if usedExtTags == nil {
				usedExtTags = map[int32]string{}
				l.extensions[fqn] = usedExtTags
			}
			if other := usedExtTags[fld.GetNumber()]; other != "" {
				if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: duplicate extension: %s and %s are both using tag %d", scope, other, thisName, fld.GetNumber())}); err != nil {
					return err
				}
			} else {
				usedExtTags[fld.GetNumber()] = thisName
			}
		}
	}

	if fld.Options != nil {
		if err := l.resolveOptions(r, fd, elemType, thisName, proto.MessageName(fld.Options), fld.Options.UninterpretedOption, scopes); err != nil {
			return err
		}
	}

	if fld.GetTypeName() == "" {
		// scalar type; no further resolution required
		return nil
	}

	fqn, dsc, proto3 := l.resolve(fd, fld.GetTypeName(), isType, scopes)
	if dsc == nil {
		return l.errs.handleError(ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: unknown type %s", scope, fld.GetTypeName())})
	}
	switch dsc := dsc.(type) {
	case *dpb.DescriptorProto:
		fld.TypeName = proto.String("." + fqn)
		// if type was tentatively unset, we now know it's actually a message
		if fld.Type == nil {
			fld.Type = dpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
		}
	case *dpb.EnumDescriptorProto:
		if fld.GetExtendee() == "" && isProto3(fd) && !proto3 {
			// fields in a proto3 message cannot refer to proto2 enums
			return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: cannot use proto2 enum %s in a proto3 message", scope, fld.GetTypeName())}
		}
		fld.TypeName = proto.String("." + fqn)
		// the type was tentatively unset, but now we know it's actually an enum
		fld.Type = dpb.FieldDescriptorProto_TYPE_ENUM.Enum()
	default:
		otherType := descriptorType(dsc)
		return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: invalid type: %s is a %s, not a message or enum", scope, fqn, otherType)}
	}
	return nil
}
+
// resolveServiceTypes resolves the options on service sd and on each
// of its methods, plus every method's request and response types. Both
// must resolve to messages; resolved type names are rewritten in
// fully-qualified form with a leading dot.
func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, sd *dpb.ServiceDescriptorProto, scopes []scope) error {
	thisName := prefix + sd.GetName()
	if sd.Options != nil {
		if err := l.resolveOptions(r, fd, "service", thisName, proto.MessageName(sd.Options), sd.Options.UninterpretedOption, scopes); err != nil {
			return err
		}
	}

	for _, mtd := range sd.Method {
		if mtd.Options != nil {
			if err := l.resolveOptions(r, fd, "method", thisName+"."+mtd.GetName(), proto.MessageName(mtd.Options), mtd.Options.UninterpretedOption, scopes); err != nil {
				return err
			}
		}
		scope := fmt.Sprintf("method %s.%s", thisName, mtd.GetName())
		node := r.getMethodNode(mtd)
		// request type must resolve to a message
		fqn, dsc, _ := l.resolve(fd, mtd.GetInputType(), isMessage, scopes)
		if dsc == nil {
			if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: unknown request type %s", scope, mtd.GetInputType())}); err != nil {
				return err
			}
		} else if _, ok := dsc.(*dpb.DescriptorProto); !ok {
			otherType := descriptorType(dsc)
			if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: invalid request type: %s is a %s, not a message", scope, fqn, otherType)}); err != nil {
				return err
			}
		} else {
			mtd.InputType = proto.String("." + fqn)
		}

		// response type must also resolve to a message
		fqn, dsc, _ = l.resolve(fd, mtd.GetOutputType(), isMessage, scopes)
		if dsc == nil {
			if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: unknown response type %s", scope, mtd.GetOutputType())}); err != nil {
				return err
			}
		} else if _, ok := dsc.(*dpb.DescriptorProto); !ok {
			otherType := descriptorType(dsc)
			if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: invalid response type: %s is a %s, not a message", scope, fqn, otherType)}); err != nil {
				return err
			}
		} else {
			mtd.OutputType = proto.String("." + fqn)
		}
	}
	return nil
}
+
// resolveOptions resolves any custom-option (extension) name parts in
// the given uninterpreted options. Each extension part is resolved to
// a field descriptor, verified to actually be an extension, and
// rewritten as a fully-qualified name with a leading dot. Errors are
// reported through the handler, and an option with an unresolvable
// part is skipped (the labeled continue) so later options are still
// checked.
func (l *linker) resolveOptions(r *parseResult, fd *dpb.FileDescriptorProto, elemType, elemName, optType string, opts []*dpb.UninterpretedOption, scopes []scope) error {
	var scope string
	if elemType != "file" {
		// error-message prefix identifying the element that declares the option
		scope = fmt.Sprintf("%s %s: ", elemType, elemName)
	}
opts:
	for _, opt := range opts {
		for _, nm := range opt.Name {
			if nm.GetIsExtension() {
				node := r.getOptionNamePartNode(nm)
				fqn, dsc, _ := l.resolve(fd, nm.GetNamePart(), isField, scopes)
				if dsc == nil {
					if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sunknown extension %s", scope, nm.GetNamePart())}); err != nil {
						return err
					}
					continue opts
				}
				if ext, ok := dsc.(*dpb.FieldDescriptorProto); !ok {
					otherType := descriptorType(dsc)
					if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a %s, not an extension", scope, nm.GetNamePart(), otherType)}); err != nil {
						return err
					}
					continue opts
				} else if ext.GetExtendee() == "" {
					if err := l.errs.handleError(ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a field but not an extension", scope, nm.GetNamePart())}); err != nil {
						return err
					}
					continue opts
				}
				nm.NamePart = proto.String("." + fqn)
			}
		}
	}
	return nil
}
+
// resolve resolves name in the context of file fd. A name starting
// with "." is already fully-qualified and is looked up directly;
// otherwise the scopes are searched innermost-last-to-outermost-first.
// The allowed predicate selects acceptable descriptor kinds; if only a
// disallowed descriptor matches, that "best guess" is still returned
// so the caller can produce a precise error message.
func (l *linker) resolve(fd *dpb.FileDescriptorProto, name string, allowed func(proto.Message) bool, scopes []scope) (fqn string, element proto.Message, proto3 bool) {
	if strings.HasPrefix(name, ".") {
		// already fully-qualified
		d, proto3 := l.findSymbol(fd, name[1:], false, map[*dpb.FileDescriptorProto]struct{}{})
		if d != nil {
			return name[1:], d, proto3
		}
	} else {
		// unqualified, so we look in the enclosing (last) scope first and move
		// towards outermost (first) scope, trying to resolve the symbol
		var bestGuess proto.Message
		var bestGuessFqn string
		var bestGuessProto3 bool
		for i := len(scopes) - 1; i >= 0; i-- {
			fqn, d, proto3 := scopes[i](name)
			if d != nil {
				if allowed(d) {
					return fqn, d, proto3
				} else if bestGuess == nil {
					bestGuess = d
					bestGuessFqn = fqn
					bestGuessProto3 = proto3
				}
			}
		}
		// we return best guess, even though it was not an allowed kind of
		// descriptor, so caller can print a better error message (e.g.
		// indicating that the name was found but that it's the wrong type)
		return bestGuessFqn, bestGuess, bestGuessProto3
	}
	return "", nil, false
}
+
+func isField(m proto.Message) bool {
+ _, ok := m.(*dpb.FieldDescriptorProto)
+ return ok
+}
+
+func isMessage(m proto.Message) bool {
+ _, ok := m.(*dpb.DescriptorProto)
+ return ok
+}
+
+func isType(m proto.Message) bool {
+ switch m.(type) {
+ case *dpb.DescriptorProto, *dpb.EnumDescriptorProto:
+ return true
+ }
+ return false
+}
+
// scope represents a lexical scope in a proto file in which messages and enums
// can be declared. Given a symbol name it returns the resolved
// fully-qualified name and descriptor (nil if not found), plus whether
// the symbol was declared in a proto3 file.
type scope func(symbol string) (fqn string, element proto.Message, proto3 bool)
+
+func fileScope(fd *dpb.FileDescriptorProto, l *linker) scope {
+ // we search symbols in this file, but also symbols in other files that have
+ // the same package as this file or a "parent" package (in protobuf,
+ // packages are a hierarchy like C++ namespaces)
+ prefixes := internal.CreatePrefixList(fd.GetPackage())
+ return func(name string) (string, proto.Message, bool) {
+ for _, prefix := range prefixes {
+ var n string
+ if prefix == "" {
+ n = name
+ } else {
+ n = prefix + "." + name
+ }
+ d, proto3 := l.findSymbol(fd, n, false, map[*dpb.FileDescriptorProto]struct{}{})
+ if d != nil {
+ return n, d, proto3
+ }
+ }
+ return "", nil, false
+ }
+}
+
+func messageScope(messageName string, proto3 bool, filePool map[string]proto.Message) scope {
+ return func(name string) (string, proto.Message, bool) {
+ n := messageName + "." + name
+ if d, ok := filePool[n]; ok {
+ return n, d, proto3
+ }
+ return "", nil, false
+ }
+}
+
+func (l *linker) findSymbol(fd *dpb.FileDescriptorProto, name string, public bool, checked map[*dpb.FileDescriptorProto]struct{}) (element proto.Message, proto3 bool) {
+ if _, ok := checked[fd]; ok {
+ // already checked this one
+ return nil, false
+ }
+ checked[fd] = struct{}{}
+ d := l.descriptorPool[fd][name]
+ if d != nil {
+ return d, isProto3(fd)
+ }
+
+ // When public = false, we are searching only directly imported symbols. But we
+ // also need to search transitive public imports due to semantics of public imports.
+ if public {
+ for _, depIndex := range fd.PublicDependency {
+ dep := fd.Dependency[depIndex]
+ depres := l.files[dep]
+ if depres == nil {
+ // we'll catch this error later
+ continue
+ }
+ if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+ return d, proto3
+ }
+ }
+ } else {
+ for _, dep := range fd.Dependency {
+ depres := l.files[dep]
+ if depres == nil {
+ // we'll catch this error later
+ continue
+ }
+ if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+ return d, proto3
+ }
+ }
+ }
+
+ return nil, false
+}
+
// isProto3 reports whether fd declares "proto3" syntax.
func isProto3(fd *dpb.FileDescriptorProto) bool {
	return fd.GetSyntax() == "proto3"
}
+
+func (l *linker) createdLinkedDescriptors() (map[string]*desc.FileDescriptor, error) {
+ names := make([]string, 0, len(l.files))
+ for name := range l.files {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ linked := map[string]*desc.FileDescriptor{}
+ for _, name := range names {
+ if _, err := l.linkFile(name, nil, nil, linked); err != nil {
+ return nil, err
+ }
+ }
+ return linked, nil
+}
+
// linkFile links the named file into a rich descriptor, first linking
// its dependencies recursively. seen carries the current import chain
// for cycle detection; rootImportLoc, when non-nil, is the source
// position of the top-level import to which any cycle error is
// attributed. Results are memoized in linked.
func (l *linker) linkFile(name string, rootImportLoc *SourcePos, seen []string, linked map[string]*desc.FileDescriptor) (*desc.FileDescriptor, error) {
	// check for import cycle
	for _, s := range seen {
		if name == s {
			// build a readable chain like "a.proto" -> "b.proto" -> "a.proto"
			var msg bytes.Buffer
			first := true
			for _, s := range seen {
				if first {
					first = false
				} else {
					msg.WriteString(" -> ")
				}
				fmt.Fprintf(&msg, "%q", s)
			}
			fmt.Fprintf(&msg, " -> %q", name)
			return nil, ErrorWithSourcePos{
				Underlying: fmt.Errorf("cycle found in imports: %s", msg.String()),
				Pos:        rootImportLoc,
			}
		}
	}
	seen = append(seen, name)

	if lfd, ok := linked[name]; ok {
		// already linked
		return lfd, nil
	}
	r := l.files[name]
	if r == nil {
		importer := seen[len(seen)-2] // len-1 is *this* file, before that is the one that imported it
		return nil, fmt.Errorf("no descriptor found for %q, imported by %q", name, importer)
	}
	var deps []*desc.FileDescriptor
	if rootImportLoc == nil {
		// try to find a source location for this "root" import
		decl := r.getFileNode(r.fd)
		fnode, ok := decl.(*fileNode)
		if ok {
			for _, dep := range fnode.imports {
				ldep, err := l.linkFile(dep.name.val, dep.name.start(), seen, linked)
				if err != nil {
					return nil, err
				}
				deps = append(deps, ldep)
			}
		} else {
			// no AST? just use the descriptor
			for _, dep := range r.fd.Dependency {
				ldep, err := l.linkFile(dep, decl.start(), seen, linked)
				if err != nil {
					return nil, err
				}
				deps = append(deps, ldep)
			}
		}
	} else {
		// we can just use the descriptor since we don't need source location
		// (we'll just attribute any import cycles found to the "root" import)
		for _, dep := range r.fd.Dependency {
			ldep, err := l.linkFile(dep, rootImportLoc, seen, linked)
			if err != nil {
				return nil, err
			}
			deps = append(deps, ldep)
		}
	}
	lfd, err := desc.CreateFileDescriptor(r.fd, deps...)
	if err != nil {
		return nil, fmt.Errorf("error linking %q: %s", name, err)
	}
	linked[name] = lfd
	return lfd, nil
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
new file mode 100644
index 0000000..04530dc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
@@ -0,0 +1,1405 @@
+package protoparse
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
+// NB: To process options, we need descriptors, but we may not have rich
+// descriptors when trying to interpret options for unlinked parsed files.
+// So we define minimal interfaces that can be backed by both rich descriptors
+// as well as their poorer cousins, plain ol' descriptor protos.
+
// descriptorish is the minimal surface shared by rich descriptors and
// plain descriptor protos (see the note above).
type descriptorish interface {
	GetFile() fileDescriptorish
	GetFullyQualifiedName() string
	AsProto() proto.Message
}

// fileDescriptorish abstracts a file descriptor and accessors for all
// of the elements it declares.
type fileDescriptorish interface {
	descriptorish
	GetFileOptions() *dpb.FileOptions
	GetPackage() string
	FindSymbol(name string) desc.Descriptor
	GetPublicDependencies() []fileDescriptorish
	GetDependencies() []fileDescriptorish
	GetMessageTypes() []msgDescriptorish
	GetExtensions() []fldDescriptorish
	GetEnumTypes() []enumDescriptorish
	GetServices() []svcDescriptorish
}

// msgDescriptorish abstracts a message descriptor and its nested elements.
type msgDescriptorish interface {
	descriptorish
	GetMessageOptions() *dpb.MessageOptions
	GetFields() []fldDescriptorish
	GetOneOfs() []oneofDescriptorish
	GetExtensionRanges() []extRangeDescriptorish
	GetNestedMessageTypes() []msgDescriptorish
	GetNestedExtensions() []fldDescriptorish
	GetNestedEnumTypes() []enumDescriptorish
}

// fldDescriptorish abstracts a field (or extension) descriptor.
type fldDescriptorish interface {
	descriptorish
	GetFieldOptions() *dpb.FieldOptions
	GetMessageType() *desc.MessageDescriptor
	GetEnumType() *desc.EnumDescriptor
	AsFieldDescriptorProto() *dpb.FieldDescriptorProto
}

// oneofDescriptorish abstracts a oneof descriptor.
type oneofDescriptorish interface {
	descriptorish
	GetOneOfOptions() *dpb.OneofOptions
}

// enumDescriptorish abstracts an enum descriptor and its values.
type enumDescriptorish interface {
	descriptorish
	GetEnumOptions() *dpb.EnumOptions
	GetValues() []enumValDescriptorish
}

// enumValDescriptorish abstracts an enum value descriptor.
type enumValDescriptorish interface {
	descriptorish
	GetEnumValueOptions() *dpb.EnumValueOptions
}

// svcDescriptorish abstracts a service descriptor and its methods.
type svcDescriptorish interface {
	descriptorish
	GetServiceOptions() *dpb.ServiceOptions
	GetMethods() []methodDescriptorish
}

// methodDescriptorish abstracts a method descriptor.
type methodDescriptorish interface {
	descriptorish
	GetMethodOptions() *dpb.MethodOptions
}
+
+// The hierarchy of descriptorish implementations backed by
+// rich descriptors:
+
// richFileDescriptorish adapts a rich *desc.FileDescriptor to the
// fileDescriptorish interface.
type richFileDescriptorish struct {
	*desc.FileDescriptor
}

// GetFile returns the receiver: a file is its own enclosing file.
func (d richFileDescriptorish) GetFile() fileDescriptorish {
	return d
}

// GetPublicDependencies wraps each public dependency as a richFileDescriptorish.
func (d richFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
	deps := d.FileDescriptor.GetPublicDependencies()
	ret := make([]fileDescriptorish, len(deps))
	for i, d := range deps {
		ret[i] = richFileDescriptorish{FileDescriptor: d}
	}
	return ret
}

// GetDependencies wraps each dependency as a richFileDescriptorish.
func (d richFileDescriptorish) GetDependencies() []fileDescriptorish {
	deps := d.FileDescriptor.GetDependencies()
	ret := make([]fileDescriptorish, len(deps))
	for i, d := range deps {
		ret[i] = richFileDescriptorish{FileDescriptor: d}
	}
	return ret
}

// GetMessageTypes wraps each top-level message as a richMsgDescriptorish.
func (d richFileDescriptorish) GetMessageTypes() []msgDescriptorish {
	msgs := d.FileDescriptor.GetMessageTypes()
	ret := make([]msgDescriptorish, len(msgs))
	for i, m := range msgs {
		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
	}
	return ret
}

// GetExtensions wraps each top-level extension as a richFldDescriptorish.
func (d richFileDescriptorish) GetExtensions() []fldDescriptorish {
	flds := d.FileDescriptor.GetExtensions()
	ret := make([]fldDescriptorish, len(flds))
	for i, f := range flds {
		ret[i] = richFldDescriptorish{FieldDescriptor: f}
	}
	return ret
}

// GetEnumTypes wraps each top-level enum as a richEnumDescriptorish.
func (d richFileDescriptorish) GetEnumTypes() []enumDescriptorish {
	ens := d.FileDescriptor.GetEnumTypes()
	ret := make([]enumDescriptorish, len(ens))
	for i, en := range ens {
		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
	}
	return ret
}

// GetServices wraps each service as a richSvcDescriptorish.
func (d richFileDescriptorish) GetServices() []svcDescriptorish {
	svcs := d.FileDescriptor.GetServices()
	ret := make([]svcDescriptorish, len(svcs))
	for i, s := range svcs {
		ret[i] = richSvcDescriptorish{ServiceDescriptor: s}
	}
	return ret
}
+
// richMsgDescriptorish adapts a rich *desc.MessageDescriptor to the
// msgDescriptorish interface.
type richMsgDescriptorish struct {
	*desc.MessageDescriptor
}

// GetFile wraps the message's enclosing file.
func (d richMsgDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.MessageDescriptor.GetFile()}
}

// GetFields wraps each field as a richFldDescriptorish.
func (d richMsgDescriptorish) GetFields() []fldDescriptorish {
	flds := d.MessageDescriptor.GetFields()
	ret := make([]fldDescriptorish, len(flds))
	for i, f := range flds {
		ret[i] = richFldDescriptorish{FieldDescriptor: f}
	}
	return ret
}

// GetOneOfs wraps each oneof as a richOneOfDescriptorish.
func (d richMsgDescriptorish) GetOneOfs() []oneofDescriptorish {
	oos := d.MessageDescriptor.GetOneOfs()
	ret := make([]oneofDescriptorish, len(oos))
	for i, oo := range oos {
		ret[i] = richOneOfDescriptorish{OneOfDescriptor: oo}
	}
	return ret
}

// GetExtensionRanges wraps each extension range from the underlying
// descriptor proto, qualified by the message's fully-qualified name.
func (d richMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish {
	md := d.MessageDescriptor
	mdFqn := md.GetFullyQualifiedName()
	extrs := md.AsDescriptorProto().GetExtensionRange()
	ret := make([]extRangeDescriptorish, len(extrs))
	for i, extr := range extrs {
		ret[i] = extRangeDescriptorish{
			er:   extr,
			qual: mdFqn,
			file: richFileDescriptorish{FileDescriptor: md.GetFile()},
		}
	}
	return ret
}

// GetNestedMessageTypes wraps each nested message as a richMsgDescriptorish.
func (d richMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish {
	msgs := d.MessageDescriptor.GetNestedMessageTypes()
	ret := make([]msgDescriptorish, len(msgs))
	for i, m := range msgs {
		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
	}
	return ret
}

// GetNestedExtensions wraps each nested extension as a richFldDescriptorish.
func (d richMsgDescriptorish) GetNestedExtensions() []fldDescriptorish {
	flds := d.MessageDescriptor.GetNestedExtensions()
	ret := make([]fldDescriptorish, len(flds))
	for i, f := range flds {
		ret[i] = richFldDescriptorish{FieldDescriptor: f}
	}
	return ret
}

// GetNestedEnumTypes wraps each nested enum as a richEnumDescriptorish.
func (d richMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish {
	ens := d.MessageDescriptor.GetNestedEnumTypes()
	ret := make([]enumDescriptorish, len(ens))
	for i, en := range ens {
		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
	}
	return ret
}
+
// richFldDescriptorish adapts a rich *desc.FieldDescriptor to the
// fldDescriptorish interface.
type richFldDescriptorish struct {
	*desc.FieldDescriptor
}

// GetFile wraps the field's enclosing file.
func (d richFldDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.FieldDescriptor.GetFile()}
}

// AsFieldDescriptorProto returns the underlying field descriptor proto.
func (d richFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
	return d.FieldDescriptor.AsFieldDescriptorProto()
}
+
// richOneOfDescriptorish adapts a rich *desc.OneOfDescriptor to the
// oneofDescriptorish interface.
type richOneOfDescriptorish struct {
	*desc.OneOfDescriptor
}

// GetFile wraps the oneof's enclosing file.
func (d richOneOfDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.OneOfDescriptor.GetFile()}
}
+
// richEnumDescriptorish adapts a rich *desc.EnumDescriptor to the
// enumDescriptorish interface.
type richEnumDescriptorish struct {
	*desc.EnumDescriptor
}

// GetFile wraps the enum's enclosing file.
func (d richEnumDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.EnumDescriptor.GetFile()}
}

// GetValues wraps each enum value as a richEnumValDescriptorish.
func (d richEnumDescriptorish) GetValues() []enumValDescriptorish {
	vals := d.EnumDescriptor.GetValues()
	ret := make([]enumValDescriptorish, len(vals))
	for i, val := range vals {
		ret[i] = richEnumValDescriptorish{EnumValueDescriptor: val}
	}
	return ret
}
+
// richEnumValDescriptorish adapts a rich *desc.EnumValueDescriptor to
// the enumValDescriptorish interface.
type richEnumValDescriptorish struct {
	*desc.EnumValueDescriptor
}

// GetFile wraps the enum value's enclosing file.
func (d richEnumValDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.EnumValueDescriptor.GetFile()}
}
+
// richSvcDescriptorish adapts a rich *desc.ServiceDescriptor to the
// svcDescriptorish interface.
type richSvcDescriptorish struct {
	*desc.ServiceDescriptor
}

// GetFile wraps the service's enclosing file.
func (d richSvcDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.ServiceDescriptor.GetFile()}
}

// GetMethods wraps each method as a richMethodDescriptorish.
func (d richSvcDescriptorish) GetMethods() []methodDescriptorish {
	mtds := d.ServiceDescriptor.GetMethods()
	ret := make([]methodDescriptorish, len(mtds))
	for i, mtd := range mtds {
		ret[i] = richMethodDescriptorish{MethodDescriptor: mtd}
	}
	return ret
}
+
// richMethodDescriptorish adapts a rich *desc.MethodDescriptor to the
// methodDescriptorish interface.
type richMethodDescriptorish struct {
	*desc.MethodDescriptor
}

// GetFile wraps the method's enclosing file.
func (d richMethodDescriptorish) GetFile() fileDescriptorish {
	return richFileDescriptorish{FileDescriptor: d.MethodDescriptor.GetFile()}
}
+
+// The hierarchy of descriptorish implementations backed by
+// plain descriptor protos:
+
// poorFileDescriptorish adapts a plain *dpb.FileDescriptorProto to the
// fileDescriptorish interface, for use before rich descriptors exist.
type poorFileDescriptorish struct {
	*dpb.FileDescriptorProto
}

// GetFile returns the receiver: a file is its own enclosing file.
func (d poorFileDescriptorish) GetFile() fileDescriptorish {
	return d
}

// GetFullyQualifiedName returns the file's name (files have no package
// qualification of their own name).
func (d poorFileDescriptorish) GetFullyQualifiedName() string {
	return d.FileDescriptorProto.GetName()
}

// AsProto returns the underlying descriptor proto.
func (d poorFileDescriptorish) AsProto() proto.Message {
	return d.FileDescriptorProto
}

// GetFileOptions returns the file's options.
func (d poorFileDescriptorish) GetFileOptions() *dpb.FileOptions {
	return d.FileDescriptorProto.GetOptions()
}

// FindSymbol always returns nil: proto-backed files cannot perform
// symbol lookup.
func (d poorFileDescriptorish) FindSymbol(name string) desc.Descriptor {
	return nil
}

// GetPublicDependencies always returns nil for proto-backed files.
func (d poorFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
	return nil
}

// GetDependencies always returns nil for proto-backed files.
func (d poorFileDescriptorish) GetDependencies() []fileDescriptorish {
	return nil
}

// GetMessageTypes wraps each top-level message, qualified by the
// file's package name.
func (d poorFileDescriptorish) GetMessageTypes() []msgDescriptorish {
	msgs := d.FileDescriptorProto.GetMessageType()
	pkg := d.FileDescriptorProto.GetPackage()
	ret := make([]msgDescriptorish, len(msgs))
	for i, m := range msgs {
		ret[i] = poorMsgDescriptorish{
			DescriptorProto: m,
			qual:            pkg,
			file:            d,
		}
	}
	return ret
}

// GetExtensions wraps each top-level extension, qualified by the
// file's package name.
func (d poorFileDescriptorish) GetExtensions() []fldDescriptorish {
	exts := d.FileDescriptorProto.GetExtension()
	pkg := d.FileDescriptorProto.GetPackage()
	ret := make([]fldDescriptorish, len(exts))
	for i, e := range exts {
		ret[i] = poorFldDescriptorish{
			FieldDescriptorProto: e,
			qual:                 pkg,
			file:                 d,
		}
	}
	return ret
}

// GetEnumTypes wraps each top-level enum, qualified by the file's
// package name.
func (d poorFileDescriptorish) GetEnumTypes() []enumDescriptorish {
	ens := d.FileDescriptorProto.GetEnumType()
	pkg := d.FileDescriptorProto.GetPackage()
	ret := make([]enumDescriptorish, len(ens))
	for i, e := range ens {
		ret[i] = poorEnumDescriptorish{
			EnumDescriptorProto: e,
			qual:                pkg,
			file:                d,
		}
	}
	return ret
}

// GetServices wraps each service, qualified by the file's package name.
func (d poorFileDescriptorish) GetServices() []svcDescriptorish {
	svcs := d.FileDescriptorProto.GetService()
	pkg := d.FileDescriptorProto.GetPackage()
	ret := make([]svcDescriptorish, len(svcs))
	for i, s := range svcs {
		ret[i] = poorSvcDescriptorish{
			ServiceDescriptorProto: s,
			qual:                   pkg,
			file:                   d,
		}
	}
	return ret
}
+
+type poorMsgDescriptorish struct { // msgDescriptorish backed by a raw DescriptorProto
+ *dpb.DescriptorProto
+ qual string // fully-qualified name of the enclosing scope (package or parent message)
+ file fileDescriptorish // the containing file
+}
+
+func (d poorMsgDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorMsgDescriptorish) GetFullyQualifiedName() string { // enclosing scope + "." + simple name
+ return qualify(d.qual, d.DescriptorProto.GetName())
+}
+
+func qualify(qual, name string) string { // joins scope and name with a dot; empty scope yields just the name
+ if qual == "" {
+ return name
+ } else {
+ return fmt.Sprintf("%s.%s", qual, name)
+ }
+}
+
+func (d poorMsgDescriptorish) AsProto() proto.Message {
+ return d.DescriptorProto
+}
+
+func (d poorMsgDescriptorish) GetMessageOptions() *dpb.MessageOptions {
+ return d.DescriptorProto.GetOptions()
+}
+
+func (d poorMsgDescriptorish) GetFields() []fldDescriptorish { // wrap each field, qualified by this message's FQN
+ flds := d.DescriptorProto.GetField()
+ ret := make([]fldDescriptorish, len(flds))
+ for i, f := range flds {
+ ret[i] = poorFldDescriptorish{
+ FieldDescriptorProto: f,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+func (d poorMsgDescriptorish) GetOneOfs() []oneofDescriptorish { // wrap each oneof declaration, qualified by this message's FQN
+ oos := d.DescriptorProto.GetOneofDecl()
+ ret := make([]oneofDescriptorish, len(oos))
+ for i, oo := range oos {
+ ret[i] = poorOneOfDescriptorish{
+ OneofDescriptorProto: oo,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+func (d poorMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish { // wrap each extension range, qualified by this message's FQN
+ mdFqn := d.GetFullyQualifiedName()
+ extrs := d.DescriptorProto.GetExtensionRange()
+ ret := make([]extRangeDescriptorish, len(extrs))
+ for i, extr := range extrs {
+ ret[i] = extRangeDescriptorish{
+ er: extr,
+ qual: mdFqn,
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish { // wrap each nested message, qualified by this message's FQN
+ msgs := d.DescriptorProto.GetNestedType()
+ ret := make([]msgDescriptorish, len(msgs))
+ for i, m := range msgs {
+ ret[i] = poorMsgDescriptorish{
+ DescriptorProto: m,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedExtensions() []fldDescriptorish { // wrap each extension declared inside this message
+ flds := d.DescriptorProto.GetExtension()
+ ret := make([]fldDescriptorish, len(flds))
+ for i, f := range flds {
+ ret[i] = poorFldDescriptorish{
+ FieldDescriptorProto: f,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish { // wrap each enum declared inside this message
+ ens := d.DescriptorProto.GetEnumType()
+ ret := make([]enumDescriptorish, len(ens))
+ for i, en := range ens {
+ ret[i] = poorEnumDescriptorish{
+ EnumDescriptorProto: en,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+type poorFldDescriptorish struct { // fldDescriptorish backed by a raw FieldDescriptorProto
+ *dpb.FieldDescriptorProto
+ qual string // fully-qualified name of the enclosing scope
+ file fileDescriptorish // the containing file
+}
+
+func (d poorFldDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorFldDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.FieldDescriptorProto.GetName())
+}
+
+func (d poorFldDescriptorish) AsProto() proto.Message {
+ return d.FieldDescriptorProto
+}
+
+func (d poorFldDescriptorish) GetFieldOptions() *dpb.FieldOptions {
+ return d.FieldDescriptorProto.GetOptions()
+}
+
+func (d poorFldDescriptorish) GetMessageType() *desc.MessageDescriptor { // type resolution requires linked descriptors; not available here
+ return nil
+}
+
+func (d poorFldDescriptorish) GetEnumType() *desc.EnumDescriptor { // see GetMessageType: always nil for the "poor" variant
+ return nil
+}
+
+type poorOneOfDescriptorish struct { // oneofDescriptorish backed by a raw OneofDescriptorProto
+ *dpb.OneofDescriptorProto
+ qual string // fully-qualified name of the enclosing message
+ file fileDescriptorish // the containing file
+}
+
+func (d poorOneOfDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorOneOfDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.OneofDescriptorProto.GetName())
+}
+
+func (d poorOneOfDescriptorish) AsProto() proto.Message {
+ return d.OneofDescriptorProto
+}
+
+func (d poorOneOfDescriptorish) GetOneOfOptions() *dpb.OneofOptions {
+ return d.OneofDescriptorProto.GetOptions()
+}
+
+func (d poorFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto { // NOTE(review): poorFldDescriptorish method placed among the oneof methods; kept in place to match upstream
+ return d.FieldDescriptorProto
+}
+
+type poorEnumDescriptorish struct { // enumDescriptorish backed by a raw EnumDescriptorProto
+ *dpb.EnumDescriptorProto
+ qual string // fully-qualified name of the enclosing scope
+ file fileDescriptorish // the containing file
+}
+
+func (d poorEnumDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorEnumDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.EnumDescriptorProto.GetName())
+}
+
+func (d poorEnumDescriptorish) AsProto() proto.Message {
+ return d.EnumDescriptorProto
+}
+
+func (d poorEnumDescriptorish) GetEnumOptions() *dpb.EnumOptions {
+ return d.EnumDescriptorProto.GetOptions()
+}
+
+func (d poorEnumDescriptorish) GetValues() []enumValDescriptorish { // wrap each enum value, qualified by this enum's FQN
+ vals := d.EnumDescriptorProto.GetValue()
+ ret := make([]enumValDescriptorish, len(vals))
+ for i, v := range vals {
+ ret[i] = poorEnumValDescriptorish{
+ EnumValueDescriptorProto: v,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+type poorEnumValDescriptorish struct { // enumValDescriptorish backed by a raw EnumValueDescriptorProto
+ *dpb.EnumValueDescriptorProto
+ qual string // fully-qualified name of the enclosing enum
+ file fileDescriptorish // the containing file
+}
+
+func (d poorEnumValDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorEnumValDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.EnumValueDescriptorProto.GetName())
+}
+
+func (d poorEnumValDescriptorish) AsProto() proto.Message {
+ return d.EnumValueDescriptorProto
+}
+
+func (d poorEnumValDescriptorish) GetEnumValueOptions() *dpb.EnumValueOptions {
+ return d.EnumValueDescriptorProto.GetOptions()
+}
+
+type poorSvcDescriptorish struct { // svcDescriptorish backed by a raw ServiceDescriptorProto
+ *dpb.ServiceDescriptorProto
+ qual string // package name of the containing file
+ file fileDescriptorish // the containing file
+}
+
+func (d poorSvcDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorSvcDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.ServiceDescriptorProto.GetName())
+}
+
+func (d poorSvcDescriptorish) AsProto() proto.Message {
+ return d.ServiceDescriptorProto
+}
+
+func (d poorSvcDescriptorish) GetServiceOptions() *dpb.ServiceOptions {
+ return d.ServiceDescriptorProto.GetOptions()
+}
+
+func (d poorSvcDescriptorish) GetMethods() []methodDescriptorish { // wrap each method, qualified by this service's FQN
+ mtds := d.ServiceDescriptorProto.GetMethod()
+ ret := make([]methodDescriptorish, len(mtds))
+ for i, m := range mtds {
+ ret[i] = poorMethodDescriptorish{
+ MethodDescriptorProto: m,
+ qual: d.GetFullyQualifiedName(),
+ file: d.file,
+ }
+ }
+ return ret
+}
+
+type poorMethodDescriptorish struct { // methodDescriptorish backed by a raw MethodDescriptorProto
+ *dpb.MethodDescriptorProto
+ qual string // fully-qualified name of the enclosing service
+ file fileDescriptorish // the containing file
+}
+
+func (d poorMethodDescriptorish) GetFile() fileDescriptorish {
+ return d.file
+}
+
+func (d poorMethodDescriptorish) GetFullyQualifiedName() string {
+ return qualify(d.qual, d.MethodDescriptorProto.GetName())
+}
+
+func (d poorMethodDescriptorish) AsProto() proto.Message {
+ return d.MethodDescriptorProto
+}
+
+func (d poorMethodDescriptorish) GetMethodOptions() *dpb.MethodOptions {
+ return d.MethodDescriptorProto.GetOptions()
+}
+
+type extRangeDescriptorish struct { // descriptorish wrapper for a message's extension range
+ er *dpb.DescriptorProto_ExtensionRange
+ qual string // fully-qualified name of the enclosing message
+ file fileDescriptorish // the containing file
+}
+
+func (er extRangeDescriptorish) GetFile() fileDescriptorish {
+ return er.file
+}
+
+func (er extRangeDescriptorish) GetFullyQualifiedName() string { // synthesized name "msg.start-end"; GetEnd() is exclusive, hence the -1
+ return qualify(er.qual, fmt.Sprintf("%d-%d", er.er.GetStart(), er.er.GetEnd()-1))
+}
+
+func (er extRangeDescriptorish) AsProto() proto.Message {
+ return er.er
+}
+
+func (er extRangeDescriptorish) GetExtensionRangeOptions() *dpb.ExtensionRangeOptions {
+ return er.er.GetOptions()
+}
+
+func interpretFileOptions(r *parseResult, fd fileDescriptorish) error { // resolves uninterpreted options on the file and recursively on all elements it contains
+ opts := fd.GetFileOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, fd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain // in lenient mode some options may remain uninterpreted
+ }
+ }
+ }
+ for _, md := range fd.GetMessageTypes() {
+ if err := interpretMessageOptions(r, md); err != nil {
+ return err
+ }
+ }
+ for _, fld := range fd.GetExtensions() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, ed := range fd.GetEnumTypes() {
+ if err := interpretEnumOptions(r, ed); err != nil {
+ return err
+ }
+ }
+ for _, sd := range fd.GetServices() { // services and their methods are handled inline rather than via helpers
+ opts := sd.GetServiceOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, sd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ for _, mtd := range sd.GetMethods() {
+ opts := mtd.GetMethodOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, mtd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func interpretMessageOptions(r *parseResult, md msgDescriptorish) error { // resolves options on a message and, recursively, on its fields, oneofs, extension ranges, and nested types
+ opts := md.GetMessageOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, md, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain // in lenient mode some options may remain uninterpreted
+ }
+ }
+ }
+ for _, fld := range md.GetFields() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, ood := range md.GetOneOfs() {
+ opts := ood.GetOneOfOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, ood, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ for _, fld := range md.GetNestedExtensions() {
+ if err := interpretFieldOptions(r, fld); err != nil {
+ return err
+ }
+ }
+ for _, er := range md.GetExtensionRanges() {
+ opts := er.GetExtensionRangeOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, er, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ for _, nmd := range md.GetNestedMessageTypes() { // recurse into nested messages
+ if err := interpretMessageOptions(r, nmd); err != nil {
+ return err
+ }
+ }
+ for _, ed := range md.GetNestedEnumTypes() {
+ if err := interpretEnumOptions(r, ed); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func interpretFieldOptions(r *parseResult, fld fldDescriptorish) error { // handles the json_name and default pseudo-options specially, then interprets the rest
+ opts := fld.GetFieldOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ uo := opts.UninterpretedOption
+ scope := fmt.Sprintf("field %s", fld.GetFullyQualifiedName())
+
+ // process json_name pseudo-option
+ if index, err := findOption(r, scope, uo, "json_name"); err != nil && !r.lenient {
+ return err
+ } else if err == nil && index >= 0 {
+ opt := uo[index]
+ optNode := r.getOptionNode(opt)
+
+ // attribute source code info
+ if on, ok := optNode.(*optionNode); ok {
+ r.interpretedOptions[on] = []int32{-1, internal.Field_jsonNameTag} // -1 path element marks a pseudo-option
+ }
+ uo = removeOption(uo, index)
+ if opt.StringValue == nil {
+ return ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting string value for json_name option", scope)}
+ }
+ fld.AsFieldDescriptorProto().JsonName = proto.String(string(opt.StringValue)) // stored directly on the descriptor, not in options
+ }
+
+ // and process default pseudo-option
+ if index, err := processDefaultOption(r, scope, fld, uo); err != nil && !r.lenient {
+ return err
+ } else if err == nil && index >= 0 {
+ // attribute source code info
+ optNode := r.getOptionNode(uo[index])
+ if on, ok := optNode.(*optionNode); ok {
+ r.interpretedOptions[on] = []int32{-1, internal.Field_defaultTag}
+ }
+ uo = removeOption(uo, index)
+ }
+
+ if len(uo) == 0 {
+ // no real options, only pseudo-options above? clear out options
+ fld.AsFieldDescriptorProto().Options = nil
+ } else if remain, err := interpretOptions(r, fld, opts, uo); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ return nil
+}
+
+func processDefaultOption(res *parseResult, scope string, fld fldDescriptorish, uos []*dpb.UninterpretedOption) (defaultIndex int, err error) { // finds and applies the "default" pseudo-option; returns its index in uos, or -1 if absent
+ found, err := findOption(res, scope, uos, "default")
+ if err != nil {
+ return -1, err
+ } else if found == -1 {
+ return -1, nil // no default option present
+ }
+ opt := uos[found]
+ optNode := res.getOptionNode(opt)
+ fdp := fld.AsFieldDescriptorProto()
+ if fdp.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED { // defaults are only valid on singular scalar fields
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is repeated", scope)}
+ }
+ if fdp.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP || fdp.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE {
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is a message", scope)}
+ }
+ val := optNode.getValue()
+ if _, ok := val.(*aggregateLiteralNode); ok {
+ return -1, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%s: default value cannot be an aggregate", scope)}
+ }
+ mc := &messageContext{
+ res: res,
+ file: fld.GetFile(),
+ elementName: fld.GetFullyQualifiedName(),
+ elementType: descriptorType(fld.AsProto()),
+ option: opt,
+ }
+ v, err := fieldValue(res, mc, fld, val, true) // enumAsString=true: DefaultValue stores enum names, not numbers
+ if err != nil {
+ return -1, err
+ }
+ if str, ok := v.(string); ok {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(str)
+ } else if b, ok := v.([]byte); ok {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(encodeDefaultBytes(b)) // bytes defaults are stored escaped
+ } else {
+ var flt float64
+ var ok bool
+ if flt, ok = v.(float64); !ok {
+ var flt32 float32
+ if flt32, ok = v.(float32); ok {
+ flt = float64(flt32)
+ }
+ }
+ if ok { // float values get special spellings for inf/nan; everything else is formatted with %v
+ if math.IsInf(flt, 1) {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("inf")
+ } else if ok && math.IsInf(flt, -1) { // NOTE(review): the "ok &&" here and below is redundant (ok is already true on this path)
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("-inf")
+ } else if ok && math.IsNaN(flt) {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String("nan")
+ } else {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+ }
+ } else {
+ fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+ }
+ }
+ return found, nil
+}
+
+func encodeDefaultBytes(b []byte) string { // renders a bytes default value in escaped form for FieldDescriptorProto.DefaultValue
+ var buf bytes.Buffer
+ writeEscapedBytes(&buf, b)
+ return buf.String()
+}
+
+func interpretEnumOptions(r *parseResult, ed enumDescriptorish) error { // resolves options on an enum and on each of its values
+ opts := ed.GetEnumOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, ed, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain // in lenient mode some options may remain uninterpreted
+ }
+ }
+ }
+ for _, evd := range ed.GetValues() {
+ opts := evd.GetEnumValueOptions()
+ if opts != nil {
+ if len(opts.UninterpretedOption) > 0 {
+ if remain, err := interpretOptions(r, evd, opts, opts.UninterpretedOption); err != nil {
+ return err
+ } else {
+ opts.UninterpretedOption = remain
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func interpretOptions(res *parseResult, element descriptorish, opts proto.Message, uninterpreted []*dpb.UninterpretedOption) ([]*dpb.UninterpretedOption, error) { // interprets options into opts via a dynamic message; returns options left uninterpreted (only non-empty in lenient mode)
+ optsd, err := desc.LoadMessageDescriptorForMessage(opts)
+ if err != nil {
+ if res.lenient {
+ return uninterpreted, nil // lenient: give up quietly and keep everything uninterpreted
+ }
+ return nil, err
+ }
+ dm := dynamic.NewMessage(optsd) // work on a dynamic copy so extensions can be set reflectively
+ err = dm.ConvertFrom(opts)
+ if err != nil {
+ if res.lenient {
+ return uninterpreted, nil
+ }
+ node := res.nodes[element.AsProto()]
+ return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+ }
+
+ mc := &messageContext{res: res, file: element.GetFile(), elementName: element.GetFullyQualifiedName(), elementType: descriptorType(element.AsProto())}
+ var remain []*dpb.UninterpretedOption
+ for _, uo := range uninterpreted {
+ node := res.getOptionNode(uo)
+ if !uo.Name[0].GetIsExtension() && uo.Name[0].GetNamePart() == "uninterpreted_option" {
+ if res.lenient {
+ remain = append(remain, uo)
+ continue
+ }
+ // uninterpreted_option might be found reflectively, but is not actually valid for use
+ return nil, ErrorWithSourcePos{Pos: node.getName().start(), Underlying: fmt.Errorf("%vinvalid option 'uninterpreted_option'", mc)}
+ }
+ mc.option = uo
+ path, err := interpretField(res, mc, element, dm, uo, 0, nil)
+ if err != nil {
+ if res.lenient {
+ remain = append(remain, uo) // lenient: keep the failing option for later instead of erroring
+ continue
+ }
+ return nil, err
+ }
+ if optn, ok := node.(*optionNode); ok {
+ res.interpretedOptions[optn] = path // record the field path for source code info
+ }
+ }
+
+ if err := dm.ValidateRecursive(); err != nil {
+ // if lenient, we'll let this pass, but it means that some required field was not set!
+ // TODO: do this in a more granular way, so we can validate individual fields
+ // and leave them uninterpreted, instead of just having to live with the
+ // thing having invalid data in extensions.
+ if !res.lenient {
+ node := res.nodes[element.AsProto()]
+ return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("error in %s options: %v", descriptorType(element.AsProto()), err)}
+ }
+ }
+
+ if res.lenient {
+ // If we're lenient, then we don't want to clobber the passed in message
+ // and leave it partially populated. So we convert into a copy first
+ optsClone := proto.Clone(opts)
+ if err := dm.ConvertTo(optsClone); err != nil {
+ // TODO: do this in a more granular way, so we can convert individual
+ // fields and leave bad ones uninterpreted instead of skipping all of
+ // the work we've done so far.
+ return uninterpreted, nil
+ }
+ // conversion from dynamic message above worked, so now
+ // it is safe to overwrite the passed in message
+ opts.Reset()
+ proto.Merge(opts, optsClone)
+
+ } else {
+ // not lenient: try to convert into the passed in message
+ // and fail if not successful
+ if err := dm.ConvertTo(opts); err != nil {
+ node := res.nodes[element.AsProto()]
+ return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+ }
+ }
+
+ return remain, nil
+}
+
+func interpretField(res *parseResult, mc *messageContext, element descriptorish, dm *dynamic.Message, opt *dpb.UninterpretedOption, nameIndex int, pathPrefix []int32) (path []int32, err error) { // resolves one component of a dotted option name against dm, recursing for the remaining components; returns the source-info field-number path
+ var fld *desc.FieldDescriptor
+ nm := opt.GetName()[nameIndex]
+ node := res.getOptionNamePartNode(nm)
+ if nm.GetIsExtension() { // extension name component: "(ext.name)"
+ extName := nm.GetNamePart()
+ if extName[0] == '.' {
+ extName = extName[1:] /* skip leading dot */
+ }
+ fld = findExtension(element.GetFile(), extName, false, map[fileDescriptorish]struct{}{})
+ if fld == nil {
+ return nil, ErrorWithSourcePos{
+ Pos: node.start(),
+ Underlying: fmt.Errorf("%vunrecognized extension %s of %s",
+ mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName()),
+ }
+ }
+ if fld.GetOwner().GetFullyQualifiedName() != dm.GetMessageDescriptor().GetFullyQualifiedName() { // the extension must extend the current options message
+ return nil, ErrorWithSourcePos{
+ Pos: node.start(),
+ Underlying: fmt.Errorf("%vextension %s should extend %s but instead extends %s",
+ mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName(), fld.GetOwner().GetFullyQualifiedName()),
+ }
+ }
+ } else { // plain field name component
+ fld = dm.GetMessageDescriptor().FindFieldByName(nm.GetNamePart())
+ if fld == nil {
+ return nil, ErrorWithSourcePos{
+ Pos: node.start(),
+ Underlying: fmt.Errorf("%vfield %s of %s does not exist",
+ mc, nm.GetNamePart(), dm.GetMessageDescriptor().GetFullyQualifiedName()),
+ }
+ }
+ }
+
+ path = append(pathPrefix, fld.GetNumber())
+
+ if len(opt.GetName()) > nameIndex+1 { // more name components follow, so this component must be a singular message field
+ nextnm := opt.GetName()[nameIndex+1]
+ nextnode := res.getOptionNamePartNode(nextnm)
+ if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE {
+ return nil, ErrorWithSourcePos{
+ Pos: nextnode.start(),
+ Underlying: fmt.Errorf("%vcannot set field %s because %s is not a message",
+ mc, nextnm.GetNamePart(), nm.GetNamePart()),
+ }
+ }
+ if fld.IsRepeated() {
+ return nil, ErrorWithSourcePos{
+ Pos: nextnode.start(),
+ Underlying: fmt.Errorf("%vcannot set field %s because %s is repeated (must use an aggregate)",
+ mc, nextnm.GetNamePart(), nm.GetNamePart()),
+ }
+ }
+ var fdm *dynamic.Message
+ var err error
+ if dm.HasField(fld) { // reuse the existing sub-message if one was already set
+ var v interface{}
+ v, err = dm.TryGetField(fld)
+ fdm, _ = v.(*dynamic.Message)
+ } else {
+ fdm = dynamic.NewMessage(fld.GetMessageType())
+ err = dm.TrySetField(fld, fdm)
+ }
+ if err != nil {
+ return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+ }
+ // recurse to set next part of name
+ return interpretField(res, mc, element, fdm, opt, nameIndex+1, path)
+ }
+
+ optNode := res.getOptionNode(opt)
+ if err := setOptionField(res, mc, dm, fld, node, optNode.getValue()); err != nil {
+ return nil, err
+ }
+ if fld.IsRepeated() {
+ path = append(path, int32(dm.FieldLength(fld))-1) // path index of the element just appended
+ }
+ return path, nil
+}
+
+func findExtension(fd fileDescriptorish, name string, public bool, checked map[fileDescriptorish]struct{}) *desc.FieldDescriptor { // searches fd and its imports for an extension field named name; checked guards against revisiting files
+ if _, ok := checked[fd]; ok {
+ return nil
+ }
+ checked[fd] = struct{}{}
+ d := fd.FindSymbol(name)
+ if d != nil {
+ if fld, ok := d.(*desc.FieldDescriptor); ok {
+ return fld
+ }
+ return nil // symbol exists but is not an extension field
+ }
+
+ // When public = false, we are searching only directly imported symbols. But we
+ // also need to search transitive public imports due to semantics of public imports.
+ if public {
+ for _, dep := range fd.GetPublicDependencies() {
+ d := findExtension(dep, name, true, checked)
+ if d != nil {
+ return d
+ }
+ }
+ } else {
+ for _, dep := range fd.GetDependencies() {
+ d := findExtension(dep, name, true, checked)
+ if d != nil {
+ return d
+ }
+ }
+ }
+ return nil
+}
+
+func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, fld *desc.FieldDescriptor, name node, val valueNode) error { // converts val and stores it into field fld of dm, handling array literals for repeated fields
+ v := val.value()
+ if sl, ok := v.([]valueNode); ok {
+ // handle slices a little differently than the others
+ if !fld.IsRepeated() {
+ return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue is an array but field is not repeated", mc)}
+ }
+ origPath := mc.optAggPath
+ defer func() {
+ mc.optAggPath = origPath // restore the aggregate path on exit
+ }()
+ for index, item := range sl {
+ mc.optAggPath = fmt.Sprintf("%s[%d]", origPath, index) // track position for error messages
+ if v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, item, false); err != nil {
+ return err
+ } else if err = dm.TryAddRepeatedField(fld, v); err != nil {
+ return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
+ }
+ }
+ return nil
+ }
+
+ v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, val, false)
+ if err != nil {
+ return err
+ }
+ if fld.IsRepeated() {
+ err = dm.TryAddRepeatedField(fld, v)
+ } else {
+ if dm.HasField(fld) { // a non-repeated option may only be set once
+ return ErrorWithSourcePos{Pos: name.start(), Underlying: fmt.Errorf("%vnon-repeated option field %s already set", mc, fieldName(fld))}
+ }
+ err = dm.TrySetField(fld, v)
+ }
+ if err != nil {
+ return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
+ }
+
+ return nil
+}
+
+type messageContext struct { // context used to prefix error messages with the element/option being processed
+ res *parseResult
+ file fileDescriptorish // file containing the element
+ elementType string // e.g. "file", "field" (from descriptorType)
+ elementName string // fully-qualified name of the element
+ option *dpb.UninterpretedOption // option currently being interpreted, if any
+ optAggPath string // position within an aggregate literal, e.g. "foo[2].bar"
+}
+
+func (c *messageContext) String() string { // renders the context as an error-message prefix; empty pieces are omitted
+ var ctx bytes.Buffer
+ if c.elementType != "file" {
+ fmt.Fprintf(&ctx, "%s %s: ", c.elementType, c.elementName)
+ }
+ if c.option != nil && c.option.Name != nil {
+ ctx.WriteString("option ")
+ writeOptionName(&ctx, c.option.Name)
+ if c.res.nodes == nil {
+ // if we have no source position info, try to provide as much context
+ // as possible (if nodes != nil, we don't need this because any errors
+ // will actually have file and line numbers)
+ if c.optAggPath != "" {
+ fmt.Fprintf(&ctx, " at %s", c.optAggPath)
+ }
+ }
+ ctx.WriteString(": ")
+ }
+ return ctx.String()
+}
+
+func writeOptionName(buf *bytes.Buffer, parts []*dpb.UninterpretedOption_NamePart) { // renders a dotted option name, wrapping extension parts in parentheses
+ first := true
+ for _, p := range parts {
+ if first {
+ first = false
+ } else {
+ buf.WriteByte('.')
+ }
+ nm := p.GetNamePart()
+ if nm[0] == '.' {
+ // skip leading dot
+ nm = nm[1:]
+ }
+ if p.GetIsExtension() {
+ buf.WriteByte('(')
+ buf.WriteString(nm)
+ buf.WriteByte(')')
+ } else {
+ buf.WriteString(nm)
+ }
+ }
+}
+
+func fieldName(fld *desc.FieldDescriptor) string { // extensions are shown fully qualified, regular fields by simple name
+ if fld.IsExtension() {
+ return fld.GetFullyQualifiedName()
+ } else {
+ return fld.GetName()
+ }
+}
+
+func valueKind(val interface{}) string { // human-readable description of a parsed option value's kind, for error messages
+ switch val := val.(type) {
+ case identifier:
+ return "identifier"
+ case bool:
+ return "bool"
+ case int64:
+ if val < 0 {
+ return "negative integer"
+ }
+ return "integer"
+ case uint64:
+ return "integer"
+ case float64:
+ return "double"
+ case string, []byte:
+ return "string"
+ case []*aggregateEntryNode:
+ return "message"
+ default:
+ return fmt.Sprintf("%T", val) // fall back to the Go type name
+ }
+}
+
+func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val valueNode, enumAsString bool) (interface{}, error) { // converts a parsed value node to a Go value matching fld's declared type, with range checks; enumAsString selects name vs number for enums
+ v := val.value()
+ t := fld.AsFieldDescriptorProto().GetType()
+ switch t {
+ case dpb.FieldDescriptorProto_TYPE_ENUM:
+ if id, ok := v.(identifier); ok {
+ ev := fld.GetEnumType().FindValueByName(string(id))
+ if ev == nil {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%venum %s has no value named %s", mc, fld.GetEnumType().GetFullyQualifiedName(), id)}
+ }
+ if enumAsString {
+ return ev.GetName(), nil // used for default values, which store the enum name
+ } else {
+ return ev.GetNumber(), nil
+ }
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting enum, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_MESSAGE, dpb.FieldDescriptorProto_TYPE_GROUP:
+ if aggs, ok := v.([]*aggregateEntryNode); ok { // aggregate literal: build a dynamic message field by field
+ fmd := fld.GetMessageType()
+ fdm := dynamic.NewMessage(fmd)
+ origPath := mc.optAggPath
+ defer func() {
+ mc.optAggPath = origPath // restore the aggregate path on exit
+ }()
+ for _, a := range aggs {
+ if origPath == "" {
+ mc.optAggPath = a.name.value()
+ } else {
+ mc.optAggPath = origPath + "." + a.name.value()
+ }
+ var ffld *desc.FieldDescriptor
+ if a.name.isExtension {
+ n := a.name.name.val
+ ffld = findExtension(mc.file, n, false, map[fileDescriptorish]struct{}{})
+ if ffld == nil {
+ // may need to qualify with package name
+ pkg := mc.file.GetPackage()
+ if pkg != "" {
+ ffld = findExtension(mc.file, pkg+"."+n, false, map[fileDescriptorish]struct{}{})
+ }
+ }
+ } else {
+ ffld = fmd.FindFieldByName(a.name.value())
+ }
+ if ffld == nil {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vfield %s not found", mc, a.name.name.val)}
+ }
+ if err := setOptionField(res, mc, fdm, ffld, a.name, a.val); err != nil {
+ return nil, err
+ }
+ }
+ return fdm, nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting message, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_BOOL:
+ if b, ok := v.(bool); ok {
+ return b, nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bool, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_BYTES:
+ if str, ok := v.(string); ok {
+ return []byte(str), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bytes, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_STRING:
+ if str, ok := v.(string); ok {
+ return str, nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting string, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_INT32, dpb.FieldDescriptorProto_TYPE_SINT32, dpb.FieldDescriptorProto_TYPE_SFIXED32: // literals arrive as int64 or uint64; range-check before narrowing
+ if i, ok := v.(int64); ok {
+ if i > math.MaxInt32 || i < math.MinInt32 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, i)}
+ }
+ return int32(i), nil
+ }
+ if ui, ok := v.(uint64); ok {
+ if ui > math.MaxInt32 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, ui)}
+ }
+ return int32(ui), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int32, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_UINT32, dpb.FieldDescriptorProto_TYPE_FIXED32:
+ if i, ok := v.(int64); ok {
+ if i > math.MaxUint32 || i < 0 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, i)}
+ }
+ return uint32(i), nil
+ }
+ if ui, ok := v.(uint64); ok {
+ if ui > math.MaxUint32 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, ui)}
+ }
+ return uint32(ui), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint32, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_INT64, dpb.FieldDescriptorProto_TYPE_SINT64, dpb.FieldDescriptorProto_TYPE_SFIXED64:
+ if i, ok := v.(int64); ok {
+ return i, nil
+ }
+ if ui, ok := v.(uint64); ok {
+ if ui > math.MaxInt64 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int64", mc, ui)}
+ }
+ return int64(ui), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int64, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_UINT64, dpb.FieldDescriptorProto_TYPE_FIXED64:
+ if i, ok := v.(int64); ok {
+ if i < 0 {
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint64", mc, i)}
+ }
+ return uint64(i), nil
+ }
+ if ui, ok := v.(uint64); ok {
+ return ui, nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint64, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_DOUBLE: // integer literals are accepted and widened to double
+ if d, ok := v.(float64); ok {
+ return d, nil
+ }
+ if i, ok := v.(int64); ok {
+ return float64(i), nil
+ }
+ if u, ok := v.(uint64); ok {
+ return float64(u), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting double, got %s", mc, valueKind(v))}
+ case dpb.FieldDescriptorProto_TYPE_FLOAT:
+ if d, ok := v.(float64); ok {
+ if (d > math.MaxFloat32 || d < -math.MaxFloat32) && !math.IsInf(d, 1) && !math.IsInf(d, -1) && !math.IsNaN(d) { // inf/nan are allowed; finite out-of-range is not
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %f is out of range for float", mc, d)}
+ }
+ return float32(d), nil
+ }
+ if i, ok := v.(int64); ok {
+ return float32(i), nil
+ }
+ if u, ok := v.(uint64); ok {
+ return float32(u), nil
+ }
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting float, got %s", mc, valueKind(v))}
+ default:
+ return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vunrecognized field type: %s", mc, t)}
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
new file mode 100644
index 0000000..6eb1acc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
@@ -0,0 +1,1516 @@
+package protoparse
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+//go:generate goyacc -o proto.y.go -p proto proto.y
+
func init() {
	// Enable verbose errors from the goyacc-generated parser so messages
	// include expected/actual token information.
	protoErrorVerbose = true

	// fix up the generated "token name" array so that error messages are nicer
	setTokenName(_STRING_LIT, "string literal")
	setTokenName(_INT_LIT, "int literal")
	setTokenName(_FLOAT_LIT, "float literal")
	setTokenName(_NAME, "identifier")
	setTokenName(_ERROR, "error")
	// for keywords, just show the keyword itself wrapped in quotes
	for str, i := range keywords {
		setTokenName(i, fmt.Sprintf(`"%s"`, str))
	}
}
+
// setTokenName replaces the human-readable name of the given lexer token in
// the generated parser's token-name table, so that syntax errors read
// naturally (e.g. "expecting string literal"). It panics if the token value
// is not known to the generated parser, which would indicate a mismatch
// between this code and the goyacc output.
func setTokenName(token int, text string) {
	// NB: this is based on logic in generated parse code that translates the
	// int returned from the lexer into an internal token number.
	var intern int
	if token < len(protoTok1) {
		// dense table for small token values
		intern = protoTok1[token]
	} else {
		if token >= protoPrivate {
			if token < protoPrivate+len(protoTok2) {
				// second dense table for "private" (generated) token values
				intern = protoTok2[token-protoPrivate]
			}
		}
		if intern == 0 {
			// fall back to the sparse table of (token, internal) pairs
			for i := 0; i+1 < len(protoTok3); i += 2 {
				if protoTok3[i] == token {
					intern = protoTok3[i+1]
					break
				}
			}
		}
	}

	if intern >= 1 && intern-1 < len(protoToknames) {
		protoToknames[intern-1] = text
		return
	}

	panic(fmt.Sprintf("Unknown token value: %d", token))
}
+
// FileAccessor is an abstraction for opening proto source files. It takes the
// name of the file to open and returns either the input reader or an error.
// The parser closes the returned reader when it has finished reading it.
type FileAccessor func(filename string) (io.ReadCloser, error)
+
+// FileContentsFromMap returns a FileAccessor that uses the given map of file
+// contents. This allows proto source files to be constructed in memory and
+// easily supplied to a parser. The map keys are the paths to the proto source
+// files, and the values are the actual proto source contents.
+func FileContentsFromMap(files map[string]string) FileAccessor {
+ return func(filename string) (io.ReadCloser, error) {
+ contents, ok := files[filename]
+ if !ok {
+ return nil, os.ErrNotExist
+ }
+ return ioutil.NopCloser(strings.NewReader(contents)), nil
+ }
+}
+
// Parser parses proto source into descriptors.
type Parser struct {
	// The paths used to search for dependencies that are referenced in import
	// statements in proto source files. If no import paths are provided then
	// "." (current directory) is assumed to be the only import path.
	//
	// This setting is only used during ParseFiles operations. Since calls to
	// ParseFilesButDoNotLink do not link, there is no need to load and parse
	// dependencies.
	ImportPaths []string

	// If true, the supplied file names/paths need not necessarily match how the
	// files are referenced in import statements. The parser will attempt to
	// match import statements to supplied paths, "guessing" the import paths
	// for the files. Note that this inference is not perfect and link errors
	// could result. It works best when all proto files are organized such that
	// a single import path can be inferred (e.g. all files under a single tree
	// with import statements all being relative to the root of this tree).
	InferImportPaths bool

	// Used to create a reader for a given filename, when loading proto source
	// file contents. If unset, os.Open is used. If ImportPaths is also empty
	// then relative paths will be relative to the process's current working
	// directory.
	Accessor FileAccessor

	// If true, the resulting file descriptors will retain source code info,
	// that maps elements to their location in the source files as well as
	// includes comments found during parsing (and attributed to elements of
	// the source file).
	IncludeSourceCodeInfo bool

	// If true, the results from ParseFilesButDoNotLink will be passed through
	// some additional validations. But only constraints that do not require
	// linking can be checked. These include proto2 vs. proto3 language features,
	// looking for incorrect usage of reserved names or tags, and ensuring that
	// fields have unique tags and that enum values have unique numbers (unless
	// the enum allows aliases).
	ValidateUnlinkedFiles bool

	// If true, the results from ParseFilesButDoNotLink will have options
	// interpreted. Any uninterpretable options (including any custom options or
	// options that refer to message and enum types, which can only be
	// interpreted after linking) will be left in uninterpreted_options. Also,
	// the "default" pseudo-option for fields can only be interpreted for scalar
	// fields, excluding enums. (Interpreting default values for enum fields
	// requires resolving enum names, which requires linking.)
	InterpretOptionsInUnlinkedFiles bool

	// A custom reporter of syntax and link errors. If not specified, the
	// default reporter just returns the reported error, which causes parsing
	// to abort after encountering a single error.
	//
	// The reporter is not invoked for system or I/O errors, only for syntax and
	// link errors.
	ErrorReporter ErrorReporter
}
+
// ParseFiles parses the named files into descriptors. The returned slice has
// the same number of entries as the given filenames, in the same order. So the
// first returned descriptor corresponds to the first given name, and so on.
//
// All dependencies for all specified files (including transitive dependencies)
// must be accessible via the parser's Accessor or a link error will occur. The
// exception to this rule is that files can import standard Google-provided
// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
// for these files. Like protoc, this parser has a built-in version of these
// files it can use if they aren't explicitly supplied.
//
// If the Parser has no ErrorReporter set and a syntax or link error occurs,
// parsing will abort with the first such error encountered. If there is an
// ErrorReporter configured and it returns non-nil, parsing will abort with the
// error it returns. If syntax or link errors are encountered but the configured
// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) {
	accessor := p.Accessor
	if accessor == nil {
		// default: read from the file system
		accessor = func(name string) (io.ReadCloser, error) {
			return os.Open(name)
		}
	}
	paths := p.ImportPaths
	if len(paths) > 0 {
		// Wrap the accessor so each file is looked up under every import
		// path in order; the first successful open wins. If all attempts
		// fail, the error from the first attempt is reported.
		acc := accessor
		accessor = func(name string) (io.ReadCloser, error) {
			var ret error
			for _, path := range paths {
				f, err := acc(filepath.Join(path, name))
				if err != nil {
					if ret == nil {
						ret = err
					}
					continue
				}
				return f, nil
			}
			return nil, ret
		}
	}

	protos := map[string]*parseResult{}
	results := &parseResults{resultsByFilename: protos}
	errs := newErrorHandler(p.ErrorReporter)
	// recursive=true: imports are parsed too; validate=true: always validate
	parseProtoFiles(accessor, filenames, errs, true, true, results)
	if err := errs.getError(); err != nil {
		return nil, err
	}
	if p.InferImportPaths {
		// TODO: if this re-writes one of the names in filenames, lookups below will break
		protos = fixupFilenames(protos)
	}
	linkedProtos, err := newLinker(results, errs).linkFiles()
	if err != nil {
		return nil, err
	}
	if p.IncludeSourceCodeInfo {
		// attach source code info to the linked descriptors after the fact
		for name, fd := range linkedProtos {
			pr := protos[name]
			fd.AsFileDescriptorProto().SourceCodeInfo = pr.generateSourceCodeInfo()
			internal.RecomputeSourceInfo(fd)
		}
	}
	// return results in the same order the filenames were given
	fds := make([]*desc.FileDescriptor, len(filenames))
	for i, name := range filenames {
		fd := linkedProtos[name]
		fds[i] = fd
	}
	return fds, nil
}
+
// ParseFilesButDoNotLink parses the named files into descriptor protos. The
// results are just protos, not fully-linked descriptors. It is possible that
// descriptors are invalid and still be returned in parsed form without error
// due to the fact that the linking step is skipped (and thus many validation
// steps omitted).
//
// There are a few side effects to not linking the descriptors:
//  1. No options will be interpreted. Options can refer to extensions or have
//     message and enum types. Without linking, these extension and type
//     references are not resolved, so the options may not be interpretable.
//     So all options will appear in UninterpretedOption fields of the various
//     descriptor options messages.
//  2. Type references will not be resolved. This means that the actual type
//     names in the descriptors may be unqualified and even relative to the
//     scope in which the type reference appears. This goes for fields that
//     have message and enum types. It also applies to methods and their
//     references to request and response message types.
//  3. Enum fields are not known. Until a field's type reference is resolved
//     (during linking), it is not known whether the type refers to a message
//     or an enum. So all fields with such type references have their Type set
//     to TYPE_MESSAGE.
//
// This method will still validate the syntax of parsed files. If the parser's
// ValidateUnlinkedFiles field is true, additional checks, beyond syntax will
// also be performed.
//
// If the Parser has no ErrorReporter set and a syntax or link error occurs,
// parsing will abort with the first such error encountered. If there is an
// ErrorReporter configured and it returns non-nil, parsing will abort with the
// error it returns. If syntax or link errors are encountered but the configured
// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*dpb.FileDescriptorProto, error) {
	accessor := p.Accessor
	if accessor == nil {
		// default: read from the file system
		accessor = func(name string) (io.ReadCloser, error) {
			return os.Open(name)
		}
	}

	protos := map[string]*parseResult{}
	errs := newErrorHandler(p.ErrorReporter)
	// recursive=false: imports are not loaded since no linking will happen
	parseProtoFiles(accessor, filenames, errs, false, p.ValidateUnlinkedFiles, &parseResults{resultsByFilename: protos})
	if err := errs.getError(); err != nil {
		return nil, err
	}
	if p.InferImportPaths {
		// TODO: if this re-writes one of the names in filenames, lookups below will break
		protos = fixupFilenames(protos)
	}
	fds := make([]*dpb.FileDescriptorProto, len(filenames))
	for i, name := range filenames {
		pr := protos[name]
		fd := pr.fd
		if p.InterpretOptionsInUnlinkedFiles {
			// lenient: leave uninterpretable options in place rather than failing
			pr.lenient = true
			_ = interpretFileOptions(pr, poorFileDescriptorish{FileDescriptorProto: fd})
		}
		if p.IncludeSourceCodeInfo {
			fd.SourceCodeInfo = pr.generateSourceCodeInfo()
		}
		fds[i] = fd
	}
	return fds, nil
}
+
// fixupFilenames rewrites the keys (and descriptor names) in the given map of
// parse results so that file names match the paths used in 'import'
// statements, making the files linkable. It returns a new map; the parse
// results themselves are shared (and their fd.Name fields may be mutated).
// The matching is heuristic and may guess wrong when several supplied paths
// end with the same import path.
func fixupFilenames(protos map[string]*parseResult) map[string]*parseResult {
	// In the event that the given filenames (keys in the supplied map) do not
	// match the actual paths used in 'import' statements in the files, we try
	// to revise names in the protos so that they will match and be linkable.
	revisedProtos := map[string]*parseResult{}

	protoPaths := map[string]struct{}{}
	// TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?)
	importCandidates := map[string]map[string]struct{}{}
	candidatesAvailable := map[string]struct{}{}
	// build, for every import path seen in any file, the set of supplied
	// filenames that could plausibly be that import (suffix match)
	for name := range protos {
		candidatesAvailable[name] = struct{}{}
		for _, f := range protos {
			for _, imp := range f.fd.Dependency {
				if strings.HasSuffix(name, imp) {
					candidates := importCandidates[imp]
					if candidates == nil {
						candidates = map[string]struct{}{}
						importCandidates[imp] = candidates
					}
					candidates[name] = struct{}{}
				}
			}
		}
	}
	for imp, candidates := range importCandidates {
		// if we found multiple possible candidates, use the one that is an exact match
		// if it exists, and otherwise, guess that it's the shortest path (fewest elements)
		var best string
		for c := range candidates {
			if _, ok := candidatesAvailable[c]; !ok {
				// already used this candidate and re-written its filename accordingly
				continue
			}
			if c == imp {
				// exact match!
				best = c
				break
			}
			if best == "" {
				best = c
			} else {
				// HACK: we can't actually tell which file is supposed to match
				// this import, so arbitrarily pick the "shorter" one (fewest
				// path elements) or, on a tie, the lexically earlier one
				minLen := strings.Count(best, string(filepath.Separator))
				cLen := strings.Count(c, string(filepath.Separator))
				if cLen < minLen || (cLen == minLen && c < best) {
					best = c
				}
			}
		}
		if best != "" {
			// remember the stripped prefix: it is a likely proto path that
			// can be used to fix up entry-point files below
			prefix := best[:len(best)-len(imp)]
			if len(prefix) > 0 {
				protoPaths[prefix] = struct{}{}
			}
			f := protos[best]
			f.fd.Name = proto.String(imp)
			revisedProtos[imp] = f
			delete(candidatesAvailable, best)
		}
	}

	if len(candidatesAvailable) == 0 {
		return revisedProtos
	}

	if len(protoPaths) == 0 {
		// no prefixes discovered; keep remaining files under their given names
		for c := range candidatesAvailable {
			revisedProtos[c] = protos[c]
		}
		return revisedProtos
	}

	// Any remaining candidates are entry-points (not imported by others), so
	// the best bet to "fixing" their file name is to see if they're in one of
	// the proto paths we found, and if so strip that prefix.
	protoPathStrs := make([]string, len(protoPaths))
	i := 0
	for p := range protoPaths {
		protoPathStrs[i] = p
		i++
	}
	sort.Strings(protoPathStrs)
	// we look at paths in reverse order, so we'll use a longer proto path if
	// there is more than one match
	for c := range candidatesAvailable {
		var imp string
		for i := len(protoPathStrs) - 1; i >= 0; i-- {
			p := protoPathStrs[i]
			if strings.HasPrefix(c, p) {
				imp = c[len(p):]
				break
			}
		}
		if imp != "" {
			f := protos[c]
			f.fd.Name = proto.String(imp)
			revisedProtos[imp] = f
		} else {
			revisedProtos[c] = protos[c]
		}
	}

	return revisedProtos
}
+
// parseProtoFiles parses each named file in order (and, when recursive is
// true, its transitive imports), accumulating results in parsed. It stops as
// soon as the error handler has recorded a fatal error.
func parseProtoFiles(acc FileAccessor, filenames []string, errs *errorHandler, recursive, validate bool, parsed *parseResults) {
	for _, name := range filenames {
		parseProtoFile(acc, name, nil, errs, recursive, validate, parsed)
		if errs.err != nil {
			return
		}
	}
}
+
// parseProtoFile parses a single file into parsed, skipping files already
// present. Files that cannot be opened may still resolve if they are
// well-known standard imports with compiled-in descriptors. importLoc, when
// non-nil, is the position of the import statement that referenced this file;
// it is used to attribute position-less errors (e.g. file-not-found) to that
// import line. When recursive is true, the file's own imports are parsed too.
func parseProtoFile(acc FileAccessor, filename string, importLoc *SourcePos, errs *errorHandler, recursive, validate bool, parsed *parseResults) {
	if parsed.has(filename) {
		return
	}
	in, err := acc(filename)
	var result *parseResult
	if err == nil {
		// try to parse the bytes accessed
		func() {
			defer func() {
				// if we've already parsed contents, an error
				// closing need not fail this operation
				_ = in.Close()
			}()
			result = parseProto(filename, in, errs, validate)
		}()
	} else if d, ok := standardImports[filename]; ok {
		// it's a well-known import
		// (we clone it to make sure we're not sharing state with other
		// parsers, which could result in unsafe races if multiple
		// parsers are trying to access it concurrently)
		result = &parseResult{fd: proto.Clone(d).(*dpb.FileDescriptorProto)}
	} else {
		if !strings.Contains(err.Error(), filename) {
			// an error message that doesn't indicate the file is awful!
			err = fmt.Errorf("%s: %v", filename, err)
		}
		if _, ok := err.(ErrorWithPos); !ok && importLoc != nil {
			// error has no source position? report it as the import line
			err = ErrorWithSourcePos{
				Pos:        importLoc,
				Underlying: err,
			}
		}
		_ = errs.handleError(err)
		return
	}

	parsed.add(filename, result)

	if errs.getError() != nil {
		return // abort
	}

	if recursive {
		fd := result.fd
		decl := result.getFileNode(fd)
		fnode, ok := decl.(*fileNode)
		if !ok {
			// no AST for this file? use imports in descriptor
			for _, dep := range fd.Dependency {
				parseProtoFile(acc, dep, decl.start(), errs, true, validate, parsed)
				if errs.getError() != nil {
					return // abort
				}
			}
			return
		}
		// we have an AST; use it so we can report import location in errors
		for _, dep := range fnode.imports {
			parseProtoFile(acc, dep.name.val, dep.name.start(), errs, true, validate, parsed)
			if errs.getError() != nil {
				return // abort
			}
		}
	}
}
+
// parseResults is an ordered collection of per-file parse results.
type parseResults struct {
	// results keyed by the filename they were parsed from
	resultsByFilename map[string]*parseResult
	// filenames in the order they were added
	filenames []string
}
+
+func (r *parseResults) has(filename string) bool {
+ _, ok := r.resultsByFilename[filename]
+ return ok
+}
+
+func (r *parseResults) add(filename string, result *parseResult) {
+ r.resultsByFilename[filename] = result
+ r.filenames = append(r.filenames, filename)
+}
+
// parseResult is the outcome of parsing (and converting to a descriptor
// proto) a single proto source file.
type parseResult struct {
	// handles any errors encountered during parsing, construction of file descriptor,
	// or validation
	errs *errorHandler

	// the parsed file descriptor
	fd *dpb.FileDescriptorProto

	// if set to true, enables lenient interpretation of options, where
	// unrecognized options will be left uninterpreted instead of resulting in a
	// link error
	lenient bool

	// a map of elements in the descriptor to nodes in the AST
	// (for extracting position information when validating the descriptor)
	nodes map[proto.Message]node

	// a map of uninterpreted option AST nodes to their relative path
	// in the resulting options message
	interpretedOptions map[*optionNode][]int32
}
+
// The get*Node accessors below return the AST node that a descriptor proto
// was created from. When no AST is available (r.nodes is nil, as for standard
// imports cloned from compiled-in descriptors), they return a placeholder
// noSourceNode that carries only the file name.

// getFileNode returns the AST node for the given file descriptor.
func (r *parseResult) getFileNode(f *dpb.FileDescriptorProto) fileDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(f.GetName())}
	}
	return r.nodes[f].(fileDecl)
}

// getOptionNode returns the AST node for the given uninterpreted option.
func (r *parseResult) getOptionNode(o *dpb.UninterpretedOption) optionDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[o].(optionDecl)
}

// getOptionNamePartNode returns the AST node for the given option name part.
func (r *parseResult) getOptionNamePartNode(o *dpb.UninterpretedOption_NamePart) node {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[o]
}

// getFieldNode returns the AST node for the given field descriptor.
func (r *parseResult) getFieldNode(f *dpb.FieldDescriptorProto) fieldDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[f].(fieldDecl)
}

// getExtensionRangeNode returns the AST node for the given extension range.
func (r *parseResult) getExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[e].(rangeDecl)
}

// getMessageReservedRangeNode returns the AST node for the given message
// reserved range.
func (r *parseResult) getMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[rr].(rangeDecl)
}

// getEnumValueNode returns the AST node for the given enum value descriptor.
func (r *parseResult) getEnumValueNode(e *dpb.EnumValueDescriptorProto) enumValueDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[e].(enumValueDecl)
}

// getEnumReservedRangeNode returns the AST node for the given enum reserved
// range.
func (r *parseResult) getEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange) rangeDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[rr].(rangeDecl)
}

// getMethodNode returns the AST node for the given method descriptor.
func (r *parseResult) getMethodNode(m *dpb.MethodDescriptorProto) methodDecl {
	if r.nodes == nil {
		return noSourceNode{pos: unknownPos(r.fd.GetName())}
	}
	return r.nodes[m].(methodDecl)
}
+
// The put*Node methods below record the mapping from a descriptor proto to
// the AST node it was created from, so that source positions (and comments)
// can be looked up later via the corresponding get*Node accessors.

func (r *parseResult) putFileNode(f *dpb.FileDescriptorProto, n *fileNode) {
	r.nodes[f] = n
}

func (r *parseResult) putOptionNode(o *dpb.UninterpretedOption, n *optionNode) {
	r.nodes[o] = n
}

func (r *parseResult) putOptionNamePartNode(o *dpb.UninterpretedOption_NamePart, n *optionNamePartNode) {
	r.nodes[o] = n
}

func (r *parseResult) putMessageNode(m *dpb.DescriptorProto, n msgDecl) {
	r.nodes[m] = n
}

func (r *parseResult) putFieldNode(f *dpb.FieldDescriptorProto, n fieldDecl) {
	r.nodes[f] = n
}

func (r *parseResult) putOneOfNode(o *dpb.OneofDescriptorProto, n *oneOfNode) {
	r.nodes[o] = n
}

func (r *parseResult) putExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange, n *rangeNode) {
	r.nodes[e] = n
}

func (r *parseResult) putMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange, n *rangeNode) {
	r.nodes[rr] = n
}

func (r *parseResult) putEnumNode(e *dpb.EnumDescriptorProto, n *enumNode) {
	r.nodes[e] = n
}

func (r *parseResult) putEnumValueNode(e *dpb.EnumValueDescriptorProto, n *enumValueNode) {
	r.nodes[e] = n
}

func (r *parseResult) putEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange, n *rangeNode) {
	r.nodes[rr] = n
}

func (r *parseResult) putServiceNode(s *dpb.ServiceDescriptorProto, n *serviceNode) {
	r.nodes[s] = n
}

func (r *parseResult) putMethodNode(m *dpb.MethodDescriptorProto, n *methodNode) {
	r.nodes[m] = n
}
+
+func parseProto(filename string, r io.Reader, errs *errorHandler, validate bool) *parseResult {
+ lx := newLexer(r, filename, errs)
+ protoParse(lx)
+
+ res := createParseResult(filename, lx.res, errs)
+ if validate {
+ basicValidate(res)
+ }
+
+ return res
+}
+
+func createParseResult(filename string, file *fileNode, errs *errorHandler) *parseResult {
+ res := &parseResult{
+ errs: errs,
+ nodes: map[proto.Message]node{},
+ interpretedOptions: map[*optionNode][]int32{},
+ }
+ if file == nil {
+ // nil AST means there was an error that prevented any parsing
+ // or the file was empty; synthesize empty non-nil AST
+ file = &fileNode{}
+ n := noSourceNode{pos: unknownPos(filename)}
+ file.setRange(&n, &n)
+ }
+ res.createFileDescriptor(filename, file)
+ return res
+}
+
// createFileDescriptor walks the file AST and populates r.fd with a
// FileDescriptorProto covering all top-level declarations. Options remain
// uninterpreted at this stage. Import declarations encountered are also
// appended to file.imports so they are available for recursive parsing.
func (r *parseResult) createFileDescriptor(filename string, file *fileNode) {
	fd := &dpb.FileDescriptorProto{Name: proto.String(filename)}
	r.fd = fd
	r.putFileNode(fd, file)

	isProto3 := false
	if file.syntax != nil {
		isProto3 = file.syntax.syntax.val == "proto3"
		// proto2 is the default, so no need to set unless proto3
		if isProto3 {
			fd.Syntax = proto.String(file.syntax.syntax.val)
		}
	}

	for _, decl := range file.decls {
		if decl.enum != nil {
			fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl.enum))
		} else if decl.extend != nil {
			fd.Extension = append([]*dpb.FieldDescriptorProto(nil), fd.Extension...) // no-op placeholder removed below
		} else if decl.imp != nil {
			file.imports = append(file.imports, decl.imp)
			index := len(fd.Dependency)
			fd.Dependency = append(fd.Dependency, decl.imp.name.val)
			if decl.imp.public {
				fd.PublicDependency = append(fd.PublicDependency, int32(index))
			} else if decl.imp.weak {
				fd.WeakDependency = append(fd.WeakDependency, int32(index))
			}
		} else if decl.message != nil {
			fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl.message, isProto3))
		} else if decl.option != nil {
			if fd.Options == nil {
				fd.Options = &dpb.FileOptions{}
			}
			fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
		} else if decl.service != nil {
			fd.Service = append(fd.Service, r.asServiceDescriptor(decl.service))
		} else if decl.pkg != nil {
			// more than one package statement is an error, but parsing may
			// continue if the error handler swallows it
			if fd.Package != nil {
				if r.errs.handleError(ErrorWithSourcePos{Pos: decl.pkg.start(), Underlying: errors.New("files should have only one package declaration")}) != nil {
					return
				}
			}
			fd.Package = proto.String(decl.pkg.name.val)
		}
	}
}
+
+func (r *parseResult) asUninterpretedOptions(nodes []*optionNode) []*dpb.UninterpretedOption {
+ if len(nodes) == 0 {
+ return nil
+ }
+ opts := make([]*dpb.UninterpretedOption, len(nodes))
+ for i, n := range nodes {
+ opts[i] = r.asUninterpretedOption(n)
+ }
+ return opts
+}
+
// asUninterpretedOption converts an option AST node into an
// UninterpretedOption proto, recording the node so that source position
// information can be recovered later. The option's value populates whichever
// UninterpretedOption field corresponds to its Go type.
func (r *parseResult) asUninterpretedOption(node *optionNode) *dpb.UninterpretedOption {
	opt := &dpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.name.parts)}
	r.putOptionNode(opt, node)

	switch val := node.val.value().(type) {
	case bool:
		// booleans become the identifiers "true"/"false"
		if val {
			opt.IdentifierValue = proto.String("true")
		} else {
			opt.IdentifierValue = proto.String("false")
		}
	case int64:
		// NOTE(review): int64 values appear to correspond to negative
		// literals and uint64 to non-negative ones, matching the
		// Negative/PositiveIntValue split in descriptor.proto — confirm
		// against the lexer's numeric handling.
		opt.NegativeIntValue = proto.Int64(val)
	case uint64:
		opt.PositiveIntValue = proto.Uint64(val)
	case float64:
		opt.DoubleValue = proto.Float64(val)
	case string:
		opt.StringValue = []byte(val)
	case identifier:
		opt.IdentifierValue = proto.String(string(val))
	case []*aggregateEntryNode:
		// aggregate (message-literal) values are stored in their text form
		var buf bytes.Buffer
		aggToString(val, &buf)
		aggStr := buf.String()
		opt.AggregateValue = proto.String(aggStr)
	}
	return opt
}
+
+func (r *parseResult) asUninterpretedOptionName(parts []*optionNamePartNode) []*dpb.UninterpretedOption_NamePart {
+ ret := make([]*dpb.UninterpretedOption_NamePart, len(parts))
+ for i, part := range parts {
+ txt := part.text.val
+ if !part.isExtension {
+ txt = part.text.val[part.offset : part.offset+part.length]
+ }
+ np := &dpb.UninterpretedOption_NamePart{
+ NamePart: proto.String(txt),
+ IsExtension: proto.Bool(part.isExtension),
+ }
+ r.putOptionNamePartNode(np, part)
+ ret[i] = np
+ }
+ return ret
+}
+
// addExtensions converts the declarations in an extend block into field (and,
// for groups, nested message) descriptors, appending them to the supplied
// slices. Each field's Extendee is set to the extended message's name, and
// each declaration node gets a back-reference to the enclosing extend node.
func (r *parseResult) addExtensions(ext *extendNode, flds *[]*dpb.FieldDescriptorProto, msgs *[]*dpb.DescriptorProto, isProto3 bool) {
	extendee := ext.extendee.val
	for _, decl := range ext.decls {
		if decl.field != nil {
			decl.field.extendee = ext
			fd := r.asFieldDescriptor(decl.field)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
		} else if decl.group != nil {
			decl.group.extendee = ext
			// groups produce both a field and a nested message type
			fd, md := r.asGroupDescriptors(decl.group, isProto3)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
			*msgs = append(*msgs, md)
		}
	}
}
+
+func asLabel(lbl *fieldLabel) *dpb.FieldDescriptorProto_Label {
+ if lbl.identNode == nil {
+ return nil
+ }
+ switch {
+ case lbl.repeated:
+ return dpb.FieldDescriptorProto_LABEL_REPEATED.Enum()
+ case lbl.required:
+ return dpb.FieldDescriptorProto_LABEL_REQUIRED.Enum()
+ default:
+ return dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+ }
+}
+
+func (r *parseResult) asFieldDescriptor(node *fieldNode) *dpb.FieldDescriptorProto {
+ fd := newFieldDescriptor(node.name.val, node.fldType.val, int32(node.tag.val), asLabel(&node.label))
+ r.putFieldNode(fd, node)
+ if opts := node.options.Elements(); len(opts) > 0 {
+ fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)}
+ }
+ return fd
+}
+
// fieldTypes maps proto scalar type names to their descriptor Type enum
// values. Type names absent from this map are message or enum references,
// which must be resolved during linking.
var fieldTypes = map[string]dpb.FieldDescriptorProto_Type{
	"double":   dpb.FieldDescriptorProto_TYPE_DOUBLE,
	"float":    dpb.FieldDescriptorProto_TYPE_FLOAT,
	"int32":    dpb.FieldDescriptorProto_TYPE_INT32,
	"int64":    dpb.FieldDescriptorProto_TYPE_INT64,
	"uint32":   dpb.FieldDescriptorProto_TYPE_UINT32,
	"uint64":   dpb.FieldDescriptorProto_TYPE_UINT64,
	"sint32":   dpb.FieldDescriptorProto_TYPE_SINT32,
	"sint64":   dpb.FieldDescriptorProto_TYPE_SINT64,
	"fixed32":  dpb.FieldDescriptorProto_TYPE_FIXED32,
	"fixed64":  dpb.FieldDescriptorProto_TYPE_FIXED64,
	"sfixed32": dpb.FieldDescriptorProto_TYPE_SFIXED32,
	"sfixed64": dpb.FieldDescriptorProto_TYPE_SFIXED64,
	"bool":     dpb.FieldDescriptorProto_TYPE_BOOL,
	"string":   dpb.FieldDescriptorProto_TYPE_STRING,
	"bytes":    dpb.FieldDescriptorProto_TYPE_BYTES,
}
+
+func newFieldDescriptor(name string, fieldType string, tag int32, lbl *dpb.FieldDescriptorProto_Label) *dpb.FieldDescriptorProto {
+ fd := &dpb.FieldDescriptorProto{
+ Name: proto.String(name),
+ JsonName: proto.String(internal.JsonName(name)),
+ Number: proto.Int32(tag),
+ Label: lbl,
+ }
+ t, ok := fieldTypes[fieldType]
+ if ok {
+ fd.Type = t.Enum()
+ } else {
+ // NB: we don't have enough info to determine whether this is an enum
+ // or a message type, so we'll leave Type nil and set it later
+ // (during linking)
+ fd.TypeName = proto.String(fieldType)
+ }
+ return fd
+}
+
// asGroupDescriptors converts a group AST node into the pair of descriptors
// that represent a group: a field whose name is the lowercased group name,
// and a nested message type whose name retains the original casing.
func (r *parseResult) asGroupDescriptors(group *groupNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
	fieldName := strings.ToLower(group.name.val)
	fd := &dpb.FieldDescriptorProto{
		Name:     proto.String(fieldName),
		JsonName: proto.String(internal.JsonName(fieldName)),
		Number:   proto.Int32(int32(group.tag.val)),
		Label:    asLabel(&group.label),
		Type:     dpb.FieldDescriptorProto_TYPE_GROUP.Enum(),
		TypeName: proto.String(group.name.val),
	}
	r.putFieldNode(fd, group)
	md := &dpb.DescriptorProto{Name: proto.String(group.name.val)}
	r.putMessageNode(md, group)
	r.addMessageDecls(md, group.decls, isProto3)
	return fd, md
}
+
// asMapDescriptors converts a map field AST node into the pair of descriptors
// used to represent a map: a repeated field and a synthetic nested "Entry"
// message (marked with the map_entry option) whose key and value fields are
// numbered 1 and 2.
func (r *parseResult) asMapDescriptors(mapField *mapFieldNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
	var lbl *dpb.FieldDescriptorProto_Label
	if !isProto3 {
		// in proto2 the entry's key/value fields carry an explicit optional
		// label; in proto3 the label is left unset
		lbl = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}
	keyFd := newFieldDescriptor("key", mapField.mapType.keyType.val, 1, lbl)
	r.putFieldNode(keyFd, mapField.keyField())
	valFd := newFieldDescriptor("value", mapField.mapType.valueType.val, 2, lbl)
	r.putFieldNode(valFd, mapField.valueField())
	// entry message name is derived from the field name, e.g. "foo_bar" -> "FooBarEntry"
	entryName := internal.InitCap(internal.JsonName(mapField.name.val)) + "Entry"
	fd := newFieldDescriptor(mapField.name.val, entryName, int32(mapField.tag.val), dpb.FieldDescriptorProto_LABEL_REPEATED.Enum())
	if opts := mapField.options.Elements(); len(opts) > 0 {
		fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)}
	}
	r.putFieldNode(fd, mapField)
	md := &dpb.DescriptorProto{
		Name:    proto.String(entryName),
		Options: &dpb.MessageOptions{MapEntry: proto.Bool(true)},
		Field:   []*dpb.FieldDescriptorProto{keyFd, valFd},
	}
	r.putMessageNode(md, mapField)
	return fd, md
}
+
+func (r *parseResult) asExtensionRanges(node *extensionRangeNode) []*dpb.DescriptorProto_ExtensionRange {
+ opts := r.asUninterpretedOptions(node.options.Elements())
+ ers := make([]*dpb.DescriptorProto_ExtensionRange, len(node.ranges))
+ for i, rng := range node.ranges {
+ er := &dpb.DescriptorProto_ExtensionRange{
+ Start: proto.Int32(rng.st),
+ End: proto.Int32(rng.en + 1),
+ }
+ if len(opts) > 0 {
+ er.Options = &dpb.ExtensionRangeOptions{UninterpretedOption: opts}
+ }
+ r.putExtensionRangeNode(er, rng)
+ ers[i] = er
+ }
+ return ers
+}
+
+func (r *parseResult) asEnumValue(ev *enumValueNode) *dpb.EnumValueDescriptorProto {
+ num := int32(ev.number.val)
+ evd := &dpb.EnumValueDescriptorProto{Name: proto.String(ev.name.val), Number: proto.Int32(num)}
+ r.putEnumValueNode(evd, ev)
+ if opts := ev.options.Elements(); len(opts) > 0 {
+ evd.Options = &dpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(opts)}
+ }
+ return evd
+}
+
// asMethodDescriptor converts an RPC method AST node into a
// MethodDescriptorProto, recording the node for later source position lookup.
// Streaming flags are set only when the corresponding "stream" keyword was
// present in the source.
func (r *parseResult) asMethodDescriptor(node *methodNode) *dpb.MethodDescriptorProto {
	md := &dpb.MethodDescriptorProto{
		Name:       proto.String(node.name.val),
		InputType:  proto.String(node.input.msgType.val),
		OutputType: proto.String(node.output.msgType.val),
	}
	r.putMethodNode(md, node)
	if node.input.streamKeyword != nil {
		md.ClientStreaming = proto.Bool(true)
	}
	if node.output.streamKeyword != nil {
		md.ServerStreaming = proto.Bool(true)
	}
	// protoc always adds a MethodOptions if there are brackets
	// We have a non-nil node.options if there are brackets
	// We do the same to match protoc as closely as possible
	// https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152
	if node.options != nil {
		md.Options = &dpb.MethodOptions{UninterpretedOption: r.asUninterpretedOptions(node.options)}
	}
	return md
}
+
// asEnumDescriptor converts an enum AST node into an EnumDescriptorProto,
// recording the node and converting all of its declarations: options, values,
// and reserved names/ranges.
func (r *parseResult) asEnumDescriptor(en *enumNode) *dpb.EnumDescriptorProto {
	ed := &dpb.EnumDescriptorProto{Name: proto.String(en.name.val)}
	r.putEnumNode(ed, en)
	for _, decl := range en.decls {
		if decl.option != nil {
			if ed.Options == nil {
				ed.Options = &dpb.EnumOptions{}
			}
			ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
		} else if decl.value != nil {
			ed.Value = append(ed.Value, r.asEnumValue(decl.value))
		} else if decl.reserved != nil {
			// a single reserved statement can declare both names and ranges
			for _, n := range decl.reserved.names {
				ed.ReservedName = append(ed.ReservedName, n.val)
			}
			for _, rng := range decl.reserved.ranges {
				ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng))
			}
		}
	}
	return ed
}
+
// asEnumReservedRange converts a reserved range AST node into an enum
// reserved range descriptor. Note that, unlike asExtensionRanges, End is not
// incremented: enum reserved ranges are inclusive on both ends in
// descriptor.proto.
func (r *parseResult) asEnumReservedRange(rng *rangeNode) *dpb.EnumDescriptorProto_EnumReservedRange {
	rr := &dpb.EnumDescriptorProto_EnumReservedRange{
		Start: proto.Int32(rng.st),
		End:   proto.Int32(rng.en),
	}
	r.putEnumReservedRangeNode(rr, rng)
	return rr
}
+
// asMessageDescriptor converts an AST message node into a DescriptorProto,
// recording the proto<->AST association in r and then translating all of
// the message body's declarations via addMessageDecls.
func (r *parseResult) asMessageDescriptor(node *messageNode, isProto3 bool) *dpb.DescriptorProto {
	msgd := &dpb.DescriptorProto{Name: proto.String(node.name.val)}
	r.putMessageNode(msgd, node)
	r.addMessageDecls(msgd, node.decls, isProto3)
	return msgd
}
+
// addMessageDecls translates each declaration of a message body (enums,
// extensions, extension ranges, fields, map fields, groups, oneofs,
// options, nested messages, and reserved names/ranges) and appends the
// results to the appropriate repeated fields of msgd. Map and group fields
// each produce both a field descriptor and a synthetic nested message type.
func (r *parseResult) addMessageDecls(msgd *dpb.DescriptorProto, decls []*messageElement, isProto3 bool) {
	for _, decl := range decls {
		if decl.enum != nil {
			msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl.enum))
		} else if decl.extend != nil {
			r.addExtensions(decl.extend, &msgd.Extension, &msgd.NestedType, isProto3)
		} else if decl.extensionRange != nil {
			msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl.extensionRange)...)
		} else if decl.field != nil {
			msgd.Field = append(msgd.Field, r.asFieldDescriptor(decl.field))
		} else if decl.mapField != nil {
			// A map field becomes a repeated field whose type is a synthetic
			// nested "map entry" message.
			fd, md := r.asMapDescriptors(decl.mapField, isProto3)
			msgd.Field = append(msgd.Field, fd)
			msgd.NestedType = append(msgd.NestedType, md)
		} else if decl.group != nil {
			// A group likewise produces both a field and a nested message.
			fd, md := r.asGroupDescriptors(decl.group, isProto3)
			msgd.Field = append(msgd.Field, fd)
			msgd.NestedType = append(msgd.NestedType, md)
		} else if decl.oneOf != nil {
			// Capture the oneof's index before appending so member fields can
			// refer back to it via OneofIndex.
			oodIndex := len(msgd.OneofDecl)
			ood := &dpb.OneofDescriptorProto{Name: proto.String(decl.oneOf.name.val)}
			r.putOneOfNode(ood, decl.oneOf)
			msgd.OneofDecl = append(msgd.OneofDecl, ood)
			for _, oodecl := range decl.oneOf.decls {
				if oodecl.option != nil {
					if ood.Options == nil {
						ood.Options = &dpb.OneofOptions{}
					}
					ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl.option))
				} else if oodecl.field != nil {
					// Oneof member fields live in the message's Field list,
					// tagged with the oneof's index.
					fd := r.asFieldDescriptor(oodecl.field)
					fd.OneofIndex = proto.Int32(int32(oodIndex))
					msgd.Field = append(msgd.Field, fd)
				} else if oodecl.group != nil {
					fd, md := r.asGroupDescriptors(oodecl.group, isProto3)
					fd.OneofIndex = proto.Int32(int32(oodIndex))
					msgd.Field = append(msgd.Field, fd)
					msgd.NestedType = append(msgd.NestedType, md)
				}
			}
		} else if decl.option != nil {
			if msgd.Options == nil {
				msgd.Options = &dpb.MessageOptions{}
			}
			msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
		} else if decl.nested != nil {
			msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl.nested, isProto3))
		} else if decl.reserved != nil {
			for _, n := range decl.reserved.names {
				msgd.ReservedName = append(msgd.ReservedName, n.val)
			}
			for _, rng := range decl.reserved.ranges {
				msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng))
			}
		}
	}
}
+
// asMessageReservedRange converts an AST range node into a message
// reserved range. One is added to the end because the parsed range end is
// inclusive while DescriptorProto.ReservedRange.End is exclusive per
// descriptor.proto.
func (r *parseResult) asMessageReservedRange(rng *rangeNode) *dpb.DescriptorProto_ReservedRange {
	rr := &dpb.DescriptorProto_ReservedRange{
		Start: proto.Int32(rng.st),
		End:   proto.Int32(rng.en + 1),
	}
	r.putMessageReservedRangeNode(rr, rng)
	return rr
}
+
// asServiceDescriptor converts an AST service node into a
// ServiceDescriptorProto, recording the proto<->AST association in r and
// translating the service body's option and rpc declarations.
func (r *parseResult) asServiceDescriptor(svc *serviceNode) *dpb.ServiceDescriptorProto {
	sd := &dpb.ServiceDescriptorProto{Name: proto.String(svc.name.val)}
	r.putServiceNode(sd, svc)
	for _, decl := range svc.decls {
		if decl.option != nil {
			// Options message is created lazily, only once the first option
			// is seen.
			if sd.Options == nil {
				sd.Options = &dpb.ServiceOptions{}
			}
			sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
		} else if decl.rpc != nil {
			sd.Method = append(sd.Method, r.asMethodDescriptor(decl.rpc))
		}
	}
	return sd
}
+
// toNameParts splits a (possibly dotted) compound identifier into option
// name-part nodes, one per dot-separated component. offset is the index
// into ident.val at which splitting starts (callers use it to skip a
// leading dot). Note that each part's source range is set to that of the
// whole compound identifier.
func toNameParts(ident *compoundIdentNode, offset int) []*optionNamePartNode {
	parts := strings.Split(ident.val[offset:], ".")
	ret := make([]*optionNamePartNode, len(parts))
	for i, p := range parts {
		ret[i] = &optionNamePartNode{text: ident, offset: offset, length: len(p)}
		ret[i].setRange(ident, ident)
		// Advance past this component and its trailing dot.
		offset += len(p) + 1
	}
	return ret
}
+
// checkUint64InInt32Range reports a lexer error at pos if v does not fit
// in an int32. (v is unsigned, so only the upper bound needs checking.)
func checkUint64InInt32Range(lex protoLexer, pos *SourcePos, v uint64) {
	if v > math.MaxInt32 {
		lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
	}
}
+
// checkInt64InInt32Range reports a lexer error at pos if v does not fit in
// an int32 (checking both bounds, since v is signed).
func checkInt64InInt32Range(lex protoLexer, pos *SourcePos, v int64) {
	if v > math.MaxInt32 || v < math.MinInt32 {
		lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
	}
}
+
// checkTag reports a lexer error at pos if v is not a legal field tag
// number: it must be positive, no greater than internal.MaxTag, and not
// fall inside the special reserved range
// [internal.SpecialReservedStart, internal.SpecialReservedEnd].
func checkTag(lex protoLexer, pos *SourcePos, v uint64) {
	if v < 1 {
		lexError(lex, pos, fmt.Sprintf("tag number %d must be greater than zero", v))
	} else if v > internal.MaxTag {
		lexError(lex, pos, fmt.Sprintf("tag number %d is higher than max allowed tag number (%d)", v, internal.MaxTag))
	} else if v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd {
		lexError(lex, pos, fmt.Sprintf("tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd))
	}
}
+
// aggToString renders an aggregate (message-literal) option value into
// text-format-like syntax, appending to buf. Entries are space-separated
// inside braces; scalar entries get a ": " separator while nested message
// literals are rendered with braces only.
func aggToString(agg []*aggregateEntryNode, buf *bytes.Buffer) {
	buf.WriteString("{")
	for _, a := range agg {
		buf.WriteString(" ")
		buf.WriteString(a.name.value())
		if v, ok := a.val.(*aggregateLiteralNode); ok {
			// Nested message literal: recurse, no colon separator.
			aggToString(v.elements, buf)
		} else {
			buf.WriteString(": ")
			elementToString(a.val.value(), buf)
		}
	}
	buf.WriteString(" }")
}
+
// elementToString renders a single scalar or list value into buf as
// text-format-like output: bools/ints/identifiers verbatim, floats with
// inf/nan spelled out, strings quoted and escaped, lists bracketed, and
// nested aggregates via aggToString.
//
// NOTE(review): the float and []valueNode cases emit a leading ": " even
// though aggToString already writes ": " before calling this function,
// which appears to produce a doubled separator for those cases — confirm
// against upstream protoreflect before changing.
func elementToString(v interface{}, buf *bytes.Buffer) {
	switch v := v.(type) {
	case bool, int64, uint64, identifier:
		fmt.Fprintf(buf, "%v", v)
	case float64:
		// Special float values must be spelled out; %v would print +Inf/NaN.
		if math.IsInf(v, 1) {
			buf.WriteString(": inf")
		} else if math.IsInf(v, -1) {
			buf.WriteString(": -inf")
		} else if math.IsNaN(v) {
			buf.WriteString(": nan")
		} else {
			fmt.Fprintf(buf, ": %v", v)
		}
	case string:
		buf.WriteRune('"')
		writeEscapedBytes(buf, []byte(v))
		buf.WriteRune('"')
	case []valueNode:
		buf.WriteString(": [")
		first := true
		for _, e := range v {
			if first {
				first = false
			} else {
				buf.WriteString(", ")
			}
			elementToString(e.value(), buf)
		}
		buf.WriteString("]")
	case []*aggregateEntryNode:
		aggToString(v, buf)
	}
}
+
+func writeEscapedBytes(buf *bytes.Buffer, b []byte) {
+ for _, c := range b {
+ switch c {
+ case '\n':
+ buf.WriteString("\\n")
+ case '\r':
+ buf.WriteString("\\r")
+ case '\t':
+ buf.WriteString("\\t")
+ case '"':
+ buf.WriteString("\\\"")
+ case '\'':
+ buf.WriteString("\\'")
+ case '\\':
+ buf.WriteString("\\\\")
+ default:
+ if c >= 0x20 && c <= 0x7f && c != '"' && c != '\\' {
+ // simple printable characters
+ buf.WriteByte(c)
+ } else {
+ // use octal escape for all other values
+ buf.WriteRune('\\')
+ buf.WriteByte('0' + ((c >> 6) & 0x7))
+ buf.WriteByte('0' + ((c >> 3) & 0x7))
+ buf.WriteByte('0' + (c & 0x7))
+ }
+ }
+ }
+}
+
+func basicValidate(res *parseResult) {
+ fd := res.fd
+ isProto3 := fd.GetSyntax() == "proto3"
+
+ for _, md := range fd.MessageType {
+ if validateMessage(res, isProto3, "", md) != nil {
+ return
+ }
+ }
+
+ for _, ed := range fd.EnumType {
+ if validateEnum(res, isProto3, "", ed) != nil {
+ return
+ }
+ }
+
+ for _, fld := range fd.Extension {
+ if validateField(res, isProto3, "", fld) != nil {
+ return
+ }
+ }
+}
+
// validateMessage performs basic checks of one message declaration, after
// first recursing into its fields, extensions, nested enums, and nested
// messages: proto3 files may not declare extension ranges; the map_entry
// option may not be set explicitly; reserved ranges may not overlap each
// other; extension ranges may not overlap each other or any reserved
// range; and fields may not reuse tags, or use tags from reserved or
// extension ranges, or use reserved names. prefix is the enclosing scope
// (ending in ".") and is used only in error messages. A non-nil error is
// returned only when the result's error handler decides an error is fatal.
func validateMessage(res *parseResult, isProto3 bool, prefix string, md *dpb.DescriptorProto) error {
	nextPrefix := md.GetName() + "."

	for _, fld := range md.Field {
		if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
			return err
		}
	}
	for _, fld := range md.Extension {
		if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
			return err
		}
	}
	for _, ed := range md.EnumType {
		if err := validateEnum(res, isProto3, nextPrefix, ed); err != nil {
			return err
		}
	}
	for _, nmd := range md.NestedType {
		if err := validateMessage(res, isProto3, nextPrefix, nmd); err != nil {
			return err
		}
	}

	scope := fmt.Sprintf("message %s%s", prefix, md.GetName())

	if isProto3 && len(md.ExtensionRange) > 0 {
		n := res.getExtensionRangeNode(md.ExtensionRange[0])
		if err := res.errs.handleError(ErrorWithSourcePos{Pos: n.start(), Underlying: fmt.Errorf("%s: extension ranges are not allowed in proto3", scope)}); err != nil {
			return err
		}
	}

	// map_entry should never be set explicitly in source: "true" is
	// rejected, "false" is recorded and the option removed.
	if index, err := findOption(res, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil {
		if err := res.errs.handleError(err); err != nil {
			return err
		}
	} else if index >= 0 {
		opt := md.Options.UninterpretedOption[index]
		optn := res.getOptionNode(opt)
		md.Options.UninterpretedOption = removeOption(md.Options.UninterpretedOption, index)
		valid := false
		if opt.IdentifierValue != nil {
			if opt.GetIdentifierValue() == "true" {
				valid = true
				if err := res.errs.handleError(ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: map_entry option should not be set explicitly; use map type instead", scope)}); err != nil {
					return err
				}
			} else if opt.GetIdentifierValue() == "false" {
				valid = true
				md.Options.MapEntry = proto.Bool(false)
			}
		}
		if !valid {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for map_entry option", scope)}); err != nil {
				return err
			}
		}
	}

	// reserved ranges should not overlap
	rsvd := make(tagRanges, len(md.ReservedRange))
	for i, r := range md.ReservedRange {
		n := res.getMessageReservedRangeNode(r)
		rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}

	}
	sort.Sort(rsvd)
	for i := 1; i < len(rsvd); i++ {
		// These ends are exclusive, hence the -1 in the error message.
		if rsvd[i].start < rsvd[i-1].end {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1)}); err != nil {
				return err
			}
		}
	}

	// extensions ranges should not overlap
	exts := make(tagRanges, len(md.ExtensionRange))
	for i, r := range md.ExtensionRange {
		n := res.getExtensionRangeNode(r)
		exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
	}
	sort.Sort(exts)
	for i := 1; i < len(exts); i++ {
		if exts[i].start < exts[i-1].end {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: exts[i].node.start(), Underlying: fmt.Errorf("%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1)}); err != nil {
				return err
			}
		}
	}

	// see if any extension range overlaps any reserved range
	var i, j int // i indexes rsvd; j indexes exts
	for i < len(rsvd) && j < len(exts) {
		if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end ||
			exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end {

			// Report at whichever range starts inside the other.
			var pos *SourcePos
			if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end {
				pos = rsvd[i].node.start()
			} else {
				pos = exts[j].node.start()
			}
			// ranges overlap
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: pos, Underlying: fmt.Errorf("%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1)}); err != nil {
				return err
			}
		}
		// Advance whichever list's current range starts first (both are
		// sorted), merge-style.
		if rsvd[i].start < exts[j].start {
			i++
		} else {
			j++
		}
	}

	// now, check that fields don't re-use tags and don't try to use extension
	// or reserved ranges or reserved names
	rsvdNames := map[string]struct{}{}
	for _, n := range md.ReservedName {
		rsvdNames[n] = struct{}{}
	}
	fieldTags := map[int32]string{}
	for _, fld := range md.Field {
		fn := res.getFieldNode(fld)
		if _, ok := rsvdNames[fld.GetName()]; ok {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: fn.fieldName().start(), Underlying: fmt.Errorf("%s: field %s is using a reserved name", scope, fld.GetName())}); err != nil {
				return err
			}
		}
		if existing := fieldTags[fld.GetNumber()]; existing != "" {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber())}); err != nil {
				return err
			}
		}
		fieldTags[fld.GetNumber()] = fld.GetName()
		// check reserved ranges
		r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() })
		if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1)}); err != nil {
				return err
			}
		}
		// and check extension ranges
		e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() })
		if e < len(exts) && exts[e].start <= fld.GetNumber() {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1)}); err != nil {
				return err
			}
		}
	}

	return nil
}
+
+func validateEnum(res *parseResult, isProto3 bool, prefix string, ed *dpb.EnumDescriptorProto) error {
+ scope := fmt.Sprintf("enum %s%s", prefix, ed.GetName())
+
+ allowAlias := false
+ if index, err := findOption(res, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil {
+ if err := res.errs.handleError(err); err != nil {
+ return err
+ }
+ } else if index >= 0 {
+ opt := ed.Options.UninterpretedOption[index]
+ valid := false
+ if opt.IdentifierValue != nil {
+ if opt.GetIdentifierValue() == "true" {
+ allowAlias = true
+ valid = true
+ } else if opt.GetIdentifierValue() == "false" {
+ valid = true
+ }
+ }
+ if !valid {
+ optNode := res.getOptionNode(opt)
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for allow_alias option", scope)}); err != nil {
+ return err
+ }
+ }
+ }
+
+ if isProto3 && ed.Value[0].GetNumber() != 0 {
+ evNode := res.getEnumValueNode(ed.Value[0])
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: proto3 requires that first value in enum have numeric value of 0", scope)}); err != nil {
+ return err
+ }
+ }
+
+ if !allowAlias {
+ // make sure all value numbers are distinct
+ vals := map[int32]string{}
+ for _, evd := range ed.Value {
+ if existing := vals[evd.GetNumber()]; existing != "" {
+ evNode := res.getEnumValueNode(evd)
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber())}); err != nil {
+ return err
+ }
+ }
+ vals[evd.GetNumber()] = evd.GetName()
+ }
+ }
+
+ // reserved ranges should not overlap
+ rsvd := make(tagRanges, len(ed.ReservedRange))
+ for i, r := range ed.ReservedRange {
+ n := res.getEnumReservedRangeNode(r)
+ rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+ }
+ sort.Sort(rsvd)
+ for i := 1; i < len(rsvd); i++ {
+ if rsvd[i].start <= rsvd[i-1].end {
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end)}); err != nil {
+ return err
+ }
+ }
+ }
+
+ // now, check that fields don't re-use tags and don't try to use extension
+ // or reserved ranges or reserved names
+ rsvdNames := map[string]struct{}{}
+ for _, n := range ed.ReservedName {
+ rsvdNames[n] = struct{}{}
+ }
+ for _, ev := range ed.Value {
+ evn := res.getEnumValueNode(ev)
+ if _, ok := rsvdNames[ev.GetName()]; ok {
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: evn.getName().start(), Underlying: fmt.Errorf("%s: value %s is using a reserved name", scope, ev.GetName())}); err != nil {
+ return err
+ }
+ }
+ // check reserved ranges
+ r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() })
+ if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() {
+ if err := res.errs.handleError(ErrorWithSourcePos{Pos: evn.getNumber().start(), Underlying: fmt.Errorf("%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end)}); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
// validateField performs basic checks on one field (or extension)
// declaration: proto3 forbids groups, labels other than 'repeated', and
// explicit default values; proto2 requires an explicit label unless the
// field belongs to a oneof, and forbids 'required' on extensions. As a
// final normalization step, a missing label is set to optional. A non-nil
// error is returned only when the result's error handler decides an error
// is fatal.
func validateField(res *parseResult, isProto3 bool, prefix string, fld *dpb.FieldDescriptorProto) error {
	scope := fmt.Sprintf("field %s%s", prefix, fld.GetName())

	node := res.getFieldNode(fld)
	if isProto3 {
		if fld.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
			n := node.(*groupNode)
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: n.groupKeyword.start(), Underlying: fmt.Errorf("%s: groups are not allowed in proto3", scope)}); err != nil {
				return err
			}
		} else if fld.Label != nil && fld.GetLabel() != dpb.FieldDescriptorProto_LABEL_REPEATED {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: field has label %v, but proto3 must omit labels other than 'repeated'", scope, fld.GetLabel())}); err != nil {
				return err
			}
		}
		if index, err := findOption(res, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil {
			if err := res.errs.handleError(err); err != nil {
				return err
			}
		} else if index >= 0 {
			optNode := res.getOptionNode(fld.Options.GetUninterpretedOption()[index])
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default values are not allowed in proto3", scope)}); err != nil {
				return err
			}
		}
	} else {
		// Oneof members legitimately have no label in proto2.
		if fld.Label == nil && fld.OneofIndex == nil {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: node.fieldName().start(), Underlying: fmt.Errorf("%s: field has no label, but proto2 must indicate 'optional' or 'required'", scope)}); err != nil {
				return err
			}
		}
		// A non-empty Extendee marks the field as an extension.
		if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED {
			if err := res.errs.handleError(ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: extension fields cannot be 'required'", scope)}); err != nil {
				return err
			}
		}
	}

	// finally, set any missing label to optional
	if fld.Label == nil {
		fld.Label = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}

	return nil
}
+
+func findOption(res *parseResult, scope string, opts []*dpb.UninterpretedOption, name string) (int, error) {
+ found := -1
+ for i, opt := range opts {
+ if len(opt.Name) != 1 {
+ continue
+ }
+ if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name {
+ continue
+ }
+ if found >= 0 {
+ optNode := res.getOptionNode(opt)
+ return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: option %s cannot be defined more than once", scope, name)}
+ }
+ found = i
+ }
+ return found, nil
+}
+
+func removeOption(uo []*dpb.UninterpretedOption, indexToRemove int) []*dpb.UninterpretedOption {
+ if indexToRemove == 0 {
+ return uo[1:]
+ } else if int(indexToRemove) == len(uo)-1 {
+ return uo[:len(uo)-1]
+ } else {
+ return append(uo[:indexToRemove], uo[indexToRemove+1:]...)
+ }
+}
+
// tagRange is a sortable representation of a reserved or extension tag
// range, pairing the numeric bounds with the AST node it came from (used
// for error positions).
type tagRange struct {
	start int32
	end   int32
	node  rangeDecl
}

// tagRanges implements sort.Interface, ordering ranges by start and then
// by end.
type tagRanges []tagRange

func (r tagRanges) Len() int {
	return len(r)
}

func (r tagRanges) Less(i, j int) bool {
	return r[i].start < r[j].start ||
		(r[i].start == r[j].start && r[i].end < r[j].end)
}

func (r tagRanges) Swap(i, j int) {
	r[i], r[j] = r[j], r[i]
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
new file mode 100644
index 0000000..a795fe7
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
@@ -0,0 +1,1005 @@
%{
// Go prologue: copied verbatim into the generated parser source.
package protoparse

//lint:file-ignore SA4006 generated parser has unused values

import (
	"fmt"
	"math"
	"unicode"

	"github.com/jhump/protoreflect/desc/internal"
)

%}

// fields inside this union end up as the fields in a structure known
// as ${PREFIX}SymType, of which a reference is passed to the lexer.
%union{
	file      *fileNode
	fileDecls []*fileElement
	syn       *syntaxNode
	pkg       *packageNode
	imprt     *importNode
	msg       *messageNode
	msgDecls  []*messageElement
	fld       *fieldNode
	mapFld    *mapFieldNode
	mapType   *mapTypeNode
	grp       *groupNode
	oo        *oneOfNode
	ooDecls   []*oneOfElement
	ext       *extensionRangeNode
	resvd     *reservedNode
	en        *enumNode
	enDecls   []*enumElement
	env       *enumValueNode
	extend    *extendNode
	extDecls  []*extendElement
	svc       *serviceNode
	svcDecls  []*serviceElement
	mtd       *methodNode
	rpcType   *rpcTypeNode
	opts      []*optionNode
	optNm     []*optionNamePartNode
	cmpctOpts *compactOptionsNode
	rngs      []*rangeNode
	names     []*compoundStringNode
	cid       *compoundIdentNode
	sl        []valueNode
	agg       []*aggregateEntryNode
	aggName   *aggregateNameNode
	v         valueNode
	il        *compoundIntNode
	str       *compoundStringNode
	s         *stringLiteralNode
	i         *intLiteralNode
	f         *floatLiteralNode
	id        *identNode
	b         *basicNode
	err       error
}

// any non-terminal which returns a value needs a type, which is
// really a field name in the above union struct
%type <file>      file
%type <syn>       syntax
%type <fileDecls> fileDecl fileDecls
%type <imprt>     import
%type <pkg>       package
%type <opts>      option compactOption compactOptionDecls rpcOption rpcOptions
%type <optNm>     optionName optionNameRest optionNameComponent
%type <cmpctOpts> compactOptions
%type <v>         constant scalarConstant aggregate uintLit floatLit
%type <il>        intLit negIntLit
%type <id>        name keyType
%type <cid>       ident typeIdent
%type <aggName>   aggName
%type <sl>        constantList
%type <agg>       aggFields aggField aggFieldEntry
%type <fld>       field oneofField
%type <oo>        oneof
%type <grp>       group oneofGroup
%type <mapFld>    mapField
%type <mapType>   mapType
%type <msg>       message
%type <msgDecls>  messageItem messageBody
%type <ooDecls>   oneofItem oneofBody
%type <names>     fieldNames
%type <resvd>     msgReserved enumReserved reservedNames
%type <rngs>      tagRange tagRanges enumRange enumRanges
%type <ext>       extensions
%type <en>        enum
%type <enDecls>   enumItem enumBody
%type <env>       enumField
%type <extend>    extend
%type <extDecls>  extendItem extendBody
%type <str>       stringLit
%type <svc>       service
%type <svcDecls>  serviceItem serviceBody
%type <mtd>       rpc
%type <rpcType>   rpcType

// same for terminals
%token <s>   _STRING_LIT
%token <i>   _INT_LIT
%token <f>   _FLOAT_LIT
%token <id>  _NAME
%token <id>  _SYNTAX _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
%token <id>  _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64
%token <id>  _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND
%token <id>  _SERVICE _RPC _STREAM _RETURNS
%token <err> _ERROR
// we define all of these, even ones that aren't used, to improve error messages
// so it shows the unexpected symbol instead of showing "$unk"
%token <b>   '=' ';' ':' '{' '}' '\\' '/' '?' '.' ',' '>' '<' '+' '-' '(' ')' '[' ']' '*' '&' '^' '%' '$' '#' '@' '!' '~' '`'

%%
+
+file : syntax {
+ $$ = &fileNode{syntax: $1}
+ $$.setRange($1, $1)
+ protolex.(*protoLex).res = $$
+ }
+ | fileDecls {
+ $$ = &fileNode{decls: $1}
+ if len($1) > 0 {
+ $$.setRange($1[0], $1[len($1)-1])
+ }
+ protolex.(*protoLex).res = $$
+ }
+ | syntax fileDecls {
+ $$ = &fileNode{syntax: $1, decls: $2}
+ var end node
+ if len($2) > 0 {
+ end = $2[len($2)-1]
+ } else {
+ end = $1
+ }
+ $$.setRange($1, end)
+ protolex.(*protoLex).res = $$
+ }
+ | {
+ }
+
+fileDecls : fileDecls fileDecl {
+ $$ = append($1, $2...)
+ }
+ | fileDecl
+
+fileDecl : import {
+ $$ = []*fileElement{{imp: $1}}
+ }
+ | package {
+ $$ = []*fileElement{{pkg: $1}}
+ }
+ | option {
+ $$ = []*fileElement{{option: $1[0]}}
+ }
+ | message {
+ $$ = []*fileElement{{message: $1}}
+ }
+ | enum {
+ $$ = []*fileElement{{enum: $1}}
+ }
+ | extend {
+ $$ = []*fileElement{{extend: $1}}
+ }
+ | service {
+ $$ = []*fileElement{{service: $1}}
+ }
+ | ';' {
+ $$ = []*fileElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+syntax : _SYNTAX '=' stringLit ';' {
+ if $3.val != "proto2" && $3.val != "proto3" {
+ lexError(protolex, $3.start(), "syntax value must be 'proto2' or 'proto3'")
+ }
+ $$ = &syntaxNode{syntax: $3}
+ $$.setRange($1, $4)
+ }
+
+import : _IMPORT stringLit ';' {
+ $$ = &importNode{ name: $2 }
+ $$.setRange($1, $3)
+ }
+ | _IMPORT _WEAK stringLit ';' {
+ $$ = &importNode{ name: $3, weak: true }
+ $$.setRange($1, $4)
+ }
+ | _IMPORT _PUBLIC stringLit ';' {
+ $$ = &importNode{ name: $3, public: true }
+ $$.setRange($1, $4)
+ }
+
+package : _PACKAGE ident ';' {
+ $$ = &packageNode{name: $2}
+ $$.setRange($1, $3)
+ }
+
+ident : name {
+ $$ = &compoundIdentNode{val: $1.val}
+ $$.setRange($1, $1)
+ }
+ | ident '.' name {
+ $$ = &compoundIdentNode{val: $1.val + "." + $3.val}
+ $$.setRange($1, $3)
+ }
+
+option : _OPTION optionName '=' constant ';' {
+ n := &optionNameNode{parts: $2}
+ n.setRange($2[0], $2[len($2)-1])
+ o := &optionNode{name: n, val: $4}
+ o.setRange($1, $5)
+ $$ = []*optionNode{o}
+ }
+
+optionName : ident {
+ $$ = toNameParts($1, 0)
+ }
+ | '(' typeIdent ')' {
+ p := &optionNamePartNode{text: $2, isExtension: true}
+ p.setRange($1, $3)
+ $$ = []*optionNamePartNode{p}
+ }
+ | '(' typeIdent ')' optionNameRest {
+ p := &optionNamePartNode{text: $2, isExtension: true}
+ p.setRange($1, $3)
+ ps := make([]*optionNamePartNode, 1, len($4)+1)
+ ps[0] = p
+ $$ = append(ps, $4...)
+ }
+
+optionNameRest : optionNameComponent
+ | optionNameComponent optionNameRest {
+ $$ = append($1, $2...)
+ }
+
+optionNameComponent : typeIdent {
+ $$ = toNameParts($1, 1 /* exclude leading dot */)
+ }
+ | '.' '(' typeIdent ')' {
+ p := &optionNamePartNode{text: $3, isExtension: true}
+ p.setRange($2, $4)
+ $$ = []*optionNamePartNode{p}
+ }
+
+constant : scalarConstant
+ | aggregate
+
+scalarConstant : stringLit {
+ $$ = $1
+ }
+ | uintLit
+ | negIntLit {
+ $$ = $1
+ }
+ | floatLit
+ | name {
+ if $1.val == "true" {
+ $$ = &boolLiteralNode{identNode: $1, val: true}
+ } else if $1.val == "false" {
+ $$ = &boolLiteralNode{identNode: $1, val: false}
+ } else if $1.val == "inf" {
+ f := &compoundFloatNode{val: math.Inf(1)}
+ f.setRange($1, $1)
+ $$ = f
+ } else if $1.val == "nan" {
+ f := &compoundFloatNode{val: math.NaN()}
+ f.setRange($1, $1)
+ $$ = f
+ } else {
+ $$ = $1
+ }
+ }
+
+uintLit : _INT_LIT {
+ i := &compoundUintNode{val: $1.val}
+ i.setRange($1, $1)
+ $$ = i
+ }
+ | '+' _INT_LIT {
+ i := &compoundUintNode{val: $2.val}
+ i.setRange($1, $2)
+ $$ = i
+ }
+
+negIntLit : '-' _INT_LIT {
+ if $2.val > math.MaxInt64 + 1 {
+ lexError(protolex, $2.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", $2.val, int64(math.MinInt64), int64(math.MaxInt64)))
+ }
+ i := &compoundIntNode{val: -int64($2.val)}
+ i.setRange($1, $2)
+ $$ = i
+ }
+
+intLit : negIntLit
+ | _INT_LIT {
+ // we don't allow uintLit because this is for enum numeric vals, which don't allow '+'
+ checkUint64InInt32Range(protolex, $1.start(), $1.val)
+ i := &compoundIntNode{val: int64($1.val)}
+ i.setRange($1, $1)
+ $$ = i
+ }
+
+floatLit : _FLOAT_LIT {
+ $$ = $1
+ }
+ | '-' _FLOAT_LIT {
+ f := &compoundFloatNode{val: -$2.val}
+ f.setRange($1, $2)
+ $$ = f
+ }
+ | '+' _FLOAT_LIT {
+ f := &compoundFloatNode{val: $2.val}
+ f.setRange($1, $2)
+ $$ = f
+ }
+ | '+' _INF {
+ f := &compoundFloatNode{val: math.Inf(1)}
+ f.setRange($1, $2)
+ $$ = f
+ }
+ | '-' _INF {
+ f := &compoundFloatNode{val: math.Inf(-1)}
+ f.setRange($1, $2)
+ $$ = f
+ }
+
+stringLit : _STRING_LIT {
+ $$ = &compoundStringNode{val: $1.val}
+ $$.setRange($1, $1)
+ }
+ | stringLit _STRING_LIT {
+ $$ = &compoundStringNode{val: $1.val + $2.val}
+ $$.setRange($1, $2)
+ }
+
+aggregate : '{' aggFields '}' {
+ a := &aggregateLiteralNode{elements: $2}
+ a.setRange($1, $3)
+ $$ = a
+ }
+
+aggFields : aggField
+ | aggFields aggField {
+ $$ = append($1, $2...)
+ }
+ | {
+ $$ = nil
+ }
+
+aggField : aggFieldEntry
+ | aggFieldEntry ',' {
+ $$ = $1
+ }
+ | aggFieldEntry ';' {
+ $$ = $1
+ }
+ | error ',' {
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+aggFieldEntry : aggName ':' scalarConstant {
+ a := &aggregateEntryNode{name: $1, val: $3}
+ a.setRange($1, $3)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '[' ']' {
+ s := &sliceLiteralNode{}
+ s.setRange($3, $4)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $4)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '[' constantList ']' {
+ s := &sliceLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $5)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '[' error ']' {
+ }
+ | aggName ':' aggregate {
+ a := &aggregateEntryNode{name: $1, val: $3}
+ a.setRange($1, $3)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName aggregate {
+ a := &aggregateEntryNode{name: $1, val: $2}
+ a.setRange($1, $2)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $5)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $3}
+ s.setRange($2, $4)
+ a := &aggregateEntryNode{name: $1, val: s}
+ a.setRange($1, $4)
+ $$ = []*aggregateEntryNode{a}
+ }
+ | aggName ':' '<' error '>' {
+ }
+ | aggName '<' error '>' {
+ }
+
+aggName : name {
+ n := &compoundIdentNode{val: $1.val}
+ n.setRange($1, $1)
+ $$ = &aggregateNameNode{name: n}
+ $$.setRange($1, $1)
+ }
+ | '[' typeIdent ']' {
+ $$ = &aggregateNameNode{name: $2, isExtension: true}
+ $$.setRange($1, $3)
+ }
+ | '[' error ']' {
+ }
+
+constantList : constant {
+ $$ = []valueNode{$1}
+ }
+ | constantList ',' constant {
+ $$ = append($1, $3)
+ }
+ | constantList ';' constant {
+ $$ = append($1, $3)
+ }
+ | '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $2}
+ s.setRange($1, $3)
+ $$ = []valueNode{s}
+ }
+ | constantList ',' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ $$ = append($1, s)
+ }
+ | constantList ';' '<' aggFields '>' {
+ s := &aggregateLiteralNode{elements: $4}
+ s.setRange($3, $5)
+ $$ = append($1, s)
+ }
+ | '<' error '>' {
+ }
+ | constantList ',' '<' error '>' {
+ }
+ | constantList ';' '<' error '>' {
+ }
+
+typeIdent : ident
+ | '.' ident {
+ $$ = &compoundIdentNode{val: "." + $2.val}
+ $$.setRange($1, $2)
+ }
+
+field : _REQUIRED typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1, required: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | _OPTIONAL typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | _REPEATED typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1, repeated: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+ $$.setRange($1, $6)
+ }
+ | typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+ $$.setRange($1, $5)
+ }
+ | _REQUIRED typeIdent name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1, required: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $6}
+ $$.setRange($1, $7)
+ }
+ | _OPTIONAL typeIdent name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $6}
+ $$.setRange($1, $7)
+ }
+ | _REPEATED typeIdent name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $5.start(), $5.val)
+ lbl := fieldLabel{identNode: $1, repeated: true}
+ $$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $6}
+ $$.setRange($1, $7)
+ }
+ | typeIdent name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $5}
+ $$.setRange($1, $6)
+ }
+
+compactOptions: '[' compactOptionDecls ']' {
+ $$ = &compactOptionsNode{decls: $2}
+ $$.setRange($1, $3)
+ }
+
+compactOptionDecls : compactOptionDecls ',' compactOption {
+ $$ = append($1, $3...)
+ }
+ | compactOption
+
+compactOption: optionName '=' constant {
+ n := &optionNameNode{parts: $1}
+ n.setRange($1[0], $1[len($1)-1])
+ o := &optionNode{name: n, val: $3}
+ o.setRange($1[0], $3)
+ $$ = []*optionNode{o}
+ }
+
+group : _REQUIRED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := fieldLabel{identNode: $1, required: true}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+ | _OPTIONAL _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := fieldLabel{identNode: $1}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+ | _REPEATED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $5.start(), $5.val)
+ if !unicode.IsUpper(rune($3.val[0])) {
+ lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+ }
+ lbl := fieldLabel{identNode: $1, repeated: true}
+ $$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+ $$.setRange($1, $8)
+ }
+
+oneof : _ONEOF name '{' oneofBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.field != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "oneof must contain at least one field")
+ }
+ $$ = &oneOfNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+oneofBody : oneofBody oneofItem {
+ $$ = append($1, $2...)
+ }
+ | oneofItem
+ | {
+ $$ = nil
+ }
+
+oneofItem : option {
+ $$ = []*oneOfElement{{option: $1[0]}}
+ }
+ | oneofField {
+ $$ = []*oneOfElement{{field: $1}}
+ }
+ | oneofGroup {
+ $$ = []*oneOfElement{{group: $1}}
+ }
+ | ';' {
+ $$ = []*oneOfElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+oneofField : typeIdent name '=' _INT_LIT ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+ $$.setRange($1, $5)
+ }
+ | typeIdent name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $5}
+ $$.setRange($1, $6)
+ }
+
+oneofGroup : _GROUP name '=' _INT_LIT '{' messageBody '}' {
+ checkTag(protolex, $4.start(), $4.val)
+ if !unicode.IsUpper(rune($2.val[0])) {
+ lexError(protolex, $2.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $2.val))
+ }
+ $$ = &groupNode{groupKeyword: $1, name: $2, tag: $4, decls: $6}
+ $$.setRange($1, $7)
+ }
+
+mapField : mapType name '=' _INT_LIT ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &mapFieldNode{mapType: $1, name: $2, tag: $4}
+ $$.setRange($1, $5)
+ }
+ | mapType name '=' _INT_LIT compactOptions ';' {
+ checkTag(protolex, $4.start(), $4.val)
+ $$ = &mapFieldNode{mapType: $1, name: $2, tag: $4, options: $5}
+ $$.setRange($1, $6)
+ }
+
+mapType : _MAP '<' keyType ',' typeIdent '>' {
+ $$ = &mapTypeNode{mapKeyword: $1, keyType: $3, valueType: $5}
+ $$.setRange($1, $6)
+}
+
+keyType : _INT32
+ | _INT64
+ | _UINT32
+ | _UINT64
+ | _SINT32
+ | _SINT64
+ | _FIXED32
+ | _FIXED64
+ | _SFIXED32
+ | _SFIXED64
+ | _BOOL
+ | _STRING
+
+extensions : _EXTENSIONS tagRanges ';' {
+ $$ = &extensionRangeNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | _EXTENSIONS tagRanges compactOptions ';' {
+ $$ = &extensionRangeNode{ranges: $2, options: $3}
+ $$.setRange($1, $4)
+ }
+
+tagRanges : tagRanges ',' tagRange {
+ $$ = append($1, $3...)
+ }
+ | tagRange
+
+tagRange : _INT_LIT {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+ r.setRange($1, $1)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _INT_LIT {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ if $3.val > internal.MaxTag {
+ lexError(protolex, $3.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", $3.val, internal.MaxTag))
+ }
+ if $1.val > $3.val {
+ lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | _INT_LIT _TO _MAX {
+ if $1.val > internal.MaxTag {
+ lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: internal.MaxTag}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+
+enumRanges : enumRanges ',' enumRange {
+ $$ = append($1, $3...)
+ }
+ | enumRange
+
+enumRange : intLit {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+ r.setRange($1, $1)
+ $$ = []*rangeNode{r}
+ }
+ | intLit _TO intLit {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ if $1.val > $3.val {
+ lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+ }
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+ | intLit _TO _MAX {
+ checkInt64InInt32Range(protolex, $1.start(), $1.val)
+ r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: math.MaxInt32}
+ r.setRange($1, $3)
+ $$ = []*rangeNode{r}
+ }
+
+msgReserved : _RESERVED tagRanges ';' {
+ $$ = &reservedNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | reservedNames
+
+enumReserved : _RESERVED enumRanges ';' {
+ $$ = &reservedNode{ranges: $2}
+ $$.setRange($1, $3)
+ }
+ | reservedNames
+
+reservedNames : _RESERVED fieldNames ';' {
+ rsvd := map[string]struct{}{}
+ for _, n := range $2 {
+ if _, ok := rsvd[n.val]; ok {
+ lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+ break
+ }
+ rsvd[n.val] = struct{}{}
+ }
+ $$ = &reservedNode{names: $2}
+ $$.setRange($1, $3)
+ }
+
+fieldNames : fieldNames ',' stringLit {
+ $$ = append($1, $3)
+ }
+ | stringLit {
+ $$ = []*compoundStringNode{$1}
+ }
+
+enum : _ENUM name '{' enumBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.value != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "enums must define at least one value")
+ }
+ $$ = &enumNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+enumBody : enumBody enumItem {
+ $$ = append($1, $2...)
+ }
+ | enumItem
+ | {
+ $$ = nil
+ }
+
+enumItem : option {
+ $$ = []*enumElement{{option: $1[0]}}
+ }
+ | enumField {
+ $$ = []*enumElement{{value: $1}}
+ }
+ | enumReserved {
+ $$ = []*enumElement{{reserved: $1}}
+ }
+ | ';' {
+ $$ = []*enumElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+enumField : name '=' intLit ';' {
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, number: $3}
+ $$.setRange($1, $4)
+ }
+ | name '=' intLit compactOptions ';' {
+ checkInt64InInt32Range(protolex, $3.start(), $3.val)
+ $$ = &enumValueNode{name: $1, number: $3, options: $4}
+ $$.setRange($1, $5)
+ }
+
+message : _MESSAGE name '{' messageBody '}' {
+ $$ = &messageNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+messageBody : messageBody messageItem {
+ $$ = append($1, $2...)
+ }
+ | messageItem
+ | {
+ $$ = nil
+ }
+
+messageItem : field {
+ $$ = []*messageElement{{field: $1}}
+ }
+ | enum {
+ $$ = []*messageElement{{enum: $1}}
+ }
+ | message {
+ $$ = []*messageElement{{nested: $1}}
+ }
+ | extend {
+ $$ = []*messageElement{{extend: $1}}
+ }
+ | extensions {
+ $$ = []*messageElement{{extensionRange: $1}}
+ }
+ | group {
+ $$ = []*messageElement{{group: $1}}
+ }
+ | option {
+ $$ = []*messageElement{{option: $1[0]}}
+ }
+ | oneof {
+ $$ = []*messageElement{{oneOf: $1}}
+ }
+ | mapField {
+ $$ = []*messageElement{{mapField: $1}}
+ }
+ | msgReserved {
+ $$ = []*messageElement{{reserved: $1}}
+ }
+ | ';' {
+ $$ = []*messageElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+extend : _EXTEND typeIdent '{' extendBody '}' {
+ c := 0
+ for _, el := range $4 {
+ if el.field != nil || el.group != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, $1.start(), "extend sections must define at least one extension")
+ }
+ $$ = &extendNode{extendee: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+extendBody : extendBody extendItem {
+ $$ = append($1, $2...)
+ }
+ | extendItem
+ | {
+ $$ = nil
+ }
+
+extendItem : field {
+ $$ = []*extendElement{{field: $1}}
+ }
+ | group {
+ $$ = []*extendElement{{group: $1}}
+ }
+ | ';' {
+ $$ = []*extendElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+service : _SERVICE name '{' serviceBody '}' {
+ $$ = &serviceNode{name: $2, decls: $4}
+ $$.setRange($1, $5)
+ }
+
+serviceBody : serviceBody serviceItem {
+ $$ = append($1, $2...)
+ }
+ | serviceItem
+ | {
+ $$ = nil
+ }
+
+// NB: doc suggests support for "stream" declaration, separate from "rpc", but
+// it does not appear to be supported in protoc (doc is likely from grammar for
+// Google-internal version of protoc, with support for streaming stubby)
+serviceItem : option {
+ $$ = []*serviceElement{{option: $1[0]}}
+ }
+ | rpc {
+ $$ = []*serviceElement{{rpc: $1}}
+ }
+ | ';' {
+ $$ = []*serviceElement{{empty: $1}}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+rpc : _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' ';' {
+ $$ = &methodNode{name: $2, input: $4, output: $8}
+ $$.setRange($1, $10)
+ }
+ | _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' '{' rpcOptions '}' {
+ $$ = &methodNode{name: $2, input: $4, output: $8, options: $11}
+ $$.setRange($1, $12)
+ }
+
+rpcType : _STREAM typeIdent {
+ $$ = &rpcTypeNode{msgType: $2, streamKeyword: $1}
+ $$.setRange($1, $2)
+ }
+ | typeIdent {
+ $$ = &rpcTypeNode{msgType: $1}
+ $$.setRange($1, $1)
+ }
+
+rpcOptions : rpcOptions rpcOption {
+ $$ = append($1, $2...)
+ }
+ | rpcOption
+ | {
+ $$ = []*optionNode{}
+ }
+
+rpcOption : option {
+ $$ = $1
+ }
+ | ';' {
+ $$ = []*optionNode{}
+ }
+ | error ';' {
+ }
+ | error {
+ }
+
+name : _NAME
+ | _SYNTAX
+ | _IMPORT
+ | _WEAK
+ | _PUBLIC
+ | _PACKAGE
+ | _OPTION
+ | _TRUE
+ | _FALSE
+ | _INF
+ | _NAN
+ | _REPEATED
+ | _OPTIONAL
+ | _REQUIRED
+ | _DOUBLE
+ | _FLOAT
+ | _INT32
+ | _INT64
+ | _UINT32
+ | _UINT64
+ | _SINT32
+ | _SINT64
+ | _FIXED32
+ | _FIXED64
+ | _SFIXED32
+ | _SFIXED64
+ | _BOOL
+ | _STRING
+ | _BYTES
+ | _GROUP
+ | _ONEOF
+ | _MAP
+ | _EXTENSIONS
+ | _TO
+ | _MAX
+ | _RESERVED
+ | _ENUM
+ | _MESSAGE
+ | _EXTEND
+ | _SERVICE
+ | _RPC
+ | _STREAM
+ | _RETURNS
+
+%%
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
new file mode 100644
index 0000000..843df01
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
@@ -0,0 +1,2364 @@
+// Code generated by goyacc -o proto.y.go -p proto proto.y. DO NOT EDIT.
+
+//line proto.y:2
+package protoparse
+
+import __yyfmt__ "fmt"
+
+//line proto.y:2
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+ "fmt"
+ "math"
+ "unicode"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+//line proto.y:18
+type protoSymType struct {
+ yys int
+ file *fileNode
+ fileDecls []*fileElement
+ syn *syntaxNode
+ pkg *packageNode
+ imprt *importNode
+ msg *messageNode
+ msgDecls []*messageElement
+ fld *fieldNode
+ mapFld *mapFieldNode
+ mapType *mapTypeNode
+ grp *groupNode
+ oo *oneOfNode
+ ooDecls []*oneOfElement
+ ext *extensionRangeNode
+ resvd *reservedNode
+ en *enumNode
+ enDecls []*enumElement
+ env *enumValueNode
+ extend *extendNode
+ extDecls []*extendElement
+ svc *serviceNode
+ svcDecls []*serviceElement
+ mtd *methodNode
+ rpcType *rpcTypeNode
+ opts []*optionNode
+ optNm []*optionNamePartNode
+ cmpctOpts *compactOptionsNode
+ rngs []*rangeNode
+ names []*compoundStringNode
+ cid *compoundIdentNode
+ sl []valueNode
+ agg []*aggregateEntryNode
+ aggName *aggregateNameNode
+ v valueNode
+ il *compoundIntNode
+ str *compoundStringNode
+ s *stringLiteralNode
+ i *intLiteralNode
+ f *floatLiteralNode
+ id *identNode
+ b *basicNode
+ err error
+}
+
+const _STRING_LIT = 57346
+const _INT_LIT = 57347
+const _FLOAT_LIT = 57348
+const _NAME = 57349
+const _SYNTAX = 57350
+const _IMPORT = 57351
+const _WEAK = 57352
+const _PUBLIC = 57353
+const _PACKAGE = 57354
+const _OPTION = 57355
+const _TRUE = 57356
+const _FALSE = 57357
+const _INF = 57358
+const _NAN = 57359
+const _REPEATED = 57360
+const _OPTIONAL = 57361
+const _REQUIRED = 57362
+const _DOUBLE = 57363
+const _FLOAT = 57364
+const _INT32 = 57365
+const _INT64 = 57366
+const _UINT32 = 57367
+const _UINT64 = 57368
+const _SINT32 = 57369
+const _SINT64 = 57370
+const _FIXED32 = 57371
+const _FIXED64 = 57372
+const _SFIXED32 = 57373
+const _SFIXED64 = 57374
+const _BOOL = 57375
+const _STRING = 57376
+const _BYTES = 57377
+const _GROUP = 57378
+const _ONEOF = 57379
+const _MAP = 57380
+const _EXTENSIONS = 57381
+const _TO = 57382
+const _MAX = 57383
+const _RESERVED = 57384
+const _ENUM = 57385
+const _MESSAGE = 57386
+const _EXTEND = 57387
+const _SERVICE = 57388
+const _RPC = 57389
+const _STREAM = 57390
+const _RETURNS = 57391
+const _ERROR = 57392
+
+var protoToknames = [...]string{
+ "$end",
+ "error",
+ "$unk",
+ "_STRING_LIT",
+ "_INT_LIT",
+ "_FLOAT_LIT",
+ "_NAME",
+ "_SYNTAX",
+ "_IMPORT",
+ "_WEAK",
+ "_PUBLIC",
+ "_PACKAGE",
+ "_OPTION",
+ "_TRUE",
+ "_FALSE",
+ "_INF",
+ "_NAN",
+ "_REPEATED",
+ "_OPTIONAL",
+ "_REQUIRED",
+ "_DOUBLE",
+ "_FLOAT",
+ "_INT32",
+ "_INT64",
+ "_UINT32",
+ "_UINT64",
+ "_SINT32",
+ "_SINT64",
+ "_FIXED32",
+ "_FIXED64",
+ "_SFIXED32",
+ "_SFIXED64",
+ "_BOOL",
+ "_STRING",
+ "_BYTES",
+ "_GROUP",
+ "_ONEOF",
+ "_MAP",
+ "_EXTENSIONS",
+ "_TO",
+ "_MAX",
+ "_RESERVED",
+ "_ENUM",
+ "_MESSAGE",
+ "_EXTEND",
+ "_SERVICE",
+ "_RPC",
+ "_STREAM",
+ "_RETURNS",
+ "_ERROR",
+ "'='",
+ "';'",
+ "':'",
+ "'{'",
+ "'}'",
+ "'\\\\'",
+ "'/'",
+ "'?'",
+ "'.'",
+ "','",
+ "'>'",
+ "'<'",
+ "'+'",
+ "'-'",
+ "'('",
+ "')'",
+ "'['",
+ "']'",
+ "'*'",
+ "'&'",
+ "'^'",
+ "'%'",
+ "'$'",
+ "'#'",
+ "'@'",
+ "'!'",
+ "'~'",
+ "'`'",
+}
+var protoStatenames = [...]string{}
+
+const protoEofCode = 1
+const protoErrCode = 2
+const protoInitialStackSize = 16
+
+//line proto.y:1005
+
+//line yacctab:1
+var protoExca = [...]int{
+ -1, 0,
+ 1, 4,
+ -2, 0,
+ -1, 1,
+ 1, -1,
+ -2, 0,
+ -1, 2,
+ 1, 1,
+ -2, 0,
+ -1, 3,
+ 1, 2,
+ -2, 0,
+ -1, 22,
+ 1, 3,
+ -2, 0,
+ -1, 93,
+ 55, 162,
+ -2, 0,
+ -1, 94,
+ 55, 150,
+ -2, 0,
+ -1, 95,
+ 55, 179,
+ -2, 0,
+ -1, 97,
+ 55, 188,
+ -2, 0,
+ -1, 110,
+ 55, 54,
+ -2, 0,
+ -1, 229,
+ 61, 54,
+ -2, 0,
+ -1, 246,
+ 55, 103,
+ -2, 0,
+ -1, 272,
+ 61, 54,
+ -2, 0,
+ -1, 316,
+ 61, 54,
+ -2, 0,
+ -1, 354,
+ 55, 162,
+ -2, 0,
+ -1, 357,
+ 55, 162,
+ -2, 0,
+ -1, 360,
+ 55, 162,
+ -2, 0,
+ -1, 370,
+ 61, 54,
+ -2, 0,
+ -1, 372,
+ 61, 54,
+ -2, 0,
+ -1, 393,
+ 55, 162,
+ -2, 0,
+ -1, 404,
+ 55, 200,
+ -2, 0,
+}
+
+const protoPrivate = 57344
+
+const protoLast = 2634
+
+var protoAct = [...]int{
+
+ 31, 124, 8, 406, 8, 8, 116, 117, 169, 75,
+ 309, 288, 293, 209, 196, 107, 210, 103, 181, 78,
+ 79, 161, 83, 104, 8, 168, 123, 81, 133, 118,
+ 155, 195, 102, 142, 105, 145, 391, 358, 346, 331,
+ 241, 355, 352, 242, 30, 76, 345, 330, 243, 80,
+ 26, 244, 244, 338, 344, 244, 244, 244, 328, 84,
+ 347, 304, 87, 88, 276, 275, 401, 212, 244, 342,
+ 321, 29, 212, 244, 115, 383, 244, 268, 212, 227,
+ 110, 226, 203, 367, 303, 226, 90, 412, 229, 225,
+ 398, 101, 109, 225, 396, 151, 146, 266, 393, 162,
+ 226, 360, 399, 308, 226, 265, 92, 226, 225, 374,
+ 96, 173, 225, 351, 357, 225, 320, 403, 354, 404,
+ 89, 249, 157, 379, 186, 156, 213, 90, 153, 250,
+ 248, 213, 377, 246, 194, 79, 78, 213, 243, 226,
+ 198, 199, 224, 97, 183, 151, 146, 225, 95, 94,
+ 223, 93, 165, 375, 366, 361, 86, 341, 335, 188,
+ 190, 192, 162, 17, 14, 80, 76, 220, 211, 173,
+ 4, 15, 165, 200, 16, 17, 202, 222, 153, 86,
+ 205, 157, 218, 17, 156, 215, 86, 202, 409, 234,
+ 235, 236, 237, 238, 239, 86, 228, 166, 285, 17,
+ 219, 232, 164, 230, 100, 19, 18, 20, 21, 409,
+ 216, 183, 96, 206, 13, 187, 167, 166, 25, 365,
+ 17, 368, 164, 211, 264, 217, 14, 99, 109, 364,
+ 173, 332, 283, 15, 98, 282, 16, 17, 408, 281,
+ 280, 410, 290, 85, 279, 269, 278, 247, 294, 240,
+ 207, 271, 91, 24, 289, 273, 267, 245, 286, 408,
+ 29, 197, 277, 382, 381, 327, 326, 19, 18, 20,
+ 21, 109, 76, 173, 173, 299, 13, 325, 291, 306,
+ 324, 211, 222, 211, 307, 302, 121, 11, 323, 11,
+ 11, 322, 119, 10, 294, 10, 10, 311, 317, 301,
+ 336, 337, 5, 315, 197, 334, 23, 284, 305, 11,
+ 178, 178, 179, 175, 176, 10, 86, 173, 173, 29,
+ 163, 299, 180, 29, 177, 23, 222, 160, 329, 27,
+ 28, 3, 340, 109, 22, 12, 154, 147, 144, 343,
+ 122, 289, 348, 362, 208, 339, 109, 109, 148, 173,
+ 127, 120, 9, 201, 9, 9, 292, 222, 140, 76,
+ 126, 376, 296, 125, 378, 363, 353, 380, 356, 295,
+ 359, 173, 170, 173, 9, 313, 172, 251, 369, 371,
+ 108, 106, 182, 405, 186, 173, 186, 173, 186, 287,
+ 7, 6, 2, 222, 394, 222, 384, 1, 386, 0,
+ 400, 0, 0, 0, 0, 0, 407, 407, 186, 411,
+ 0, 0, 311, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 314, 392, 29, 111, 114, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 0, 0, 110, 0, 0, 0,
+ 0, 0, 0, 0, 316, 112, 113, 0, 0, 0,
+ 312, 29, 111, 114, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 0, 0, 0,
+ 0, 110, 0, 0, 0, 0, 0, 0, 0, 272,
+ 112, 113, 0, 0, 270, 29, 111, 114, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 0, 0, 0, 0, 110, 0, 0, 0, 0,
+ 0, 0, 0, 372, 112, 113, 29, 111, 114, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 0, 0, 110, 0, 0, 0,
+ 0, 0, 171, 0, 370, 112, 113, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 397, 0, 171, 0, 0, 0, 174, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 395, 0, 171, 0, 0, 0, 174, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 373, 0, 0, 0, 0, 0, 174,
+ 29, 111, 114, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 0, 0, 0, 0,
+ 110, 0, 0, 0, 0, 0, 171, 0, 0, 112,
+ 113, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 350, 0, 171, 0, 0,
+ 0, 174, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 319, 0, 171, 0,
+ 0, 0, 174, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 387,
+ 0, 0, 0, 174, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 0,
+ 385, 0, 0, 0, 174, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 349, 0, 0, 0, 174, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 318, 0, 0, 0, 174, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 274, 0, 0, 0, 174, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 171, 0, 0, 0, 174, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 174,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 77, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 129, 0, 0, 0, 233, 32, 33,
+ 34, 35, 36, 37, 138, 39, 40, 41, 42, 132,
+ 131, 130, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 139, 143,
+ 137, 65, 66, 141, 134, 135, 136, 71, 72, 73,
+ 74, 0, 0, 128, 0, 0, 402, 129, 0, 0,
+ 82, 0, 32, 33, 34, 35, 36, 37, 138, 39,
+ 40, 41, 42, 132, 131, 130, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 139, 143, 137, 65, 66, 141, 134, 135,
+ 136, 71, 72, 73, 74, 0, 0, 128, 0, 0,
+ 390, 129, 0, 0, 82, 0, 32, 33, 34, 35,
+ 36, 37, 138, 39, 40, 41, 42, 132, 131, 130,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 139, 143, 137, 65,
+ 66, 141, 134, 135, 136, 71, 72, 73, 74, 0,
+ 0, 128, 0, 0, 389, 129, 0, 0, 82, 0,
+ 32, 33, 34, 35, 36, 37, 138, 39, 40, 41,
+ 42, 132, 131, 130, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 139, 143, 137, 65, 66, 141, 134, 135, 136, 71,
+ 72, 73, 74, 0, 0, 128, 0, 0, 388, 298,
+ 0, 0, 82, 0, 32, 33, 34, 35, 36, 37,
+ 138, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 300, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 0, 0, 297,
+ 0, 0, 333, 159, 0, 0, 82, 0, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 132,
+ 131, 130, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 0, 0, 158, 0, 0, 214, 129, 0, 0,
+ 82, 0, 32, 33, 34, 35, 36, 37, 138, 39,
+ 40, 41, 42, 132, 131, 130, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 139, 143, 137, 65, 66, 141, 134, 135,
+ 136, 71, 72, 73, 74, 0, 0, 128, 0, 0,
+ 185, 129, 0, 0, 82, 0, 32, 33, 34, 35,
+ 36, 37, 138, 39, 40, 41, 42, 132, 131, 130,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 139, 143, 137, 65,
+ 66, 141, 134, 135, 136, 71, 72, 73, 74, 0,
+ 0, 128, 0, 0, 298, 0, 0, 0, 82, 32,
+ 33, 34, 35, 36, 37, 138, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 300, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 0, 0, 297, 0, 0, 159, 0, 0,
+ 0, 82, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 132, 131, 130, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 0, 0, 158, 0, 0,
+ 231, 0, 0, 0, 82, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 82, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 310,
+ 74, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 82, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 82, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 184, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 193, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 82,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 191,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 82, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 189, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 0, 0, 0, 0,
+ 0, 150, 0, 0, 0, 82, 32, 33, 34, 35,
+ 36, 37, 138, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 152, 68, 69, 70, 71, 72, 73, 74, 0,
+ 150, 149, 0, 0, 204, 32, 33, 34, 35, 36,
+ 37, 138, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 152, 68, 69, 70, 71, 72, 73, 74, 0, 0,
+ 149, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74,
+}
+var protoPact = [...]int{
+
+ 162, -1000, 224, 224, 202, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 166, 319, 2584, 1463, 2584, 2584,
+ 2224, 2584, 224, -1000, 315, -1000, 191, 315, 315, -1000,
+ 68, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 201, 27, 2224, 97, 95,
+ 94, 27, 2584, 89, 182, -1000, -1000, 175, 152, -1000,
+ 2584, 856, 8, 1959, 2538, 2065, 27, 150, -1000, -1000,
+ -1000, -1000, 164, -1000, -1000, 312, -1000, -1000, -1000, -1000,
+ 1402, -1000, 308, 306, -1000, 2277, 1905, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 163,
+ 2436, 2383, 2330, 2584, 2584, 2584, 2224, 299, 1463, 2584,
+ 2584, 256, -1000, 20, 2489, -1000, -1000, -1000, -1000, -1000,
+ 161, 199, 67, -1000, 1851, -1000, -1000, -1000, -1000, 158,
+ 170, -1000, -1000, -1000, -1000, 148, 2584, -1000, 1036, -1000,
+ 90, 87, 26, -1000, 2118, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 2277, -1000, 1522, -1000, -1000, -1000, 2584, 2584,
+ 2584, 2584, 2584, 2584, 198, -12, -1000, 217, 79, 196,
+ 78, 69, 312, 1124, -1000, -1000, -1000, 73, 45, -1000,
+ 216, -1000, -1000, 305, -1000, -1000, -1000, -1000, -1000, -1000,
+ 12, -1000, -1000, -1000, -1000, -1000, -1000, 487, -1000, 1341,
+ -3, -4, -1000, 2224, 195, 193, 189, 188, 184, 181,
+ 302, -1000, 146, 299, 1463, 237, 2012, 294, -1000, -1000,
+ 315, 24, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 9, 73, -1000, 62, 2171, -1000,
+ 422, -1000, 1280, 975, 55, -1000, -1000, 4, 286, 283,
+ 275, 272, 261, 260, 6, -1000, -1000, -21, -1000, 180,
+ -1000, -1000, 1797, -1000, -1000, -1000, -1000, -1000, 106, 2584,
+ 2584, 1, 312, 2224, -1000, 105, -1000, -1000, -1000, 3,
+ 2224, -1000, -1000, -14, -8, -1000, 1219, 914, 52, -1000,
+ -1000, -1000, -10, 64, -11, 60, -15, 47, -1000, 103,
+ -1000, 1463, 856, -1000, -1000, -1000, 178, 168, -1000, 102,
+ 22, -1000, 172, -1000, -1000, 612, 551, -1000, 792, 48,
+ -1000, -1000, -1000, 101, 1959, -1000, 80, 1959, -1000, 71,
+ 1959, -1000, -1000, -1000, 259, 258, -1000, -1000, 10, -1000,
+ 1158, -1000, 1097, -1000, -1000, -1000, 1743, -1000, 1689, -1000,
+ 1635, -16, 44, 2171, 731, 33, 670, 29, -1000, -1000,
+ -1000, -1000, 50, 1959, 0, -1000, -1000, -1000, -1000, -1000,
+ 1581, 65, -1000, -1000, 207, 186, -1000, -1000, -1000, 35,
+ -1000, -1000, -1000,
+}
+var protoPgo = [...]int{
+
+ 0, 397, 392, 302, 331, 391, 390, 1, 11, 389,
+ 3, 383, 9, 18, 382, 43, 32, 17, 23, 381,
+ 380, 16, 15, 0, 377, 27, 28, 376, 375, 25,
+ 8, 372, 29, 369, 363, 26, 362, 360, 358, 351,
+ 7, 6, 12, 356, 353, 350, 348, 33, 14, 31,
+ 13, 344, 340, 292, 35, 338, 337, 286, 30, 336,
+ 34, 335, 21, 327, 320, 10,
+}
+var protoR1 = [...]int{
+
+ 0, 1, 1, 1, 1, 4, 4, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 2, 5, 5,
+ 5, 6, 25, 25, 7, 12, 12, 12, 13, 13,
+ 14, 14, 16, 16, 17, 17, 17, 17, 17, 19,
+ 19, 22, 21, 21, 20, 20, 20, 20, 20, 60,
+ 60, 18, 29, 29, 29, 30, 30, 30, 30, 30,
+ 30, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 27, 27, 27, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 26, 26, 32, 32, 32, 32, 32,
+ 32, 32, 32, 15, 9, 9, 8, 35, 35, 35,
+ 34, 43, 43, 43, 42, 42, 42, 42, 42, 42,
+ 33, 33, 36, 37, 37, 38, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 52, 52,
+ 49, 49, 48, 48, 48, 51, 51, 50, 50, 50,
+ 45, 45, 46, 46, 47, 44, 44, 53, 55, 55,
+ 55, 54, 54, 54, 54, 54, 54, 56, 56, 39,
+ 41, 41, 41, 40, 40, 40, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 57, 59, 59, 59,
+ 58, 58, 58, 58, 58, 61, 63, 63, 63, 62,
+ 62, 62, 62, 62, 64, 64, 65, 65, 11, 11,
+ 11, 10, 10, 10, 10, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+}
+// protoR2 is the goyacc-generated rule-length table: indexed by production
+// number, it gives the number of symbols on the right-hand side of that
+// production (how many stack entries a reduction pops; see the
+// "protop -= protoR2[proton]" step in Parse). Generated code — do not edit
+// by hand; regenerate from proto.y.
+var protoR2 = [...]int{
+
+	0, 1, 1, 2, 0, 2, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 2, 1, 4, 3, 4,
+	4, 3, 1, 3, 5, 1, 3, 4, 1, 2,
+	1, 4, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 1, 1, 1, 2, 2, 2, 2, 1,
+	2, 3, 1, 2, 0, 1, 2, 2, 2, 2,
+	1, 3, 4, 5, 5, 3, 2, 5, 4, 5,
+	4, 1, 3, 3, 1, 3, 3, 3, 5, 5,
+	3, 5, 5, 1, 2, 6, 6, 6, 5, 7,
+	7, 7, 6, 3, 3, 1, 3, 8, 8, 8,
+	5, 2, 1, 0, 1, 1, 1, 1, 2, 1,
+	5, 6, 7, 5, 6, 6, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 3, 4,
+	3, 1, 1, 3, 3, 3, 1, 1, 3, 3,
+	3, 1, 3, 1, 3, 3, 1, 5, 2, 1,
+	0, 1, 1, 1, 1, 2, 1, 4, 5, 5,
+	2, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 2, 1, 5, 2, 1, 0,
+	1, 1, 1, 2, 1, 5, 2, 1, 0, 1,
+	1, 1, 2, 1, 10, 12, 2, 1, 2, 1,
+	0, 1, 1, 2, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+}
+// protoChk is the goyacc-generated check table. Parse uses it to validate
+// entries computed from protoPact/protoAct: a shift is valid only when
+// protoChk[protoAct[n]] equals the current token, and a goto is valid only
+// when protoChk[state] matches the negated nonterminal. Generated code —
+// do not edit by hand; regenerate from proto.y.
+var protoChk = [...]int{
+
+	-1000, -1, -2, -4, 8, -3, -5, -6, -7, -39,
+	-53, -57, -61, 52, 2, 9, 12, 13, 44, 43,
+	45, 46, -4, -3, 51, 52, -60, 10, 11, 4,
+	-25, -23, 7, 8, 9, 10, 11, 12, 13, 14,
+	15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+	25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+	35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+	45, 46, 47, 48, 49, -12, -25, 65, -23, -23,
+	-26, -25, 59, -23, -60, 52, 4, -60, -60, 52,
+	59, 51, -26, 54, 54, 54, -25, 54, 52, 52,
+	52, -23, -16, -17, -18, -60, -19, -22, -20, -23,
+	54, 5, 63, 64, 6, 66, -41, -40, -32, -53,
+	-39, -57, -52, -35, -7, -34, -37, -45, 52, 2,
+	20, 19, 18, -26, 43, 44, 45, 39, 13, 37,
+	-38, 42, -47, 38, -55, -54, -7, -56, -46, 52,
+	2, -23, 42, -47, -59, -58, -32, -35, 52, 2,
+	-63, -62, -7, -64, 52, 2, 47, 52, -29, -30,
+	-31, 2, -27, -23, 67, 5, 6, 16, 5, 6,
+	16, -13, -14, -26, 59, 55, -40, 52, -26, 36,
+	-26, 36, -26, 36, -23, -49, -48, 5, -23, -23,
+	-49, -44, -60, 62, 55, -54, 52, 51, -51, -50,
+	-21, -22, 5, 64, 55, -58, 52, 55, -62, 52,
+	-23, 55, -30, 60, 52, 60, 52, 53, -18, 62,
+	-26, 2, -13, 65, -23, -23, -23, -23, -23, -23,
+	51, 52, -15, 60, 67, 40, 54, 51, 52, 52,
+	60, -24, 23, 24, 25, 26, 27, 28, 29, 30,
+	31, 32, 33, 34, -21, 60, 52, 40, 65, -17,
+	67, -18, 62, -29, 2, 68, 68, -26, 51, 51,
+	51, 51, 51, 51, 5, 52, -48, -9, -8, -12,
+	5, 41, -43, -42, -7, -33, -36, 52, 2, -26,
+	36, 5, -60, 60, 52, -15, -50, -21, 41, -65,
+	48, -26, 68, -28, 2, -16, 62, -29, 2, 61,
+	61, 66, 5, 5, 5, 5, 5, 5, 52, -15,
+	68, 60, 51, 55, -42, 52, -23, -23, 52, -15,
+	-26, 52, 66, -26, 68, 60, 52, 68, -29, 2,
+	61, 61, 52, -15, 54, 52, -15, 54, 52, -15,
+	54, 52, -8, -16, 51, 51, 52, 61, 49, -16,
+	62, -16, 62, 61, 61, 52, -41, 52, -41, 52,
+	-41, 5, 5, 65, -29, 2, -29, 2, 55, 55,
+	55, 52, -15, 54, -65, 61, 61, 61, 61, 52,
+	-41, 66, 55, 52, 54, -11, -10, -7, 52, 2,
+	55, -10, 52,
+}
+// protoDef is the goyacc-generated default-action table: indexed by parser
+// state, it gives the production to reduce by when no shift applies. The
+// sentinel -2 directs Parse (and protoErrorMessage) to consult the
+// protoExca exception table instead; 0 means "error". Generated code — do
+// not edit by hand; regenerate from proto.y.
+var protoDef = [...]int{
+
+	-2, -2, -2, -2, 0, 6, 7, 8, 9, 10,
+	11, 12, 13, 14, 16, 0, 0, 0, 0, 0,
+	0, 0, -2, 5, 0, 15, 0, 0, 0, 49,
+	0, 22, 205, 206, 207, 208, 209, 210, 211, 212,
+	213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+	223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+	233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+	243, 244, 245, 246, 247, 0, 25, 0, 0, 0,
+	0, 83, 0, 0, 0, 18, 50, 0, 0, 21,
+	0, 0, 0, -2, -2, -2, 84, -2, 17, 19,
+	20, 23, 0, 32, 33, 34, 35, 36, 37, 38,
+	-2, 39, 0, 0, 44, 26, 0, 161, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 172, 173, 175,
+	0, 0, 0, 0, 241, 242, 0, 237, 211, 235,
+	0, 240, 141, 236, 0, 149, 151, 152, 153, 154,
+	156, 0, 240, 143, 0, 178, 180, 181, 182, 184,
+	0, 187, 189, 190, 191, 193, 0, 24, 0, 52,
+	55, 60, 0, 71, 0, 40, 46, 47, 41, 45,
+	48, 27, 28, 30, 0, 159, 160, 174, 0, 234,
+	0, 234, 0, 234, 0, 0, 131, 132, 0, 0,
+	0, 0, 146, 0, 147, 148, 155, 0, 0, 136,
+	137, 42, 43, 0, 176, 177, 183, 185, 186, 192,
+	0, 51, 53, 56, 57, 58, 59, 0, 66, -2,
+	0, 0, 29, 0, 0, 0, 0, 0, 0, 0,
+	0, 128, 0, 0, 0, 0, -2, 0, 140, 144,
+	0, 0, 116, 117, 118, 119, 120, 121, 122, 123,
+	124, 125, 126, 127, 0, 0, 142, 0, 0, 61,
+	0, 65, -2, 0, 60, 72, 73, 0, 0, 0,
+	0, 0, 0, 0, 0, 129, 130, 0, 95, 0,
+	133, 134, 0, 102, 104, 105, 106, 107, 109, 0,
+	234, 0, 145, 0, 157, 0, 135, 138, 139, 0,
+	246, 197, 62, 0, 0, 74, -2, 0, 60, 68,
+	70, 31, 0, 0, 0, 0, 0, 0, 88, 0,
+	93, 0, 0, 100, 101, 108, 0, 0, 113, 0,
+	0, 158, 0, 196, 63, 0, 0, 64, 0, 60,
+	67, 69, 85, 0, -2, 86, 0, -2, 87, 0,
+	-2, 92, 94, 96, 0, 0, 114, 115, 0, 75,
+	-2, 76, -2, 77, 80, 89, 0, 90, 0, 91,
+	0, 0, 0, 0, 0, 60, 0, 60, 97, 98,
+	99, 110, 0, -2, 0, 78, 81, 79, 82, 111,
+	0, 0, 112, 194, -2, 0, 199, 201, 202, 204,
+	195, 198, 203,
+}
+// protoTok1 is the goyacc-generated single-byte token translation table:
+// protolex1 indexes it with the raw character code returned by the lexer
+// (when that code is below len(protoTok1)) to obtain the internal token
+// number; 3 marks "unknown char". Generated code — do not edit by hand;
+// regenerate from proto.y.
+var protoTok1 = [...]int{
+
+	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 76, 3, 74, 73, 72, 70, 3,
+	65, 66, 69, 63, 60, 64, 59, 57, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 53, 52,
+	62, 51, 61, 58, 75, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 67, 56, 68, 71, 3, 78, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 54, 3, 55, 77,
+}
+// protoTok2 is the goyacc-generated translation table for named tokens:
+// protolex1 indexes it with (char - protoPrivate) for lexer codes in the
+// private range. Entry 1 (value 3) doubles as the "unknown char" fallback
+// token. Generated code — do not edit by hand; regenerate from proto.y.
+var protoTok2 = [...]int{
+
+	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+	42, 43, 44, 45, 46, 47, 48, 49, 50,
+}
+// protoTok3 holds (char, token) pairs scanned linearly by protolex1 for
+// lexer codes matched by neither protoTok1 nor protoTok2; empty for this
+// grammar. Generated code — do not edit by hand; regenerate from proto.y.
+var protoTok3 = [...]int{
+	0,
+}
+
+// protoErrorMessages optionally maps (state, lookahead token) pairs to
+// custom syntax-error text; protoErrorMessage scans it before composing a
+// generic message. Empty for this grammar. Generated code — do not edit by
+// hand; regenerate from proto.y.
+var protoErrorMessages = [...]struct {
+	state int
+	token int
+	msg   string
+}{}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var (
+	// protoDebug controls parser trace verbosity (0 = silent; levels up to
+	// 4 print lex results, error recovery, reductions, and state moves).
+	protoDebug = 0
+	// protoErrorVerbose enables detailed "expected token" syntax-error
+	// messages in protoErrorMessage instead of the bare "syntax error".
+	protoErrorVerbose = false
+)
+
+// protoLexer is the interface the caller's lexer must implement.
+// Lex stores the next token's value in lval and returns its character
+// code (<= 0 signals end of input); Error reports a syntax error message.
+type protoLexer interface {
+	Lex(lval *protoSymType) int
+	Error(s string)
+}
+
+// protoParser is the reusable parser interface. Parse consumes tokens from
+// the given lexer and returns 0 on success, nonzero on unrecovered error;
+// Lookahead exposes the current lookahead character code.
+type protoParser interface {
+	Parse(protoLexer) int
+	Lookahead() int
+}
+
+// protoParserImpl is the concrete LALR parser state.
+type protoParserImpl struct {
+	lval  protoSymType                  // value of the most recently lexed token
+	stack [protoInitialStackSize]protoSymType // initial parse stack storage (grown by copy in Parse)
+	char  int                           // current lookahead character code; -1 when none
+}
+
+// Lookahead returns the current lookahead character code, or -1 when the
+// parser holds no lookahead (including whenever it is not mid-parse).
+func (p *protoParserImpl) Lookahead() int {
+	return p.char
+}
+
+func protoNewParser() protoParser {
+ return &protoParserImpl{}
+}
+
+// protoFlag marks "no action" entries in protoPact; Parse falls through to
+// the default action when the state's pact entry is <= protoFlag.
+const protoFlag = -1000
+
+// protoTokname returns the printable name of token c for debug and error
+// output, falling back to a synthesized "tok-N" form when c is out of
+// range or has no recorded name.
+func protoTokname(c int) string {
+	if idx := c - 1; idx >= 0 && idx < len(protoToknames) {
+		if name := protoToknames[idx]; name != "" {
+			return name
+		}
+	}
+	return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+// protoStatname returns the printable name of parser state s for debug
+// output, falling back to a synthesized "state-N" form when s is out of
+// range or has no recorded name.
+func protoStatname(s int) string {
+	if s < 0 || s >= len(protoStatenames) {
+		return __yyfmt__.Sprintf("state-%v", s)
+	}
+	if name := protoStatenames[s]; name != "" {
+		return name
+	}
+	return __yyfmt__.Sprintf("state-%v", s)
+}
+
+// protoErrorMessage builds the syntax-error text reported for the given
+// parser state and lookahead token. Unless protoErrorVerbose is set it is
+// always the bare "syntax error"; otherwise it first checks the
+// protoErrorMessages override table, then composes an "unexpected X,
+// expecting A or B" message by probing the shift (protoPact/protoAct) and
+// exception (protoExca) tables for up to four acceptable tokens.
+func protoErrorMessage(state, lookAhead int) string {
+	const TOKSTART = 4
+
+	if !protoErrorVerbose {
+		return "syntax error"
+	}
+
+	// Custom per-(state, token) message, if one was supplied.
+	for _, e := range protoErrorMessages {
+		if e.state == state && e.token == lookAhead {
+			return "syntax error: " + e.msg
+		}
+	}
+
+	res := "syntax error: unexpected " + protoTokname(lookAhead)
+
+	// To match Bison, suggest at most four expected tokens.
+	expected := make([]int, 0, 4)
+
+	// Look for shiftable tokens.
+	base := protoPact[state]
+	for tok := TOKSTART; tok-1 < len(protoToknames); tok++ {
+		if n := base + tok; n >= 0 && n < protoLast && protoChk[protoAct[n]] == tok {
+			if len(expected) == cap(expected) {
+				// More than four candidates: give up on suggestions.
+				return res
+			}
+			expected = append(expected, tok)
+		}
+	}
+
+	if protoDef[state] == -2 {
+		// State's default action lives in the exception table; locate its
+		// (-1, state) marker entry.
+		i := 0
+		for protoExca[i] != -1 || protoExca[i+1] != state {
+			i += 2
+		}
+
+		// Look for tokens that we accept or reduce.
+		for i += 2; protoExca[i] >= 0; i += 2 {
+			tok := protoExca[i]
+			if tok < TOKSTART || protoExca[i+1] == 0 {
+				continue
+			}
+			if len(expected) == cap(expected) {
+				return res
+			}
+			expected = append(expected, tok)
+		}
+
+		// If the default action is to accept or reduce, give up.
+		if protoExca[i+1] != 0 {
+			return res
+		}
+	}
+
+	// Append the collected suggestions: ", expecting A or B or C".
+	for i, tok := range expected {
+		if i == 0 {
+			res += ", expecting "
+		} else {
+			res += " or "
+		}
+		res += protoTokname(tok)
+	}
+	return res
+}
+
+// protolex1 fetches one token from the lexer and translates its raw
+// character code into the parser's internal token numbering via the
+// protoTok1/protoTok2/protoTok3 tables. It returns both the raw code
+// (cached by Parse as the lookahead) and the translated token.
+func protolex1(lex protoLexer, lval *protoSymType) (char, token int) {
+	token = 0
+	char = lex.Lex(lval)
+	if char <= 0 {
+		// End of input: translate to the EOF token.
+		token = protoTok1[0]
+		goto out
+	}
+	if char < len(protoTok1) {
+		// Single-byte literal tokens.
+		token = protoTok1[char]
+		goto out
+	}
+	if char >= protoPrivate {
+		// Named tokens in the private numbering range.
+		if char < protoPrivate+len(protoTok2) {
+			token = protoTok2[char-protoPrivate]
+			goto out
+		}
+	}
+	// Fallback: linear scan of (char, token) pairs.
+	for i := 0; i < len(protoTok3); i += 2 {
+		token = protoTok3[i+0]
+		if token == char {
+			token = protoTok3[i+1]
+			goto out
+		}
+	}
+
+out:
+	if token == 0 {
+		token = protoTok2[1] /* unknown char */
+	}
+	if protoDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char))
+	}
+	return char, token
+}
+
+// protoParse runs a freshly allocated parser over the given lexer's input
+// and returns its result (0 on success, 1 on unrecovered syntax error).
+func protoParse(protolex protoLexer) int {
+	p := protoNewParser()
+	return p.Parse(protolex)
+}
+
+func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int {
+ var proton int
+ var protoVAL protoSymType
+ var protoDollar []protoSymType
+ _ = protoDollar // silence set and not used
+ protoS := protorcvr.stack[:]
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ protostate := 0
+ protorcvr.char = -1
+ prototoken := -1 // protorcvr.char translated into internal numbering
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ protostate = -1
+ protorcvr.char = -1
+ prototoken = -1
+ }()
+ protop := -1
+ goto protostack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+protostack:
+ /* put a state and value onto the stack */
+ if protoDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate))
+ }
+
+ protop++
+ if protop >= len(protoS) {
+ nyys := make([]protoSymType, len(protoS)*2)
+ copy(nyys, protoS)
+ protoS = nyys
+ }
+ protoS[protop] = protoVAL
+ protoS[protop].yys = protostate
+
+protonewstate:
+ proton = protoPact[protostate]
+ if proton <= protoFlag {
+ goto protodefault /* simple state */
+ }
+ if protorcvr.char < 0 {
+ protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+ }
+ proton += prototoken
+ if proton < 0 || proton >= protoLast {
+ goto protodefault
+ }
+ proton = protoAct[proton]
+ if protoChk[proton] == prototoken { /* valid shift */
+ protorcvr.char = -1
+ prototoken = -1
+ protoVAL = protorcvr.lval
+ protostate = proton
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto protostack
+ }
+
+protodefault:
+ /* default state action */
+ proton = protoDef[protostate]
+ if proton == -2 {
+ if protorcvr.char < 0 {
+ protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if protoExca[xi+0] == -1 && protoExca[xi+1] == protostate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ proton = protoExca[xi+0]
+ if proton < 0 || proton == prototoken {
+ break
+ }
+ }
+ proton = protoExca[xi+1]
+ if proton < 0 {
+ goto ret0
+ }
+ }
+ if proton == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ protolex.Error(protoErrorMessage(protostate, prototoken))
+ Nerrs++
+ if protoDebug >= 1 {
+ __yyfmt__.Printf("%s", protoStatname(protostate))
+ __yyfmt__.Printf(" saw %s\n", protoTokname(prototoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for protop >= 0 {
+ proton = protoPact[protoS[protop].yys] + protoErrCode
+ if proton >= 0 && proton < protoLast {
+ protostate = protoAct[proton] /* simulate a shift of "error" */
+ if protoChk[protostate] == protoErrCode {
+ goto protostack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys)
+ }
+ protop--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken))
+ }
+ if prototoken == protoEofCode {
+ goto ret1
+ }
+ protorcvr.char = -1
+ prototoken = -1
+ goto protonewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production proton */
+ if protoDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate))
+ }
+
+ protont := proton
+ protopt := protop
+ _ = protopt // guard against "declared and not used"
+
+ protop -= protoR2[proton]
+ // protop is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if protop+1 >= len(protoS) {
+ nyys := make([]protoSymType, len(protoS)*2)
+ copy(nyys, protoS)
+ protoS = nyys
+ }
+ protoVAL = protoS[protop+1]
+
+ /* consult goto table to find next state */
+ proton = protoR1[proton]
+ protog := protoPgo[proton]
+ protoj := protog + protoS[protop].yys + 1
+
+ if protoj >= protoLast {
+ protostate = protoAct[protog]
+ } else {
+ protostate = protoAct[protoj]
+ if protoChk[protostate] != -proton {
+ protostate = protoAct[protog]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch protont {
+
+ case 1:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:119
+ {
+ protoVAL.file = &fileNode{syntax: protoDollar[1].syn}
+ protoVAL.file.setRange(protoDollar[1].syn, protoDollar[1].syn)
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 2:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:124
+ {
+ protoVAL.file = &fileNode{decls: protoDollar[1].fileDecls}
+ if len(protoDollar[1].fileDecls) > 0 {
+ protoVAL.file.setRange(protoDollar[1].fileDecls[0], protoDollar[1].fileDecls[len(protoDollar[1].fileDecls)-1])
+ }
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 3:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:131
+ {
+ protoVAL.file = &fileNode{syntax: protoDollar[1].syn, decls: protoDollar[2].fileDecls}
+ var end node
+ if len(protoDollar[2].fileDecls) > 0 {
+ end = protoDollar[2].fileDecls[len(protoDollar[2].fileDecls)-1]
+ } else {
+ end = protoDollar[1].syn
+ }
+ protoVAL.file.setRange(protoDollar[1].syn, end)
+ protolex.(*protoLex).res = protoVAL.file
+ }
+ case 4:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:142
+ {
+ }
+ case 5:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:145
+ {
+ protoVAL.fileDecls = append(protoDollar[1].fileDecls, protoDollar[2].fileDecls...)
+ }
+ case 7:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:150
+ {
+ protoVAL.fileDecls = []*fileElement{{imp: protoDollar[1].imprt}}
+ }
+ case 8:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:153
+ {
+ protoVAL.fileDecls = []*fileElement{{pkg: protoDollar[1].pkg}}
+ }
+ case 9:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:156
+ {
+ protoVAL.fileDecls = []*fileElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 10:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:159
+ {
+ protoVAL.fileDecls = []*fileElement{{message: protoDollar[1].msg}}
+ }
+ case 11:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:162
+ {
+ protoVAL.fileDecls = []*fileElement{{enum: protoDollar[1].en}}
+ }
+ case 12:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:165
+ {
+ protoVAL.fileDecls = []*fileElement{{extend: protoDollar[1].extend}}
+ }
+ case 13:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:168
+ {
+ protoVAL.fileDecls = []*fileElement{{service: protoDollar[1].svc}}
+ }
+ case 14:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:171
+ {
+ protoVAL.fileDecls = []*fileElement{{empty: protoDollar[1].b}}
+ }
+ case 15:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:174
+ {
+ }
+ case 16:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:176
+ {
+ }
+ case 17:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:179
+ {
+ if protoDollar[3].str.val != "proto2" && protoDollar[3].str.val != "proto3" {
+ lexError(protolex, protoDollar[3].str.start(), "syntax value must be 'proto2' or 'proto3'")
+ }
+ protoVAL.syn = &syntaxNode{syntax: protoDollar[3].str}
+ protoVAL.syn.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 18:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:187
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[2].str}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 19:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:191
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[3].str, weak: true}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 20:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:195
+ {
+ protoVAL.imprt = &importNode{name: protoDollar[3].str, public: true}
+ protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 21:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:200
+ {
+ protoVAL.pkg = &packageNode{name: protoDollar[2].cid}
+ protoVAL.pkg.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 22:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:205
+ {
+ protoVAL.cid = &compoundIdentNode{val: protoDollar[1].id.val}
+ protoVAL.cid.setRange(protoDollar[1].id, protoDollar[1].id)
+ }
+ case 23:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:209
+ {
+ protoVAL.cid = &compoundIdentNode{val: protoDollar[1].cid.val + "." + protoDollar[3].id.val}
+ protoVAL.cid.setRange(protoDollar[1].cid, protoDollar[3].id)
+ }
+ case 24:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:214
+ {
+ n := &optionNameNode{parts: protoDollar[2].optNm}
+ n.setRange(protoDollar[2].optNm[0], protoDollar[2].optNm[len(protoDollar[2].optNm)-1])
+ o := &optionNode{name: n, val: protoDollar[4].v}
+ o.setRange(protoDollar[1].id, protoDollar[5].b)
+ protoVAL.opts = []*optionNode{o}
+ }
+ case 25:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:222
+ {
+ protoVAL.optNm = toNameParts(protoDollar[1].cid, 0)
+ }
+ case 26:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:225
+ {
+ p := &optionNamePartNode{text: protoDollar[2].cid, isExtension: true}
+ p.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.optNm = []*optionNamePartNode{p}
+ }
+ case 27:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:230
+ {
+ p := &optionNamePartNode{text: protoDollar[2].cid, isExtension: true}
+ p.setRange(protoDollar[1].b, protoDollar[3].b)
+ ps := make([]*optionNamePartNode, 1, len(protoDollar[4].optNm)+1)
+ ps[0] = p
+ protoVAL.optNm = append(ps, protoDollar[4].optNm...)
+ }
+ case 29:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:239
+ {
+ protoVAL.optNm = append(protoDollar[1].optNm, protoDollar[2].optNm...)
+ }
+ case 30:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:243
+ {
+ protoVAL.optNm = toNameParts(protoDollar[1].cid, 1 /* exclude leading dot */)
+ }
+ case 31:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:246
+ {
+ p := &optionNamePartNode{text: protoDollar[3].cid, isExtension: true}
+ p.setRange(protoDollar[2].b, protoDollar[4].b)
+ protoVAL.optNm = []*optionNamePartNode{p}
+ }
+ case 34:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:255
+ {
+ protoVAL.v = protoDollar[1].str
+ }
+ case 36:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:259
+ {
+ protoVAL.v = protoDollar[1].il
+ }
+ case 38:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:263
+ {
+ if protoDollar[1].id.val == "true" {
+ protoVAL.v = &boolLiteralNode{identNode: protoDollar[1].id, val: true}
+ } else if protoDollar[1].id.val == "false" {
+ protoVAL.v = &boolLiteralNode{identNode: protoDollar[1].id, val: false}
+ } else if protoDollar[1].id.val == "inf" {
+ f := &compoundFloatNode{val: math.Inf(1)}
+ f.setRange(protoDollar[1].id, protoDollar[1].id)
+ protoVAL.v = f
+ } else if protoDollar[1].id.val == "nan" {
+ f := &compoundFloatNode{val: math.NaN()}
+ f.setRange(protoDollar[1].id, protoDollar[1].id)
+ protoVAL.v = f
+ } else {
+ protoVAL.v = protoDollar[1].id
+ }
+ }
+ case 39:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:281
+ {
+ i := &compoundUintNode{val: protoDollar[1].i.val}
+ i.setRange(protoDollar[1].i, protoDollar[1].i)
+ protoVAL.v = i
+ }
+ case 40:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:286
+ {
+ i := &compoundUintNode{val: protoDollar[2].i.val}
+ i.setRange(protoDollar[1].b, protoDollar[2].i)
+ protoVAL.v = i
+ }
+ case 41:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:292
+ {
+ if protoDollar[2].i.val > math.MaxInt64+1 {
+ lexError(protolex, protoDollar[2].i.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", protoDollar[2].i.val, int64(math.MinInt64), int64(math.MaxInt64)))
+ }
+ i := &compoundIntNode{val: -int64(protoDollar[2].i.val)}
+ i.setRange(protoDollar[1].b, protoDollar[2].i)
+ protoVAL.il = i
+ }
+ case 43:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:302
+ {
+ // we don't allow uintLit because this is for enum numeric vals, which don't allow '+'
+ checkUint64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+ i := &compoundIntNode{val: int64(protoDollar[1].i.val)}
+ i.setRange(protoDollar[1].i, protoDollar[1].i)
+ protoVAL.il = i
+ }
+ case 44:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:310
+ {
+ protoVAL.v = protoDollar[1].f
+ }
+ case 45:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:313
+ {
+ f := &compoundFloatNode{val: -protoDollar[2].f.val}
+ f.setRange(protoDollar[1].b, protoDollar[2].f)
+ protoVAL.v = f
+ }
+ case 46:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:318
+ {
+ f := &compoundFloatNode{val: protoDollar[2].f.val}
+ f.setRange(protoDollar[1].b, protoDollar[2].f)
+ protoVAL.v = f
+ }
+ case 47:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:323
+ {
+ f := &compoundFloatNode{val: math.Inf(1)}
+ f.setRange(protoDollar[1].b, protoDollar[2].id)
+ protoVAL.v = f
+ }
+ case 48:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:328
+ {
+ f := &compoundFloatNode{val: math.Inf(-1)}
+ f.setRange(protoDollar[1].b, protoDollar[2].id)
+ protoVAL.v = f
+ }
+ case 49:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:334
+ {
+ protoVAL.str = &compoundStringNode{val: protoDollar[1].s.val}
+ protoVAL.str.setRange(protoDollar[1].s, protoDollar[1].s)
+ }
+ case 50:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:338
+ {
+ protoVAL.str = &compoundStringNode{val: protoDollar[1].str.val + protoDollar[2].s.val}
+ protoVAL.str.setRange(protoDollar[1].str, protoDollar[2].s)
+ }
+ case 51:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:343
+ {
+ a := &aggregateLiteralNode{elements: protoDollar[2].agg}
+ a.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.v = a
+ }
+ case 53:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:350
+ {
+ protoVAL.agg = append(protoDollar[1].agg, protoDollar[2].agg...)
+ }
+ case 54:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:353
+ {
+ protoVAL.agg = nil
+ }
+ case 56:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:358
+ {
+ protoVAL.agg = protoDollar[1].agg
+ }
+ case 57:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:361
+ {
+ protoVAL.agg = protoDollar[1].agg
+ }
+ case 58:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:364
+ {
+ }
+ case 59:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:366
+ {
+ }
+ case 60:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:368
+ {
+ }
+ case 61:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:371
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 62:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:376
+ {
+ s := &sliceLiteralNode{}
+ s.setRange(protoDollar[3].b, protoDollar[4].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 63:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:383
+ {
+ s := &sliceLiteralNode{elements: protoDollar[4].sl}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 64:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:390
+ {
+ }
+ case 65:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:392
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 66:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:397
+ {
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[2].v}
+ a.setRange(protoDollar[1].aggName, protoDollar[2].v)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 67:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:402
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 68:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:409
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[3].agg}
+ s.setRange(protoDollar[2].b, protoDollar[4].b)
+ a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+ a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+ protoVAL.agg = []*aggregateEntryNode{a}
+ }
+ case 69:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:416
+ {
+ }
+ case 70:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:418
+ {
+ }
+ case 71:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:421
+ {
+ n := &compoundIdentNode{val: protoDollar[1].id.val}
+ n.setRange(protoDollar[1].id, protoDollar[1].id)
+ protoVAL.aggName = &aggregateNameNode{name: n}
+ protoVAL.aggName.setRange(protoDollar[1].id, protoDollar[1].id)
+ }
+ case 72:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:427
+ {
+ protoVAL.aggName = &aggregateNameNode{name: protoDollar[2].cid, isExtension: true}
+ protoVAL.aggName.setRange(protoDollar[1].b, protoDollar[3].b)
+ }
+ case 73:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:431
+ {
+ }
+ case 74:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:434
+ {
+ protoVAL.sl = []valueNode{protoDollar[1].v}
+ }
+ case 75:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:437
+ {
+ protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+ }
+ case 76:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:440
+ {
+ protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+ }
+ case 77:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:443
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[2].agg}
+ s.setRange(protoDollar[1].b, protoDollar[3].b)
+ protoVAL.sl = []valueNode{s}
+ }
+ case 78:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:448
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ protoVAL.sl = append(protoDollar[1].sl, s)
+ }
+ case 79:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:453
+ {
+ s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+ s.setRange(protoDollar[3].b, protoDollar[5].b)
+ protoVAL.sl = append(protoDollar[1].sl, s)
+ }
+ case 80:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:458
+ {
+ }
+ case 81:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:460
+ {
+ }
+ case 82:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:462
+ {
+ }
+ case 84:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:466
+ {
+ protoVAL.cid = &compoundIdentNode{val: "." + protoDollar[2].cid.val}
+ protoVAL.cid.setRange(protoDollar[1].b, protoDollar[2].cid)
+ }
+ case 85:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:471
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id, required: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 86:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:477
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 87:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:483
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id, repeated: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 88:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:489
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].cid, name: protoDollar[2].id, tag: protoDollar[4].i}
+ protoVAL.fld.setRange(protoDollar[1].cid, protoDollar[5].b)
+ }
+ case 89:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:494
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id, required: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i, options: protoDollar[6].cmpctOpts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 90:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:500
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i, options: protoDollar[6].cmpctOpts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 91:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:506
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ lbl := fieldLabel{identNode: protoDollar[1].id, repeated: true}
+ protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].cid, name: protoDollar[3].id, tag: protoDollar[5].i, options: protoDollar[6].cmpctOpts}
+ protoVAL.fld.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 92:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:512
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].cid, name: protoDollar[2].id, tag: protoDollar[4].i, options: protoDollar[5].cmpctOpts}
+ protoVAL.fld.setRange(protoDollar[1].cid, protoDollar[6].b)
+ }
+ case 93:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:518
+ {
+ protoVAL.cmpctOpts = &compactOptionsNode{decls: protoDollar[2].opts}
+ protoVAL.cmpctOpts.setRange(protoDollar[1].b, protoDollar[3].b)
+ }
+ case 94:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:523
+ {
+ protoVAL.opts = append(protoDollar[1].opts, protoDollar[3].opts...)
+ }
+ case 96:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:528
+ {
+ n := &optionNameNode{parts: protoDollar[1].optNm}
+ n.setRange(protoDollar[1].optNm[0], protoDollar[1].optNm[len(protoDollar[1].optNm)-1])
+ o := &optionNode{name: n, val: protoDollar[3].v}
+ o.setRange(protoDollar[1].optNm[0], protoDollar[3].v)
+ protoVAL.opts = []*optionNode{o}
+ }
+ case 97:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:536
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := fieldLabel{identNode: protoDollar[1].id, required: true}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].i, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 98:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:545
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := fieldLabel{identNode: protoDollar[1].id}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].i, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 99:
+ protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:554
+ {
+ checkTag(protolex, protoDollar[5].i.start(), protoDollar[5].i.val)
+ if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+ lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+ }
+ lbl := fieldLabel{identNode: protoDollar[1].id, repeated: true}
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].i, decls: protoDollar[7].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+ }
+ case 100:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:564
+ {
+ c := 0
+ for _, el := range protoDollar[4].ooDecls {
+ if el.field != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "oneof must contain at least one field")
+ }
+ protoVAL.oo = &oneOfNode{name: protoDollar[2].id, decls: protoDollar[4].ooDecls}
+ protoVAL.oo.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 101:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:578
+ {
+ protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecls...)
+ }
+ case 103:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:582
+ {
+ protoVAL.ooDecls = nil
+ }
+ case 104:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:586
+ {
+ protoVAL.ooDecls = []*oneOfElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 105:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:589
+ {
+ protoVAL.ooDecls = []*oneOfElement{{field: protoDollar[1].fld}}
+ }
+ case 106:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:592
+ {
+ protoVAL.ooDecls = []*oneOfElement{{group: protoDollar[1].grp}}
+ }
+ case 107:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:595
+ {
+ protoVAL.ooDecls = []*oneOfElement{{empty: protoDollar[1].b}}
+ }
+ case 108:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:598
+ {
+ }
+ case 109:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:600
+ {
+ }
+ case 110:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:603
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].cid, name: protoDollar[2].id, tag: protoDollar[4].i}
+ protoVAL.fld.setRange(protoDollar[1].cid, protoDollar[5].b)
+ }
+ case 111:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:608
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.fld = &fieldNode{fldType: protoDollar[1].cid, name: protoDollar[2].id, tag: protoDollar[4].i, options: protoDollar[5].cmpctOpts}
+ protoVAL.fld.setRange(protoDollar[1].cid, protoDollar[6].b)
+ }
+ case 112:
+ protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:614
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ if !unicode.IsUpper(rune(protoDollar[2].id.val[0])) {
+ lexError(protolex, protoDollar[2].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[2].id.val))
+ }
+ protoVAL.grp = &groupNode{groupKeyword: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].i, decls: protoDollar[6].msgDecls}
+ protoVAL.grp.setRange(protoDollar[1].id, protoDollar[7].b)
+ }
+ case 113:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:623
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.mapFld = &mapFieldNode{mapType: protoDollar[1].mapType, name: protoDollar[2].id, tag: protoDollar[4].i}
+ protoVAL.mapFld.setRange(protoDollar[1].mapType, protoDollar[5].b)
+ }
+ case 114:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:628
+ {
+ checkTag(protolex, protoDollar[4].i.start(), protoDollar[4].i.val)
+ protoVAL.mapFld = &mapFieldNode{mapType: protoDollar[1].mapType, name: protoDollar[2].id, tag: protoDollar[4].i, options: protoDollar[5].cmpctOpts}
+ protoVAL.mapFld.setRange(protoDollar[1].mapType, protoDollar[6].b)
+ }
+ case 115:
+ protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:634
+ {
+ protoVAL.mapType = &mapTypeNode{mapKeyword: protoDollar[1].id, keyType: protoDollar[3].id, valueType: protoDollar[5].cid}
+ protoVAL.mapType.setRange(protoDollar[1].id, protoDollar[6].b)
+ }
+ case 128:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:652
+ {
+ protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs}
+ protoVAL.ext.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 129:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:656
+ {
+ protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs, options: protoDollar[3].cmpctOpts}
+ protoVAL.ext.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 130:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:661
+ {
+ protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+ }
+ case 132:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:666
+ {
+ if protoDollar[1].i.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].i.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[1].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[1].i.val)}
+ r.setRange(protoDollar[1].i, protoDollar[1].i)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 133:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:674
+ {
+ if protoDollar[1].i.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].i.val, internal.MaxTag))
+ }
+ if protoDollar[3].i.val > internal.MaxTag {
+ lexError(protolex, protoDollar[3].i.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", protoDollar[3].i.val, internal.MaxTag))
+ }
+ if protoDollar[1].i.val > protoDollar[3].i.val {
+ lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].i.val, protoDollar[3].i.val))
+ }
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[3].i.val)}
+ r.setRange(protoDollar[1].i, protoDollar[3].i)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 134:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:688
+ {
+ if protoDollar[1].i.val > internal.MaxTag {
+ lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].i.val, internal.MaxTag))
+ }
+ r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].id, st: int32(protoDollar[1].i.val), en: internal.MaxTag}
+ r.setRange(protoDollar[1].i, protoDollar[3].id)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 135:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:697
+ {
+ protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+ }
+ case 137:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:702
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].il.start(), protoDollar[1].il.val)
+ r := &rangeNode{stNode: protoDollar[1].il, enNode: protoDollar[1].il, st: int32(protoDollar[1].il.val), en: int32(protoDollar[1].il.val)}
+ r.setRange(protoDollar[1].il, protoDollar[1].il)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 138:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:708
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].il.start(), protoDollar[1].il.val)
+ checkInt64InInt32Range(protolex, protoDollar[3].il.start(), protoDollar[3].il.val)
+ if protoDollar[1].il.val > protoDollar[3].il.val {
+ lexError(protolex, protoDollar[1].il.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].il.val, protoDollar[3].il.val))
+ }
+ r := &rangeNode{stNode: protoDollar[1].il, enNode: protoDollar[3].il, st: int32(protoDollar[1].il.val), en: int32(protoDollar[3].il.val)}
+ r.setRange(protoDollar[1].il, protoDollar[3].il)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 139:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:718
+ {
+ checkInt64InInt32Range(protolex, protoDollar[1].il.start(), protoDollar[1].il.val)
+ r := &rangeNode{stNode: protoDollar[1].il, enNode: protoDollar[3].id, st: int32(protoDollar[1].il.val), en: math.MaxInt32}
+ r.setRange(protoDollar[1].il, protoDollar[3].id)
+ protoVAL.rngs = []*rangeNode{r}
+ }
+ case 140:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:725
+ {
+ protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 142:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:731
+ {
+ protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 144:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:737
+ {
+ rsvd := map[string]struct{}{}
+ for _, n := range protoDollar[2].names {
+ if _, ok := rsvd[n.val]; ok {
+ lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+ break
+ }
+ rsvd[n.val] = struct{}{}
+ }
+ protoVAL.resvd = &reservedNode{names: protoDollar[2].names}
+ protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+ }
+ case 145:
+ protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:750
+ {
+ protoVAL.names = append(protoDollar[1].names, protoDollar[3].str)
+ }
+ case 146:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:753
+ {
+ protoVAL.names = []*compoundStringNode{protoDollar[1].str}
+ }
+ case 147:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:757
+ {
+ c := 0
+ for _, el := range protoDollar[4].enDecls {
+ if el.value != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "enums must define at least one value")
+ }
+ protoVAL.en = &enumNode{name: protoDollar[2].id, decls: protoDollar[4].enDecls}
+ protoVAL.en.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 148:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:771
+ {
+ protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecls...)
+ }
+ case 150:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:775
+ {
+ protoVAL.enDecls = nil
+ }
+ case 151:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:779
+ {
+ protoVAL.enDecls = []*enumElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 152:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:782
+ {
+ protoVAL.enDecls = []*enumElement{{value: protoDollar[1].env}}
+ }
+ case 153:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:785
+ {
+ protoVAL.enDecls = []*enumElement{{reserved: protoDollar[1].resvd}}
+ }
+ case 154:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:788
+ {
+ protoVAL.enDecls = []*enumElement{{empty: protoDollar[1].b}}
+ }
+ case 155:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:791
+ {
+ }
+ case 156:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:793
+ {
+ }
+ case 157:
+ protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:796
+ {
+ checkInt64InInt32Range(protolex, protoDollar[3].il.start(), protoDollar[3].il.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, number: protoDollar[3].il}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[4].b)
+ }
+ case 158:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:801
+ {
+ checkInt64InInt32Range(protolex, protoDollar[3].il.start(), protoDollar[3].il.val)
+ protoVAL.env = &enumValueNode{name: protoDollar[1].id, number: protoDollar[3].il, options: protoDollar[4].cmpctOpts}
+ protoVAL.env.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 159:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:807
+ {
+ protoVAL.msg = &messageNode{name: protoDollar[2].id, decls: protoDollar[4].msgDecls}
+ protoVAL.msg.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 160:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:812
+ {
+ protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecls...)
+ }
+ case 162:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:816
+ {
+ protoVAL.msgDecls = nil
+ }
+ case 163:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:820
+ {
+ protoVAL.msgDecls = []*messageElement{{field: protoDollar[1].fld}}
+ }
+ case 164:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:823
+ {
+ protoVAL.msgDecls = []*messageElement{{enum: protoDollar[1].en}}
+ }
+ case 165:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:826
+ {
+ protoVAL.msgDecls = []*messageElement{{nested: protoDollar[1].msg}}
+ }
+ case 166:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:829
+ {
+ protoVAL.msgDecls = []*messageElement{{extend: protoDollar[1].extend}}
+ }
+ case 167:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:832
+ {
+ protoVAL.msgDecls = []*messageElement{{extensionRange: protoDollar[1].ext}}
+ }
+ case 168:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:835
+ {
+ protoVAL.msgDecls = []*messageElement{{group: protoDollar[1].grp}}
+ }
+ case 169:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:838
+ {
+ protoVAL.msgDecls = []*messageElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 170:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:841
+ {
+ protoVAL.msgDecls = []*messageElement{{oneOf: protoDollar[1].oo}}
+ }
+ case 171:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:844
+ {
+ protoVAL.msgDecls = []*messageElement{{mapField: protoDollar[1].mapFld}}
+ }
+ case 172:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:847
+ {
+ protoVAL.msgDecls = []*messageElement{{reserved: protoDollar[1].resvd}}
+ }
+ case 173:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:850
+ {
+ protoVAL.msgDecls = []*messageElement{{empty: protoDollar[1].b}}
+ }
+ case 174:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:853
+ {
+ }
+ case 175:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:855
+ {
+ }
+ case 176:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:858
+ {
+ c := 0
+ for _, el := range protoDollar[4].extDecls {
+ if el.field != nil || el.group != nil {
+ c++
+ }
+ }
+ if c == 0 {
+ lexError(protolex, protoDollar[1].id.start(), "extend sections must define at least one extension")
+ }
+ protoVAL.extend = &extendNode{extendee: protoDollar[2].cid, decls: protoDollar[4].extDecls}
+ protoVAL.extend.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 177:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:872
+ {
+ protoVAL.extDecls = append(protoDollar[1].extDecls, protoDollar[2].extDecls...)
+ }
+ case 179:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:876
+ {
+ protoVAL.extDecls = nil
+ }
+ case 180:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:880
+ {
+ protoVAL.extDecls = []*extendElement{{field: protoDollar[1].fld}}
+ }
+ case 181:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:883
+ {
+ protoVAL.extDecls = []*extendElement{{group: protoDollar[1].grp}}
+ }
+ case 182:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:886
+ {
+ protoVAL.extDecls = []*extendElement{{empty: protoDollar[1].b}}
+ }
+ case 183:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:889
+ {
+ }
+ case 184:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:891
+ {
+ }
+ case 185:
+ protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:894
+ {
+ protoVAL.svc = &serviceNode{name: protoDollar[2].id, decls: protoDollar[4].svcDecls}
+ protoVAL.svc.setRange(protoDollar[1].id, protoDollar[5].b)
+ }
+ case 186:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:899
+ {
+ protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecls...)
+ }
+ case 188:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:903
+ {
+ protoVAL.svcDecls = nil
+ }
+ case 189:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:910
+ {
+ protoVAL.svcDecls = []*serviceElement{{option: protoDollar[1].opts[0]}}
+ }
+ case 190:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:913
+ {
+ protoVAL.svcDecls = []*serviceElement{{rpc: protoDollar[1].mtd}}
+ }
+ case 191:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:916
+ {
+ protoVAL.svcDecls = []*serviceElement{{empty: protoDollar[1].b}}
+ }
+ case 192:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:919
+ {
+ }
+ case 193:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:921
+ {
+ }
+ case 194:
+ protoDollar = protoS[protopt-10 : protopt+1]
+//line proto.y:924
+ {
+ protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType}
+ protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[10].b)
+ }
+ case 195:
+ protoDollar = protoS[protopt-12 : protopt+1]
+//line proto.y:928
+ {
+ protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType, options: protoDollar[11].opts}
+ protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[12].b)
+ }
+ case 196:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:933
+ {
+ protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[2].cid, streamKeyword: protoDollar[1].id}
+ protoVAL.rpcType.setRange(protoDollar[1].id, protoDollar[2].cid)
+ }
+ case 197:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:937
+ {
+ protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[1].cid}
+ protoVAL.rpcType.setRange(protoDollar[1].cid, protoDollar[1].cid)
+ }
+ case 198:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:942
+ {
+ protoVAL.opts = append(protoDollar[1].opts, protoDollar[2].opts...)
+ }
+ case 200:
+ protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:946
+ {
+ protoVAL.opts = []*optionNode{}
+ }
+ case 201:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:950
+ {
+ protoVAL.opts = protoDollar[1].opts
+ }
+ case 202:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:953
+ {
+ protoVAL.opts = []*optionNode{}
+ }
+ case 203:
+ protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:956
+ {
+ }
+ case 204:
+ protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:958
+ {
+ }
+ }
+ goto protostack /* stack new state and value */
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go
new file mode 100644
index 0000000..2561e56
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go
@@ -0,0 +1,170 @@
+package protoparse
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// errNoImportPathsForAbsoluteFilePath is returned by ResolveFilenames when an
+// absolute file name is supplied but no import paths are, since the name could
+// then be parsed under two different keys (see comment in ResolveFilenames).
+var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given")
+
+// ResolveFilenames tries to resolve fileNames into paths that are relative to
+// directories in the given importPaths. The returned slice has the results in
+// the same order as they are supplied in fileNames.
+//
+// The resulting names should be suitable for passing to Parser.ParseFiles.
+//
+// If no import paths are given and any file name is absolute, this returns an
+// error. If no import paths are given and all file names are relative, this
+// returns the original file names. If a file name is already relative to one
+// of the given import paths, it will be unchanged in the returned slice. If a
+// file name given is relative to the current working directory, it will be made
+// relative to one of the given import paths; but if it cannot be made relative
+// (due to no matching import path), an error will be returned.
+func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) {
+ if len(importPaths) == 0 {
+ if containsAbsFilePath(fileNames) {
+ // We have to do this as otherwise parseProtoFiles can result in duplicate symbols.
+ // For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto"
+ // as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto")
+ // with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles,
+ // it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this,
+ // adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto"
+ // from the input file list. This will result in a
+ // 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto'
+ // error being returned from ParseFiles.
+ return nil, errNoImportPathsForAbsoluteFilePath
+ }
+ // No import paths and all file names are relative: nothing to resolve;
+ // return the inputs unchanged.
+ return fileNames, nil
+ }
+ // Canonicalize the import paths once up front so every file name below is
+ // resolved against absolute, symlink-free directories.
+ absImportPaths, err := absoluteFilePaths(importPaths)
+ if err != nil {
+ return nil, err
+ }
+ // Results are accumulated in the same order as the given fileNames.
+ resolvedFileNames := make([]string, 0, len(fileNames))
+ for _, fileName := range fileNames {
+ resolvedFileName, err := resolveFilename(absImportPaths, fileName)
+ if err != nil {
+ return nil, err
+ }
+ resolvedFileNames = append(resolvedFileNames, resolvedFileName)
+ }
+ return resolvedFileNames, nil
+}
+
+// containsAbsFilePath reports whether any of the given paths is absolute.
+func containsAbsFilePath(filePaths []string) bool {
+ for _, filePath := range filePaths {
+ if filepath.IsAbs(filePath) {
+ return true
+ }
+ }
+ return false
+}
+
+// absoluteFilePaths canonicalizes each of the given paths (see canonicalize),
+// returning the results in the same order. It fails on the first path that
+// cannot be canonicalized.
+func absoluteFilePaths(filePaths []string) ([]string, error) {
+ absFilePaths := make([]string, 0, len(filePaths))
+ for _, filePath := range filePaths {
+ absFilePath, err := canonicalize(filePath)
+ if err != nil {
+ return nil, err
+ }
+ absFilePaths = append(absFilePaths, absFilePath)
+ }
+ return absFilePaths, nil
+}
+
+// canonicalize returns an absolute form of filePath with symlinks resolved
+// where possible. Unlike a single filepath.EvalSymlinks call, it tolerates
+// path elements that do not (yet) exist: it resolves the longest existing
+// prefix of the path and re-joins the non-existent suffix unchanged. If no
+// prefix of the path exists, the plain absolute path is returned with no
+// error.
+func canonicalize(filePath string) (string, error) {
+ absPath, err := filepath.Abs(filePath)
+ if err != nil {
+ return "", err
+ }
+ // this is kind of gross, but it lets us construct a resolved path even if some
+ // path elements do not exist (a single call to filepath.EvalSymlinks would just
+ // return an error, ENOENT, in that case).
+ head := absPath
+ tail := ""
+ for {
+ noLinks, err := filepath.EvalSymlinks(head)
+ if err == nil {
+ // head exists and is fully resolved; re-attach whatever suffix
+ // (if any) we peeled off in earlier iterations.
+ if tail != "" {
+ return filepath.Join(noLinks, tail), nil
+ }
+ return noLinks, nil
+ }
+
+ // head could not be resolved: move its last element onto tail and
+ // retry with the parent directory.
+ if tail == "" {
+ tail = filepath.Base(head)
+ } else {
+ tail = filepath.Join(filepath.Base(head), tail)
+ }
+ head = filepath.Dir(head)
+ if head == "." {
+ // ran out of path elements to try to resolve
+ return absPath, nil
+ }
+ }
+}
+
+// Prefixes that mark a file name as explicitly relative to the current working
+// directory (e.g. "./foo.proto" or "../foo.proto"); such names skip the
+// import-path probe in resolveFilename.
+const dotPrefix = "." + string(filepath.Separator)
+const dotDotPrefix = ".." + string(filepath.Separator)
+
+// resolveFilename returns fileName expressed relative to one of the given
+// absolute import paths. A relative name that already names an existing file
+// under some import path is returned unchanged; otherwise (including absolute
+// names and names starting with "./" or "../") it is treated as relative to
+// the current working directory and re-based onto an import path via
+// resolveAbsFilename.
+func resolveFilename(absImportPaths []string, fileName string) (string, error) {
+ if filepath.IsAbs(fileName) {
+ return resolveAbsFilename(absImportPaths, fileName)
+ }
+
+ if !strings.HasPrefix(fileName, dotPrefix) && !strings.HasPrefix(fileName, dotDotPrefix) {
+ // Use of . and .. are assumed to be relative to current working
+ // directory. So if those aren't present, check to see if the file is
+ // relative to an import path.
+ for _, absImportPath := range absImportPaths {
+ absFileName := filepath.Join(absImportPath, fileName)
+ _, err := os.Stat(absFileName)
+ if err != nil {
+ // not under this import path; try the next one
+ continue
+ }
+ // found it! it was relative to this import path
+ return fileName, nil
+ }
+ }
+
+ // must be relative to current working dir
+ return resolveAbsFilename(absImportPaths, fileName)
+}
+
+// resolveAbsFilename canonicalizes fileName and returns it expressed relative
+// to the first of the given absolute import paths that contains it. It
+// returns an error if the file resides under none of the import paths.
+func resolveAbsFilename(absImportPaths []string, fileName string) (string, error) {
+ absFileName, err := canonicalize(fileName)
+ if err != nil {
+ return "", err
+ }
+ for _, absImportPath := range absImportPaths {
+ if isDescendant(absImportPath, absFileName) {
+ resolvedPath, err := filepath.Rel(absImportPath, absFileName)
+ if err != nil {
+ return "", err
+ }
+ return resolvedPath, nil
+ }
+ }
+ return "", fmt.Errorf("%s does not reside in any import path", fileName)
+}
+
+// isDescendant returns true if file is a descendant of dir. Both dir and file must
+// be cleaned, absolute paths.
+func isDescendant(dir, file string) bool {
+ dir = filepath.Clean(dir)
+ cur := file
+ // Walk upward from file one directory at a time, looking for dir.
+ for {
+ d := filepath.Dir(cur)
+ if d == dir {
+ return true
+ }
+ // filepath.Dir of a root path returns the path itself (d == cur),
+ // so either condition means the walk has terminated without a match.
+ if d == "." || d == cur {
+ // we've run out of path elements
+ return false
+ }
+ cur = d
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
new file mode 100644
index 0000000..ff37733
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
@@ -0,0 +1,532 @@
+package protoparse
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// generateSourceCodeInfo builds the SourceCodeInfo proto for this parse
+// result by walking the file's AST and recording a location (path + span,
+// plus attributed comments) for each element. Returns nil for files that
+// have no AST (e.g. ones synthesized from well-known descriptors).
+func (r *parseResult) generateSourceCodeInfo() *dpb.SourceCodeInfo {
+ if r.nodes == nil {
+ // skip files that do not have AST info (these will be files
+ // that came from well-known descriptors, instead of from source)
+ return nil
+ }
+
+ sci := sourceCodeInfo{commentsUsed: map[*comment]struct{}{}}
+ path := make([]int32, 0, 10)
+
+ fn := r.getFileNode(r.fd).(*fileNode)
+ // root location for the whole file (empty path)
+ sci.newLocWithoutComments(fn, nil)
+
+ if fn.syntax != nil {
+ sci.newLoc(fn.syntax, append(path, internal.File_syntaxTag))
+ }
+
+ // per-category indexes into the file's repeated descriptor fields
+ var depIndex, optIndex, msgIndex, enumIndex, extendIndex, svcIndex int32
+
+ for _, child := range fn.decls {
+ switch {
+ case child.imp != nil:
+ sci.newLoc(child.imp, append(path, internal.File_dependencyTag, int32(depIndex)))
+ depIndex++
+ case child.pkg != nil:
+ sci.newLoc(child.pkg, append(path, internal.File_packageTag))
+ case child.option != nil:
+ r.generateSourceCodeInfoForOption(&sci, child.option, false, &optIndex, append(path, internal.File_optionsTag))
+ case child.message != nil:
+ r.generateSourceCodeInfoForMessage(&sci, child.message, nil, append(path, internal.File_messagesTag, msgIndex))
+ msgIndex++
+ case child.enum != nil:
+ r.generateSourceCodeInfoForEnum(&sci, child.enum, append(path, internal.File_enumsTag, enumIndex))
+ enumIndex++
+ case child.extend != nil:
+ r.generateSourceCodeInfoForExtensions(&sci, child.extend, &extendIndex, &msgIndex, append(path, internal.File_extensionsTag), append(dup(path), internal.File_messagesTag))
+ case child.service != nil:
+ r.generateSourceCodeInfoForService(&sci, child.service, append(path, internal.File_servicesTag, svcIndex))
+ svcIndex++
+ }
+ }
+
+ return &dpb.SourceCodeInfo{Location: sci.locs}
+}
+
+// generateSourceCodeInfoForOption records locations for an option. For an
+// option that was interpreted, the precomputed sub-path from
+// r.interpretedOptions is appended to path; otherwise locations for the
+// uninterpreted_option representation (value and name parts) are emitted.
+// compact indicates an option inside [...] brackets, which gets no
+// stand-alone location of its own.
+func (r *parseResult) generateSourceCodeInfoForOption(sci *sourceCodeInfo, n *optionNode, compact bool, uninterpIndex *int32, path []int32) {
+ if !compact {
+ sci.newLocWithoutComments(n, path)
+ }
+ subPath := r.interpretedOptions[n]
+ if len(subPath) > 0 {
+ p := path
+ if subPath[0] == -1 {
+ // used by "default" and "json_name" field pseudo-options
+ // to attribute path to parent element (since those are
+ // stored directly on the descriptor, not its options)
+ p = make([]int32, len(path)-1)
+ copy(p, path)
+ subPath = subPath[1:]
+ }
+ sci.newLoc(n, append(p, subPath...))
+ return
+ }
+
+ // it's an uninterpreted option
+ optPath := append(path, internal.UninterpretedOptionsTag, *uninterpIndex)
+ *uninterpIndex++
+ sci.newLoc(n, optPath)
+ // pick the tag matching the concrete type of the option value node
+ var valTag int32
+ switch n.val.(type) {
+ case *compoundIdentNode:
+ valTag = internal.Uninterpreted_identTag
+ case *intLiteralNode:
+ valTag = internal.Uninterpreted_posIntTag
+ case *compoundIntNode:
+ valTag = internal.Uninterpreted_negIntTag
+ case *compoundFloatNode:
+ valTag = internal.Uninterpreted_doubleTag
+ case *compoundStringNode:
+ valTag = internal.Uninterpreted_stringTag
+ case *aggregateLiteralNode:
+ valTag = internal.Uninterpreted_aggregateTag
+ }
+ if valTag != 0 {
+ sci.newLoc(n.val, append(optPath, valTag))
+ }
+ for j, nn := range n.name.parts {
+ optNmPath := append(optPath, internal.Uninterpreted_nameTag, int32(j))
+ sci.newLoc(nn, optNmPath)
+ sci.newLoc(nn.text, append(optNmPath, internal.UninterpretedName_nameTag))
+ }
+}
+
+// generateSourceCodeInfoForMessage records locations for a message and
+// recurses into all of its declarations (fields, groups, oneofs, nested
+// messages/enums, extensions, extension ranges, reserved ranges/names).
+// fieldPath is non-nil only for group messages, where protoc also emits the
+// group's field type_name location right after the message name.
+func (r *parseResult) generateSourceCodeInfoForMessage(sci *sourceCodeInfo, n msgDecl, fieldPath []int32, path []int32) {
+ sci.newLoc(n, path)
+
+ var decls []*messageElement
+ switch n := n.(type) {
+ case *messageNode:
+ decls = n.decls
+ case *groupNode:
+ decls = n.decls
+ case *mapFieldNode:
+ // map entry so nothing else to do
+ return
+ }
+
+ sci.newLoc(n.messageName(), append(path, internal.Message_nameTag))
+ // matching protoc, which emits the corresponding field type name (for group fields)
+ // right after the source location for the group message name
+ if fieldPath != nil {
+ sci.newLoc(n.messageName(), append(fieldPath, internal.Field_typeNameTag))
+ }
+
+ // per-category indexes into the message's repeated descriptor fields
+ var optIndex, fieldIndex, oneOfIndex, extendIndex, nestedMsgIndex int32
+ var nestedEnumIndex, extRangeIndex, reservedRangeIndex, reservedNameIndex int32
+ for _, child := range decls {
+ switch {
+ case child.option != nil:
+ r.generateSourceCodeInfoForOption(sci, child.option, false, &optIndex, append(path, internal.Message_optionsTag))
+ case child.field != nil:
+ r.generateSourceCodeInfoForField(sci, child.field, append(path, internal.Message_fieldsTag, fieldIndex))
+ fieldIndex++
+ case child.group != nil:
+ // a group is both a field and a nested message
+ fldPath := append(path, internal.Message_fieldsTag, fieldIndex)
+ r.generateSourceCodeInfoForField(sci, child.group, fldPath)
+ fieldIndex++
+ r.generateSourceCodeInfoForMessage(sci, child.group, fldPath, append(dup(path), internal.Message_nestedMessagesTag, nestedMsgIndex))
+ nestedMsgIndex++
+ case child.mapField != nil:
+ r.generateSourceCodeInfoForField(sci, child.mapField, append(path, internal.Message_fieldsTag, fieldIndex))
+ fieldIndex++
+ case child.oneOf != nil:
+ r.generateSourceCodeInfoForOneOf(sci, child.oneOf, &fieldIndex, &nestedMsgIndex, append(path, internal.Message_fieldsTag), append(dup(path), internal.Message_nestedMessagesTag), append(dup(path), internal.Message_oneOfsTag, oneOfIndex))
+ oneOfIndex++
+ case child.nested != nil:
+ r.generateSourceCodeInfoForMessage(sci, child.nested, nil, append(path, internal.Message_nestedMessagesTag, nestedMsgIndex))
+ nestedMsgIndex++
+ case child.enum != nil:
+ r.generateSourceCodeInfoForEnum(sci, child.enum, append(path, internal.Message_enumsTag, nestedEnumIndex))
+ nestedEnumIndex++
+ case child.extend != nil:
+ r.generateSourceCodeInfoForExtensions(sci, child.extend, &extendIndex, &nestedMsgIndex, append(path, internal.Message_extensionsTag), append(dup(path), internal.Message_nestedMessagesTag))
+ case child.extensionRange != nil:
+ r.generateSourceCodeInfoForExtensionRanges(sci, child.extensionRange, &extRangeIndex, append(path, internal.Message_extensionRangeTag))
+ case child.reserved != nil:
+ if len(child.reserved.names) > 0 {
+ resPath := append(path, internal.Message_reservedNameTag)
+ sci.newLoc(child.reserved, resPath)
+ for _, rn := range child.reserved.names {
+ sci.newLoc(rn, append(resPath, reservedNameIndex))
+ reservedNameIndex++
+ }
+ }
+ if len(child.reserved.ranges) > 0 {
+ resPath := append(path, internal.Message_reservedRangeTag)
+ sci.newLoc(child.reserved, resPath)
+ for _, rr := range child.reserved.ranges {
+ r.generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex))
+ reservedRangeIndex++
+ }
+ }
+ }
+ }
+}
+
+// generateSourceCodeInfoForEnum records locations for an enum: its name,
+// options, values, and reserved names/ranges.
+func (r *parseResult) generateSourceCodeInfoForEnum(sci *sourceCodeInfo, n *enumNode, path []int32) {
+ sci.newLoc(n, path)
+ sci.newLoc(n.name, append(path, internal.Enum_nameTag))
+
+ var optIndex, valIndex, reservedNameIndex, reservedRangeIndex int32
+ for _, child := range n.decls {
+ switch {
+ case child.option != nil:
+ r.generateSourceCodeInfoForOption(sci, child.option, false, &optIndex, append(path, internal.Enum_optionsTag))
+ case child.value != nil:
+ r.generateSourceCodeInfoForEnumValue(sci, child.value, append(path, internal.Enum_valuesTag, valIndex))
+ valIndex++
+ case child.reserved != nil:
+ if len(child.reserved.names) > 0 {
+ resPath := append(path, internal.Enum_reservedNameTag)
+ sci.newLoc(child.reserved, resPath)
+ for _, rn := range child.reserved.names {
+ sci.newLoc(rn, append(resPath, reservedNameIndex))
+ reservedNameIndex++
+ }
+ }
+ if len(child.reserved.ranges) > 0 {
+ resPath := append(path, internal.Enum_reservedRangeTag)
+ sci.newLoc(child.reserved, resPath)
+ for _, rr := range child.reserved.ranges {
+ r.generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex))
+ reservedRangeIndex++
+ }
+ }
+ }
+ }
+}
+
+// generateSourceCodeInfoForEnumValue records locations for a single enum
+// value: its name, number, and any compact options.
+func (r *parseResult) generateSourceCodeInfoForEnumValue(sci *sourceCodeInfo, n *enumValueNode, path []int32) {
+ sci.newLoc(n, path)
+ sci.newLoc(n.name, append(path, internal.EnumVal_nameTag))
+ sci.newLoc(n.getNumber(), append(path, internal.EnumVal_numberTag))
+
+ // enum value options
+ if n.options != nil {
+ optsPath := append(path, internal.EnumVal_optionsTag)
+ sci.newLoc(n.options, optsPath)
+ var optIndex int32
+ for _, opt := range n.options.decls {
+ r.generateSourceCodeInfoForOption(sci, opt, true, &optIndex, optsPath)
+ }
+ }
+}
+
+// generateSourceCodeInfoForReservedRange records locations for a reserved
+// range: the range itself, its start, and (when distinct from the start
+// node) its end.
+func (r *parseResult) generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo, n *rangeNode, path []int32) {
+ sci.newLoc(n, path)
+ sci.newLoc(n.stNode, append(path, internal.ReservedRange_startTag))
+ if n.stNode != n.enNode {
+ sci.newLoc(n.enNode, append(path, internal.ReservedRange_endTag))
+ }
+}
+
+// generateSourceCodeInfoForExtensions records locations for an extend block.
+// Plain fields advance extendIndex; group fields additionally produce a
+// nested message location and advance msgIndex. Indexes are pointers because
+// a message/file can contain several extend blocks sharing the same counters.
+func (r *parseResult) generateSourceCodeInfoForExtensions(sci *sourceCodeInfo, n *extendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) {
+ sci.newLoc(n, extendPath)
+ for _, decl := range n.decls {
+ switch {
+ case decl.field != nil:
+ r.generateSourceCodeInfoForField(sci, decl.field, append(extendPath, *extendIndex))
+ *extendIndex++
+ case decl.group != nil:
+ fldPath := append(extendPath, *extendIndex)
+ r.generateSourceCodeInfoForField(sci, decl.group, fldPath)
+ *extendIndex++
+ r.generateSourceCodeInfoForMessage(sci, decl.group, fldPath, append(msgPath, *msgIndex))
+ *msgIndex++
+ }
+ }
+}
+
+// generateSourceCodeInfoForOneOf records locations for a oneof and its
+// members. Fields and groups inside a oneof live in the enclosing message's
+// field (and nested message) lists, so their indexes are passed by pointer
+// and shared with the caller.
+func (r *parseResult) generateSourceCodeInfoForOneOf(sci *sourceCodeInfo, n *oneOfNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneOfPath []int32) {
+ sci.newLoc(n, oneOfPath)
+ sci.newLoc(n.name, append(oneOfPath, internal.OneOf_nameTag))
+
+ var optIndex int32
+ for _, child := range n.decls {
+ switch {
+ case child.option != nil:
+ r.generateSourceCodeInfoForOption(sci, child.option, false, &optIndex, append(oneOfPath, internal.OneOf_optionsTag))
+ case child.field != nil:
+ r.generateSourceCodeInfoForField(sci, child.field, append(fieldPath, *fieldIndex))
+ *fieldIndex++
+ case child.group != nil:
+ fldPath := append(fieldPath, *fieldIndex)
+ r.generateSourceCodeInfoForField(sci, child.group, fldPath)
+ *fieldIndex++
+ r.generateSourceCodeInfoForMessage(sci, child.group, fldPath, append(nestedMsgPath, *nestedMsgIndex))
+ *nestedMsgIndex++
+ }
+ }
+}
+
+// generateSourceCodeInfoForField records locations for a field declaration
+// (regular field, map field, or group). For groups, comments are left to be
+// attributed to the corresponding group message rather than the field; for
+// regular fields, the type location is attributed to either the "type" or
+// the "type_name" descriptor field depending on whether the declared type is
+// a scalar (per the fieldTypes table).
+func (r *parseResult) generateSourceCodeInfoForField(sci *sourceCodeInfo, n fieldDecl, path []int32) {
+ isGroup := false
+ var opts *compactOptionsNode
+ var extendee *extendNode
+ var fieldType string
+ switch n := n.(type) {
+ case *fieldNode:
+ opts = n.options
+ extendee = n.extendee
+ fieldType = n.fldType.val
+ case *mapFieldNode:
+ opts = n.options
+ case *groupNode:
+ isGroup = true
+ extendee = n.extendee
+ case *syntheticMapField:
+ // shouldn't get here since we don't recurse into fields from a mapNode
+ // in generateSourceCodeInfoForMessage... but just in case
+ return
+ }
+
+ if isGroup {
+ // comments will appear on group message
+ sci.newLocWithoutComments(n, path)
+ if extendee != nil {
+ sci.newLoc(extendee.extendee, append(path, internal.Field_extendeeTag))
+ }
+ if n.fieldLabel() != nil {
+ // no comments here either (label is first token for group, so we want
+ // to leave the comments to be associated with the group message instead)
+ sci.newLocWithoutComments(n.fieldLabel(), append(path, internal.Field_labelTag))
+ }
+ sci.newLoc(n.fieldType(), append(path, internal.Field_typeTag))
+ // let the name comments be attributed to the group name
+ sci.newLocWithoutComments(n.fieldName(), append(path, internal.Field_nameTag))
+ } else {
+ sci.newLoc(n, path)
+ if extendee != nil {
+ sci.newLoc(extendee.extendee, append(path, internal.Field_extendeeTag))
+ }
+ if n.fieldLabel() != nil {
+ sci.newLoc(n.fieldLabel(), append(path, internal.Field_labelTag))
+ }
+ // NOTE(review): result of this call is discarded and fieldType() is
+ // called again below — looks like a leftover; confirm against upstream
+ // before removing (kept as-is since this is vendored code)
+ n.fieldType()
+ var tag int32
+ if _, isScalar := fieldTypes[fieldType]; isScalar {
+ tag = internal.Field_typeTag
+ } else {
+ // this is a message or an enum, so attribute type location
+ // to the type name field
+ tag = internal.Field_typeNameTag
+ }
+ sci.newLoc(n.fieldType(), append(path, tag))
+ sci.newLoc(n.fieldName(), append(path, internal.Field_nameTag))
+ }
+ sci.newLoc(n.fieldTag(), append(path, internal.Field_numberTag))
+
+ if opts != nil {
+ optsPath := append(path, internal.Field_optionsTag)
+ sci.newLoc(opts, optsPath)
+ var optIndex int32
+ for _, opt := range opts.decls {
+ r.generateSourceCodeInfoForOption(sci, opt, true, &optIndex, optsPath)
+ }
+ }
+}
+
+// generateSourceCodeInfoForExtensionRanges records locations for an
+// extension-range statement and each range within it (start, end when
+// distinct, and any options attached to the statement).
+func (r *parseResult) generateSourceCodeInfoForExtensionRanges(sci *sourceCodeInfo, n *extensionRangeNode, extRangeIndex *int32, path []int32) {
+ sci.newLoc(n, path)
+ for _, child := range n.ranges {
+ path := append(path, *extRangeIndex)
+ *extRangeIndex++
+ sci.newLoc(child, path)
+ sci.newLoc(child.stNode, append(path, internal.ExtensionRange_startTag))
+ if child.stNode != child.enNode {
+ sci.newLoc(child.enNode, append(path, internal.ExtensionRange_endTag))
+ }
+ if n.options != nil {
+ optsPath := append(path, internal.ExtensionRange_optionsTag)
+ sci.newLoc(n.options, optsPath)
+ var optIndex int32
+ for _, opt := range n.options.decls {
+ r.generateSourceCodeInfoForOption(sci, opt, true, &optIndex, optsPath)
+ }
+ }
+ }
+}
+
+// generateSourceCodeInfoForService records locations for a service: its
+// name, options, and each RPC method.
+func (r *parseResult) generateSourceCodeInfoForService(sci *sourceCodeInfo, n *serviceNode, path []int32) {
+ sci.newLoc(n, path)
+ sci.newLoc(n.name, append(path, internal.Service_nameTag))
+ var optIndex, rpcIndex int32
+ for _, child := range n.decls {
+ switch {
+ case child.option != nil:
+ r.generateSourceCodeInfoForOption(sci, child.option, false, &optIndex, append(path, internal.Service_optionsTag))
+ case child.rpc != nil:
+ r.generateSourceCodeInfoForMethod(sci, child.rpc, append(path, internal.Service_methodsTag, rpcIndex))
+ rpcIndex++
+ }
+ }
+}
+
+// generateSourceCodeInfoForMethod records locations for an RPC method: its
+// name, input/output types (with optional stream keywords), and options.
+func (r *parseResult) generateSourceCodeInfoForMethod(sci *sourceCodeInfo, n *methodNode, path []int32) {
+ sci.newLoc(n, path)
+ sci.newLoc(n.name, append(path, internal.Method_nameTag))
+ if n.input.streamKeyword != nil {
+ sci.newLoc(n.input.streamKeyword, append(path, internal.Method_inputStreamTag))
+ }
+ sci.newLoc(n.input.msgType, append(path, internal.Method_inputTag))
+ if n.output.streamKeyword != nil {
+ sci.newLoc(n.output.streamKeyword, append(path, internal.Method_outputStreamTag))
+ }
+ sci.newLoc(n.output.msgType, append(path, internal.Method_outputTag))
+
+ optsPath := append(path, internal.Method_optionsTag)
+ var optIndex int32
+ for _, opt := range n.options {
+ r.generateSourceCodeInfoForOption(sci, opt, false, &optIndex, optsPath)
+ }
+}
+
+// sourceCodeInfo accumulates SourceCodeInfo_Location entries while walking
+// the AST, tracking which comments have already been attributed to a
+// location so each comment is used at most once.
+type sourceCodeInfo struct {
+ locs []*dpb.SourceCodeInfo_Location
+ commentsUsed map[*comment]struct{}
+}
+
+// newLocWithoutComments appends a location for node n with the given path
+// and no comment attribution. The path is copied so later appends by the
+// caller cannot mutate the stored location.
+func (sci *sourceCodeInfo) newLocWithoutComments(n node, path []int32) {
+ dup := make([]int32, len(path))
+ copy(dup, path)
+ sci.locs = append(sci.locs, &dpb.SourceCodeInfo_Location{
+ Path: dup,
+ Span: makeSpan(n.start(), n.end()),
+ })
+}
+
+// newLoc appends a location for node n with the given path, attributing the
+// node's leading and trailing comments (unless already consumed by another
+// location). Leading comments are split into detached groups; the last group
+// becomes the attached leading comment only if it ends on the line
+// immediately before (or the same line as) the node's start.
+func (sci *sourceCodeInfo) newLoc(n node, path []int32) {
+ leadingComments := n.leadingComments()
+ trailingComments := n.trailingComments()
+ if sci.commentUsed(leadingComments) {
+ leadingComments = nil
+ }
+ if sci.commentUsed(trailingComments) {
+ trailingComments = nil
+ }
+ detached := groupComments(leadingComments)
+ var trail *string
+ if str, ok := combineComments(trailingComments); ok {
+ trail = proto.String(str)
+ }
+ var lead *string
+ if len(leadingComments) > 0 && leadingComments[len(leadingComments)-1].end.Line >= n.start().Line-1 {
+ lead = proto.String(detached[len(detached)-1])
+ detached = detached[:len(detached)-1]
+ }
+ // copy path so later appends by the caller cannot mutate the stored location
+ dup := make([]int32, len(path))
+ copy(dup, path)
+ sci.locs = append(sci.locs, &dpb.SourceCodeInfo_Location{
+ LeadingDetachedComments: detached,
+ LeadingComments: lead,
+ TrailingComments: trail,
+ Path: dup,
+ Span: makeSpan(n.start(), n.end()),
+ })
+}
+
+// makeSpan converts 1-based source positions into the zero-based span format
+// used by SourceCodeInfo: [line, col, endCol] when the span is on a single
+// line, otherwise [line, col, endLine, endCol].
+func makeSpan(start, end *SourcePos) []int32 {
+ if start.Line == end.Line {
+ return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Col) - 1}
+ }
+ return []int32{int32(start.Line) - 1, int32(start.Col) - 1, int32(end.Line) - 1, int32(end.Col) - 1}
+}
+
+// commentUsed reports whether this comment slice was already attributed to a
+// location, and marks it used otherwise. The address of the slice's first
+// element serves as the identity key, so two slices over the same backing
+// comments compare as the same comment run.
+func (sci *sourceCodeInfo) commentUsed(c []comment) bool {
+ if len(c) == 0 {
+ return false
+ }
+ if _, ok := sci.commentsUsed[&c[0]]; ok {
+ return true
+ }
+
+ sci.commentsUsed[&c[0]] = struct{}{}
+ return false
+}
+
+// groupComments partitions a run of comments into groups of text. Adjacent
+// "//" comments on consecutive lines form one group; a style change
+// (line vs. block comment), any block comment, or a blank-line gap of more
+// than one line starts a new group.
+func groupComments(comments []comment) []string {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ var groups []string
+ singleLineStyle := comments[0].text[:2] == "//"
+ line := comments[0].end.Line
+ start := 0
+ for i := 1; i < len(comments); i++ {
+ c := comments[i]
+ prevSingleLine := singleLineStyle
+ singleLineStyle = strings.HasPrefix(comments[i].text, "//")
+ if !singleLineStyle || prevSingleLine != singleLineStyle || c.start.Line > line+1 {
+ // new group!
+ if str, ok := combineComments(comments[start:i]); ok {
+ groups = append(groups, str)
+ }
+ start = i
+ }
+ line = c.end.Line
+ }
+ // don't forget last group
+ if str, ok := combineComments(comments[start:]); ok {
+ groups = append(groups, str)
+ }
+ return groups
+}
+
+// combineComments concatenates the text of the given comments into one
+// string, stripping the "//" prefix from line comments and, for block
+// comments, the "/*"/"*/" delimiters plus any leading whitespace-and-'*'
+// decoration on each interior line. The bool result is false only when the
+// input slice is empty.
+func combineComments(comments []comment) (string, bool) {
+ if len(comments) == 0 {
+ return "", false
+ }
+ var buf bytes.Buffer
+ for _, c := range comments {
+ if c.text[:2] == "//" {
+ buf.WriteString(c.text[2:])
+ } else {
+ lines := strings.Split(c.text[2:len(c.text)-2], "\n")
+ first := true
+ for _, l := range lines {
+ if first {
+ first = false
+ } else {
+ buf.WriteByte('\n')
+ }
+
+ // strip a prefix of whitespace followed by '*'
+ j := 0
+ for j < len(l) {
+ if l[j] != ' ' && l[j] != '\t' {
+ break
+ }
+ j++
+ }
+ if j == len(l) {
+ l = ""
+ } else if l[j] == '*' {
+ l = l[j+1:]
+ } else if j > 0 {
+ l = " " + l[j:]
+ }
+
+ buf.WriteString(l)
+ }
+ }
+ }
+ return buf.String(), true
+}
+
+// dup returns a copy of p with its own backing array, so appends to the
+// copy cannot clobber paths derived from the original.
+func dup(p []int32) []int32 {
+ return append(([]int32)(nil), p...)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
new file mode 100644
index 0000000..8fbc1fc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
@@ -0,0 +1,50 @@
+package protoparse
+
+import (
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ // link in packages that include the standard protos included with protoc
+ _ "github.com/golang/protobuf/protoc-gen-go/plugin"
+ _ "github.com/golang/protobuf/ptypes/any"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/empty"
+ _ "github.com/golang/protobuf/ptypes/struct"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ _ "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/genproto/protobuf/api"
+ _ "google.golang.org/genproto/protobuf/field_mask"
+ _ "google.golang.org/genproto/protobuf/ptype"
+ _ "google.golang.org/genproto/protobuf/source_context"
+
+ "github.com/jhump/protoreflect/internal"
+)
+
+// All files that are included with protoc are also included with this package
+// so that clients do not need to explicitly supply a copy of these protos (just
+// like callers of protoc do not need to supply them).
+var standardImports map[string]*dpb.FileDescriptorProto
+
+// init populates standardImports by loading the file descriptor for each
+// standard proto file. The blank imports above link in the generated
+// packages so these descriptors are registered before this runs; a failure
+// to load any of them is a programmer error and panics.
+func init() {
+ standardFilenames := []string{
+ "google/protobuf/any.proto",
+ "google/protobuf/api.proto",
+ "google/protobuf/compiler/plugin.proto",
+ "google/protobuf/descriptor.proto",
+ "google/protobuf/duration.proto",
+ "google/protobuf/empty.proto",
+ "google/protobuf/field_mask.proto",
+ "google/protobuf/source_context.proto",
+ "google/protobuf/struct.proto",
+ "google/protobuf/timestamp.proto",
+ "google/protobuf/type.proto",
+ "google/protobuf/wrappers.proto",
+ }
+
+ standardImports = map[string]*dpb.FileDescriptorProto{}
+ for _, fn := range standardFilenames {
+ fd, err := internal.LoadFileDescriptor(fn)
+ if err != nil {
+ panic(err.Error())
+ }
+ standardImports[fn] = fd
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
new file mode 100644
index 0000000..324fb19
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
@@ -0,0 +1,6149 @@
+---- desc_test_comments.proto ----
+
+
+:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:141:2
+
+
+ > syntax:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:8:19
+ Leading detached comment [0]:
+ This is the first detached comment for the syntax.
+
+ Leading detached comment [1]:
+
+ This is a second detached comment.
+
+ Leading detached comment [2]:
+ This is a third.
+
+ Leading comments:
+ Syntax comment...
+
+ Trailing comments:
+ Syntax trailer.
+
+
+
+ > package:
+desc_test_comments.proto:12:1
+desc_test_comments.proto:12:17
+ Leading comments:
+ And now the package declaration
+
+
+
+ > options:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+
+
+ > options > go_package:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+ Leading comments:
+ option comments FTW!!!
+
+
+
+ > dependency[0]:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:17:38
+
+
+ > dependency[1]:
+desc_test_comments.proto:18:1
+desc_test_comments.proto:18:34
+
+
+ > message_type[0]:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:105:2
+ Leading detached comment [0]:
+ Multiple white space lines (like above) cannot
+ be preserved...
+
+ Leading comments:
+ We need a request for our RPC service below.
+
+ Trailing comments:
+ And next we'll need some extensions...
+
+
+
+ > message_type[0] > name:
+desc_test_comments.proto:25:68
+desc_test_comments.proto:25:75
+ Leading detached comment [0]:
+ detached message name
+ Leading comments:
+ request with a capital R
+ Trailing comments:
+ trailer
+
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:26:9
+desc_test_comments.proto:26:34
+
+
+ > message_type[0] > options > deprecated:
+desc_test_comments.proto:26:9
+desc_test_comments.proto:26:34
+ Trailing comments:
+ deprecated!
+
+
+
+ > message_type[0] > field[0]:
+desc_test_comments.proto:29:9
+desc_test_comments.proto:32:132
+ Leading comments:
+ A field comment
+
+ Trailing comments:
+ field trailer #1...
+
+
+
+ > message_type[0] > field[0] > label:
+desc_test_comments.proto:29:9
+desc_test_comments.proto:29:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_comments.proto:29:18
+desc_test_comments.proto:29:23
+
+
+ > message_type[0] > field[0] > name:
+desc_test_comments.proto:29:24
+desc_test_comments.proto:29:27
+
+
+ > message_type[0] > field[0] > number:
+desc_test_comments.proto:29:70
+desc_test_comments.proto:29:71
+ Leading detached comment [0]:
+ detached tag
+ Leading comments:
+ tag numero uno
+ Trailing comments:
+ tag trailer
+ that spans multiple lines...
+ more than two.
+
+
+ > message_type[0] > field[0] > options:
+desc_test_comments.proto:32:11
+desc_test_comments.proto:32:131
+
+
+ > message_type[0] > field[0] > options > packed:
+desc_test_comments.proto:32:12
+desc_test_comments.proto:32:23
+ Trailing comments:
+ packed!
+
+
+ > message_type[0] > field[0] > json_name:
+desc_test_comments.proto:32:39
+desc_test_comments.proto:32:56
+ Trailing comments:
+ custom JSON!
+
+
+ > message_type[0] > field[0] > options > ffubar[0]:
+desc_test_comments.proto:32:77
+desc_test_comments.proto:32:102
+
+
+ > message_type[0] > field[0] > options > ffubarb:
+desc_test_comments.proto:32:104
+desc_test_comments.proto:32:130
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:35:27
+desc_test_comments.proto:35:61
+
+
+ > message_type[0] > options > mfubar:
+desc_test_comments.proto:35:27
+desc_test_comments.proto:35:61
+ Leading comments:
+ lead mfubar
+ Trailing comments:
+ trailing mfubar
+
+
+
+ > message_type[0] > field[1]:
+desc_test_comments.proto:42:29
+desc_test_comments.proto:43:77
+ Leading detached comment [0]:
+ some detached comments
+
+ Leading detached comment [1]:
+ some detached comments
+
+ Leading detached comment [2]:
+ Another field comment
+
+ Leading comments:
+ label comment
+
+
+ > message_type[0] > field[1] > label:
+desc_test_comments.proto:42:29
+desc_test_comments.proto:42:37
+
+
+ > message_type[0] > field[1] > type:
+desc_test_comments.proto:42:57
+desc_test_comments.proto:42:63
+ Leading comments:
+ type comment
+
+
+ > message_type[0] > field[1] > name:
+desc_test_comments.proto:42:83
+desc_test_comments.proto:42:87
+ Leading comments:
+ name comment
+
+
+ > message_type[0] > field[1] > number:
+desc_test_comments.proto:42:90
+desc_test_comments.proto:42:91
+
+
+ > message_type[0] > field[1] > options:
+desc_test_comments.proto:43:17
+desc_test_comments.proto:43:76
+
+
+ > message_type[0] > field[1] > default_value:
+desc_test_comments.proto:43:37
+desc_test_comments.proto:43:54
+ Leading comments:
+ default lead
+ Trailing comments:
+ default trail
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:46:9
+desc_test_comments.proto:46:31
+ Leading comments:
+ extension range comments are (sadly) not preserved
+
+
+
+ > message_type[0] > extension_range[0]:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:30
+
+
+ > message_type[0] > extension_range[0] > start:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[0] > end:
+desc_test_comments.proto:46:27
+desc_test_comments.proto:46:30
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:47:9
+desc_test_comments.proto:47:109
+
+
+ > message_type[0] > extension_range[1]:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:30
+
+
+ > message_type[0] > extension_range[1] > start:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > end:
+desc_test_comments.proto:47:27
+desc_test_comments.proto:47:30
+
+
+ > message_type[0] > extension_range[1] > options:
+desc_test_comments.proto:47:31
+desc_test_comments.proto:47:108
+
+
+ > message_type[0] > extension_range[1] > options > exfubarb:
+desc_test_comments.proto:47:32
+desc_test_comments.proto:47:74
+
+
+ > message_type[0] > extension_range[1] > options > exfubar[0]:
+desc_test_comments.proto:47:76
+desc_test_comments.proto:47:107
+
+
+ > message_type[0] > reserved_range:
+desc_test_comments.proto:51:48
+desc_test_comments.proto:51:77
+ Leading detached comment [0]:
+ another detached comment
+
+ Leading comments:
+ same for reserved range comments
+
+
+ > message_type[0] > reserved_range[0]:
+desc_test_comments.proto:51:57
+desc_test_comments.proto:51:65
+
+
+ > message_type[0] > reserved_range[0] > start:
+desc_test_comments.proto:51:57
+desc_test_comments.proto:51:59
+
+
+ > message_type[0] > reserved_range[0] > end:
+desc_test_comments.proto:51:63
+desc_test_comments.proto:51:65
+
+
+ > message_type[0] > reserved_range[1]:
+desc_test_comments.proto:51:67
+desc_test_comments.proto:51:75
+
+
+ > message_type[0] > reserved_range[1] > start:
+desc_test_comments.proto:51:67
+desc_test_comments.proto:51:69
+
+
+ > message_type[0] > reserved_range[1] > end:
+desc_test_comments.proto:51:73
+desc_test_comments.proto:51:75
+
+
+ > message_type[0] > reserved_name:
+desc_test_comments.proto:52:9
+desc_test_comments.proto:52:38
+ Trailing comments:
+ reserved trailers
+
+
+ > message_type[0] > reserved_name[0]:
+desc_test_comments.proto:52:18
+desc_test_comments.proto:52:23
+
+
+ > message_type[0] > reserved_name[1]:
+desc_test_comments.proto:52:25
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > reserved_name[2]:
+desc_test_comments.proto:52:32
+desc_test_comments.proto:52:37
+
+
+ > message_type[0] > field[2]:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:67:10
+
+
+ > message_type[0] > field[2] > label:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:55:17
+
+
+ > message_type[0] > field[2] > type:
+desc_test_comments.proto:55:18
+desc_test_comments.proto:55:23
+
+
+ > message_type[0] > field[2] > name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+
+
+ > message_type[0] > field[2] > number:
+desc_test_comments.proto:55:50
+desc_test_comments.proto:55:51
+
+
+ > message_type[0] > nested_type[0]:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:67:10
+ Leading comments:
+ Group comment
+
+
+
+ > message_type[0] > nested_type[0] > name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+ Leading comments:
+ group name
+
+
+ > message_type[0] > field[2] > type_name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:57:17
+desc_test_comments.proto:57:52
+
+
+ > message_type[0] > nested_type[0] > options > mfubar:
+desc_test_comments.proto:57:17
+desc_test_comments.proto:57:52
+ Leading comments:
+ this is a custom option
+
+
+
+ > message_type[0] > nested_type[0] > field[0]:
+desc_test_comments.proto:59:17
+desc_test_comments.proto:59:41
+
+
+ > message_type[0] > nested_type[0] > field[0] > label:
+desc_test_comments.proto:59:17
+desc_test_comments.proto:59:25
+
+
+ > message_type[0] > nested_type[0] > field[0] > type:
+desc_test_comments.proto:59:26
+desc_test_comments.proto:59:32
+
+
+ > message_type[0] > nested_type[0] > field[0] > name:
+desc_test_comments.proto:59:33
+desc_test_comments.proto:59:36
+
+
+ > message_type[0] > nested_type[0] > field[0] > number:
+desc_test_comments.proto:59:39
+desc_test_comments.proto:59:40
+
+
+ > message_type[0] > nested_type[0] > field[1]:
+desc_test_comments.proto:60:17
+desc_test_comments.proto:60:40
+
+
+ > message_type[0] > nested_type[0] > field[1] > label:
+desc_test_comments.proto:60:17
+desc_test_comments.proto:60:25
+
+
+ > message_type[0] > nested_type[0] > field[1] > type:
+desc_test_comments.proto:60:26
+desc_test_comments.proto:60:31
+
+
+ > message_type[0] > nested_type[0] > field[1] > name:
+desc_test_comments.proto:60:32
+desc_test_comments.proto:60:35
+
+
+ > message_type[0] > nested_type[0] > field[1] > number:
+desc_test_comments.proto:60:38
+desc_test_comments.proto:60:39
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:62:17
+desc_test_comments.proto:62:64
+
+
+ > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor:
+desc_test_comments.proto:62:17
+desc_test_comments.proto:62:64
+
+
+ > message_type[0] > nested_type[0] > field[2]:
+desc_test_comments.proto:65:17
+desc_test_comments.proto:65:41
+ Leading comments:
+ Leading comment...
+
+ Trailing comments:
+ Trailing comment...
+
+
+
+ > message_type[0] > nested_type[0] > field[2] > label:
+desc_test_comments.proto:65:17
+desc_test_comments.proto:65:25
+
+
+ > message_type[0] > nested_type[0] > field[2] > type:
+desc_test_comments.proto:65:26
+desc_test_comments.proto:65:32
+
+
+ > message_type[0] > nested_type[0] > field[2] > name:
+desc_test_comments.proto:65:33
+desc_test_comments.proto:65:36
+
+
+ > message_type[0] > nested_type[0] > field[2] > number:
+desc_test_comments.proto:65:39
+desc_test_comments.proto:65:40
+
+
+ > message_type[0] > enum_type[0]:
+desc_test_comments.proto:69:9
+desc_test_comments.proto:90:10
+
+
+ > message_type[0] > enum_type[0] > name:
+desc_test_comments.proto:69:14
+desc_test_comments.proto:69:29
+ Trailing comments:
+ "super"!
+
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:72:17
+desc_test_comments.proto:72:43
+
+
+ > message_type[0] > enum_type[0] > options > allow_alias:
+desc_test_comments.proto:72:17
+desc_test_comments.proto:72:43
+ Leading comments:
+ allow_alias comments!
+
+
+
+ > message_type[0] > enum_type[0] > value[0]:
+desc_test_comments.proto:74:17
+desc_test_comments.proto:74:86
+
+
+ > message_type[0] > enum_type[0] > value[0] > name:
+desc_test_comments.proto:74:17
+desc_test_comments.proto:74:22
+
+
+ > message_type[0] > enum_type[0] > value[0] > number:
+desc_test_comments.proto:74:25
+desc_test_comments.proto:74:26
+
+
+ > message_type[0] > enum_type[0] > value[0] > options:
+desc_test_comments.proto:74:27
+desc_test_comments.proto:74:85
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubars:
+desc_test_comments.proto:74:28
+desc_test_comments.proto:74:56
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubar:
+desc_test_comments.proto:74:58
+desc_test_comments.proto:74:84
+
+
+ > message_type[0] > enum_type[0] > value[1]:
+desc_test_comments.proto:75:17
+desc_test_comments.proto:75:100
+
+
+ > message_type[0] > enum_type[0] > value[1] > name:
+desc_test_comments.proto:75:17
+desc_test_comments.proto:75:22
+
+
+ > message_type[0] > enum_type[0] > value[1] > number:
+desc_test_comments.proto:75:25
+desc_test_comments.proto:75:26
+
+
+ > message_type[0] > enum_type[0] > value[1] > options:
+desc_test_comments.proto:75:27
+desc_test_comments.proto:75:99
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaruf:
+desc_test_comments.proto:75:29
+desc_test_comments.proto:75:57
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaru:
+desc_test_comments.proto:75:73
+desc_test_comments.proto:75:98
+
+
+ > message_type[0] > enum_type[0] > value[2]:
+desc_test_comments.proto:76:17
+desc_test_comments.proto:76:27
+
+
+ > message_type[0] > enum_type[0] > value[2] > name:
+desc_test_comments.proto:76:17
+desc_test_comments.proto:76:22
+
+
+ > message_type[0] > enum_type[0] > value[2] > number:
+desc_test_comments.proto:76:25
+desc_test_comments.proto:76:26
+
+
+ > message_type[0] > enum_type[0] > value[3]:
+desc_test_comments.proto:77:17
+desc_test_comments.proto:77:28
+
+
+ > message_type[0] > enum_type[0] > value[3] > name:
+desc_test_comments.proto:77:17
+desc_test_comments.proto:77:23
+
+
+ > message_type[0] > enum_type[0] > value[3] > number:
+desc_test_comments.proto:77:26
+desc_test_comments.proto:77:27
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:79:17
+desc_test_comments.proto:79:52
+
+
+ > message_type[0] > enum_type[0] > options > efubars:
+desc_test_comments.proto:79:17
+desc_test_comments.proto:79:52
+
+
+ > message_type[0] > enum_type[0] > value[4]:
+desc_test_comments.proto:81:17
+desc_test_comments.proto:81:27
+
+
+ > message_type[0] > enum_type[0] > value[4] > name:
+desc_test_comments.proto:81:17
+desc_test_comments.proto:81:22
+
+
+ > message_type[0] > enum_type[0] > value[4] > number:
+desc_test_comments.proto:81:25
+desc_test_comments.proto:81:26
+
+
+ > message_type[0] > enum_type[0] > value[5]:
+desc_test_comments.proto:82:17
+desc_test_comments.proto:82:29
+
+
+ > message_type[0] > enum_type[0] > value[5] > name:
+desc_test_comments.proto:82:17
+desc_test_comments.proto:82:24
+
+
+ > message_type[0] > enum_type[0] > value[5] > number:
+desc_test_comments.proto:82:27
+desc_test_comments.proto:82:28
+
+
+ > message_type[0] > enum_type[0] > value[6]:
+desc_test_comments.proto:83:17
+desc_test_comments.proto:83:60
+
+
+ > message_type[0] > enum_type[0] > value[6] > name:
+desc_test_comments.proto:83:17
+desc_test_comments.proto:83:24
+
+
+ > message_type[0] > enum_type[0] > value[6] > number:
+desc_test_comments.proto:83:27
+desc_test_comments.proto:83:28
+
+
+ > message_type[0] > enum_type[0] > value[6] > options:
+desc_test_comments.proto:83:29
+desc_test_comments.proto:83:59
+
+
+ > message_type[0] > enum_type[0] > value[6] > options > evfubarsf:
+desc_test_comments.proto:83:30
+desc_test_comments.proto:83:58
+
+
+ > message_type[0] > enum_type[0] > value[7]:
+desc_test_comments.proto:84:17
+desc_test_comments.proto:84:28
+
+
+ > message_type[0] > enum_type[0] > value[7] > name:
+desc_test_comments.proto:84:17
+desc_test_comments.proto:84:23
+
+
+ > message_type[0] > enum_type[0] > value[7] > number:
+desc_test_comments.proto:84:26
+desc_test_comments.proto:84:27
+
+
+ > message_type[0] > enum_type[0] > value[8]:
+desc_test_comments.proto:85:17
+desc_test_comments.proto:85:31
+
+
+ > message_type[0] > enum_type[0] > value[8] > name:
+desc_test_comments.proto:85:17
+desc_test_comments.proto:85:26
+
+
+ > message_type[0] > enum_type[0] > value[8] > number:
+desc_test_comments.proto:85:29
+desc_test_comments.proto:85:30
+
+
+ > message_type[0] > enum_type[0] > value[9]:
+desc_test_comments.proto:86:17
+desc_test_comments.proto:86:27
+
+
+ > message_type[0] > enum_type[0] > value[9] > name:
+desc_test_comments.proto:86:17
+desc_test_comments.proto:86:22
+
+
+ > message_type[0] > enum_type[0] > value[9] > number:
+desc_test_comments.proto:86:25
+desc_test_comments.proto:86:26
+
+
+ > message_type[0] > enum_type[0] > value[10]:
+desc_test_comments.proto:87:17
+desc_test_comments.proto:87:31
+
+
+ > message_type[0] > enum_type[0] > value[10] > name:
+desc_test_comments.proto:87:17
+desc_test_comments.proto:87:23
+
+
+ > message_type[0] > enum_type[0] > value[10] > number:
+desc_test_comments.proto:87:26
+desc_test_comments.proto:87:30
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:89:17
+desc_test_comments.proto:89:50
+
+
+ > message_type[0] > enum_type[0] > options > efubar:
+desc_test_comments.proto:89:17
+desc_test_comments.proto:89:50
+
+
+ > message_type[0] > oneof_decl[0]:
+desc_test_comments.proto:93:9
+desc_test_comments.proto:96:10
+ Leading comments:
+ can be this or that
+
+
+
+ > message_type[0] > oneof_decl[0] > name:
+desc_test_comments.proto:93:15
+desc_test_comments.proto:93:18
+
+
+ > message_type[0] > field[3]:
+desc_test_comments.proto:94:17
+desc_test_comments.proto:94:33
+
+
+ > message_type[0] > field[3] > type:
+desc_test_comments.proto:94:17
+desc_test_comments.proto:94:23
+
+
+ > message_type[0] > field[3] > name:
+desc_test_comments.proto:94:24
+desc_test_comments.proto:94:28
+
+
+ > message_type[0] > field[3] > number:
+desc_test_comments.proto:94:31
+desc_test_comments.proto:94:32
+
+
+ > message_type[0] > field[4]:
+desc_test_comments.proto:95:17
+desc_test_comments.proto:95:32
+
+
+ > message_type[0] > field[4] > type:
+desc_test_comments.proto:95:17
+desc_test_comments.proto:95:22
+
+
+ > message_type[0] > field[4] > name:
+desc_test_comments.proto:95:23
+desc_test_comments.proto:95:27
+
+
+ > message_type[0] > field[4] > number:
+desc_test_comments.proto:95:30
+desc_test_comments.proto:95:31
+
+
+ > message_type[0] > oneof_decl[1]:
+desc_test_comments.proto:98:9
+desc_test_comments.proto:101:10
+ Leading comments:
+ can be these or those
+
+
+
+ > message_type[0] > oneof_decl[1] > name:
+desc_test_comments.proto:98:15
+desc_test_comments.proto:98:18
+
+
+ > message_type[0] > field[5]:
+desc_test_comments.proto:99:17
+desc_test_comments.proto:99:34
+
+
+ > message_type[0] > field[5] > type:
+desc_test_comments.proto:99:17
+desc_test_comments.proto:99:23
+
+
+ > message_type[0] > field[5] > name:
+desc_test_comments.proto:99:24
+desc_test_comments.proto:99:29
+
+
+ > message_type[0] > field[5] > number:
+desc_test_comments.proto:99:32
+desc_test_comments.proto:99:33
+
+
+ > message_type[0] > field[6]:
+desc_test_comments.proto:100:17
+desc_test_comments.proto:100:33
+
+
+ > message_type[0] > field[6] > type:
+desc_test_comments.proto:100:17
+desc_test_comments.proto:100:22
+
+
+ > message_type[0] > field[6] > name:
+desc_test_comments.proto:100:23
+desc_test_comments.proto:100:28
+
+
+ > message_type[0] > field[6] > number:
+desc_test_comments.proto:100:31
+desc_test_comments.proto:100:32
+
+
+ > message_type[0] > field[7]:
+desc_test_comments.proto:104:9
+desc_test_comments.proto:104:40
+ Leading comments:
+ map field
+
+
+
+ > message_type[0] > field[7] > type_name:
+desc_test_comments.proto:104:9
+desc_test_comments.proto:104:28
+
+
+ > message_type[0] > field[7] > name:
+desc_test_comments.proto:104:29
+desc_test_comments.proto:104:35
+
+
+ > message_type[0] > field[7] > number:
+desc_test_comments.proto:104:38
+desc_test_comments.proto:104:39
+
+
+ > extension:
+desc_test_comments.proto:108:1
+desc_test_comments.proto:117:2
+ Trailing comments:
+ extend trailer...
+
+
+
+ > extension[0]:
+desc_test_comments.proto:114:9
+desc_test_comments.proto:114:37
+ Leading comments:
+ comment for guid1
+
+
+
+ > extension[0] > extendee:
+desc_test_comments.proto:110:1
+desc_test_comments.proto:110:8
+ Leading comments:
+ extendee comment
+
+ Trailing comments:
+ extendee trailer
+
+
+
+ > extension[0] > label:
+desc_test_comments.proto:114:9
+desc_test_comments.proto:114:17
+
+
+ > extension[0] > type:
+desc_test_comments.proto:114:18
+desc_test_comments.proto:114:24
+
+
+ > extension[0] > name:
+desc_test_comments.proto:114:25
+desc_test_comments.proto:114:30
+
+
+ > extension[0] > number:
+desc_test_comments.proto:114:33
+desc_test_comments.proto:114:36
+
+
+ > extension[1]:
+desc_test_comments.proto:116:9
+desc_test_comments.proto:116:37
+ Leading comments:
+ ... and a comment for guid2
+
+
+
+ > extension[1] > extendee:
+desc_test_comments.proto:110:1
+desc_test_comments.proto:110:8
+
+
+ > extension[1] > label:
+desc_test_comments.proto:116:9
+desc_test_comments.proto:116:17
+
+
+ > extension[1] > type:
+desc_test_comments.proto:116:18
+desc_test_comments.proto:116:24
+
+
+ > extension[1] > name:
+desc_test_comments.proto:116:25
+desc_test_comments.proto:116:30
+
+
+ > extension[1] > number:
+desc_test_comments.proto:116:33
+desc_test_comments.proto:116:36
+
+
+ > message_type[1]:
+desc_test_comments.proto:120:1
+desc_test_comments.proto:120:81
+
+
+ > message_type[1] > name:
+desc_test_comments.proto:120:36
+desc_test_comments.proto:120:50
+ Leading comments:
+ name leading comment
+ Trailing comments:
+ name trailing comment
+
+
+ > service[0]:
+desc_test_comments.proto:123:1
+desc_test_comments.proto:141:2
+ Leading comments:
+ Service comment
+
+ Trailing comments:
+ service trailer
+
+
+
+ > service[0] > name:
+desc_test_comments.proto:123:28
+desc_test_comments.proto:123:38
+ Leading comments:
+ service name
+
+
+ > service[0] > options:
+desc_test_comments.proto:125:9
+desc_test_comments.proto:125:43
+
+
+ > service[0] > options > sfubar > id:
+desc_test_comments.proto:125:9
+desc_test_comments.proto:125:43
+ Leading comments:
+ option that sets field
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:127:9
+desc_test_comments.proto:127:47
+
+
+ > service[0] > options > sfubar > name:
+desc_test_comments.proto:127:9
+desc_test_comments.proto:127:47
+ Leading comments:
+ another option that sets field
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:128:9
+desc_test_comments.proto:128:35
+
+
+ > service[0] > options > deprecated:
+desc_test_comments.proto:128:9
+desc_test_comments.proto:128:35
+ Trailing comments:
+ DEPRECATED!
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:130:9
+desc_test_comments.proto:130:45
+
+
+ > service[0] > options > sfubare:
+desc_test_comments.proto:130:9
+desc_test_comments.proto:130:45
+
+
+ > service[0] > method[0]:
+desc_test_comments.proto:133:9
+desc_test_comments.proto:134:84
+ Leading comments:
+ Method comment
+
+
+
+ > service[0] > method[0] > name:
+desc_test_comments.proto:133:28
+desc_test_comments.proto:133:40
+ Leading comments:
+ rpc name
+ Trailing comments:
+ comment A
+
+
+ > service[0] > method[0] > client_streaming:
+desc_test_comments.proto:133:73
+desc_test_comments.proto:133:79
+ Leading comments:
+ comment B
+
+
+ > service[0] > method[0] > input_type:
+desc_test_comments.proto:133:96
+desc_test_comments.proto:133:103
+ Leading comments:
+ comment C
+
+
+ > service[0] > method[0] > output_type:
+desc_test_comments.proto:134:57
+desc_test_comments.proto:134:64
+ Leading comments:
+comment E
+
+
+ > service[0] > method[1]:
+desc_test_comments.proto:136:9
+desc_test_comments.proto:140:10
+
+
+ > service[0] > method[1] > name:
+desc_test_comments.proto:136:13
+desc_test_comments.proto:136:21
+
+
+ > service[0] > method[1] > input_type:
+desc_test_comments.proto:136:23
+desc_test_comments.proto:136:30
+
+
+ > service[0] > method[1] > output_type:
+desc_test_comments.proto:136:41
+desc_test_comments.proto:136:62
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:137:17
+desc_test_comments.proto:137:42
+
+
+ > service[0] > method[1] > options > deprecated:
+desc_test_comments.proto:137:17
+desc_test_comments.proto:137:42
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:138:17
+desc_test_comments.proto:138:53
+
+
+ > service[0] > method[1] > options > mtfubar[0]:
+desc_test_comments.proto:138:17
+desc_test_comments.proto:138:53
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:139:17
+desc_test_comments.proto:139:56
+
+
+ > service[0] > method[1] > options > mtfubard:
+desc_test_comments.proto:139:17
+desc_test_comments.proto:139:56
+---- desc_test_complex.proto ----
+
+
+:
+desc_test_complex.proto:1:1
+desc_test_complex.proto:286:2
+
+
+ > syntax:
+desc_test_complex.proto:1:1
+desc_test_complex.proto:1:19
+
+
+ > package:
+desc_test_complex.proto:3:1
+desc_test_complex.proto:3:17
+
+
+ > options:
+desc_test_complex.proto:5:1
+desc_test_complex.proto:5:73
+
+
+ > options > go_package:
+desc_test_complex.proto:5:1
+desc_test_complex.proto:5:73
+
+
+ > dependency[0]:
+desc_test_complex.proto:7:1
+desc_test_complex.proto:7:43
+
+
+ > message_type[0]:
+desc_test_complex.proto:9:1
+desc_test_complex.proto:12:2
+
+
+ > message_type[0] > name:
+desc_test_complex.proto:9:9
+desc_test_complex.proto:9:15
+
+
+ > message_type[0] > field[0]:
+desc_test_complex.proto:10:9
+desc_test_complex.proto:10:34
+
+
+ > message_type[0] > field[0] > label:
+desc_test_complex.proto:10:9
+desc_test_complex.proto:10:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_complex.proto:10:18
+desc_test_complex.proto:10:24
+
+
+ > message_type[0] > field[0] > name:
+desc_test_complex.proto:10:25
+desc_test_complex.proto:10:29
+
+
+ > message_type[0] > field[0] > number:
+desc_test_complex.proto:10:32
+desc_test_complex.proto:10:33
+
+
+ > message_type[0] > field[1]:
+desc_test_complex.proto:11:9
+desc_test_complex.proto:11:32
+
+
+ > message_type[0] > field[1] > label:
+desc_test_complex.proto:11:9
+desc_test_complex.proto:11:17
+
+
+ > message_type[0] > field[1] > type:
+desc_test_complex.proto:11:18
+desc_test_complex.proto:11:24
+
+
+ > message_type[0] > field[1] > name:
+desc_test_complex.proto:11:25
+desc_test_complex.proto:11:27
+
+
+ > message_type[0] > field[1] > number:
+desc_test_complex.proto:11:30
+desc_test_complex.proto:11:31
+
+
+ > extension:
+desc_test_complex.proto:14:1
+desc_test_complex.proto:18:2
+
+
+ > extension[0]:
+desc_test_complex.proto:17:9
+desc_test_complex.proto:17:39
+
+
+ > extension[0] > extendee:
+desc_test_complex.proto:14:8
+desc_test_complex.proto:16:25
+
+
+ > extension[0] > label:
+desc_test_complex.proto:17:9
+desc_test_complex.proto:17:17
+
+
+ > extension[0] > type:
+desc_test_complex.proto:17:18
+desc_test_complex.proto:17:24
+
+
+ > extension[0] > name:
+desc_test_complex.proto:17:25
+desc_test_complex.proto:17:30
+
+
+ > extension[0] > number:
+desc_test_complex.proto:17:33
+desc_test_complex.proto:17:38
+
+
+ > message_type[1]:
+desc_test_complex.proto:20:1
+desc_test_complex.proto:59:2
+
+
+ > message_type[1] > name:
+desc_test_complex.proto:20:9
+desc_test_complex.proto:20:13
+
+
+ > message_type[1] > field[0]:
+desc_test_complex.proto:21:9
+desc_test_complex.proto:21:55
+
+
+ > message_type[1] > field[0] > label:
+desc_test_complex.proto:21:9
+desc_test_complex.proto:21:17
+
+
+ > message_type[1] > field[0] > type:
+desc_test_complex.proto:21:18
+desc_test_complex.proto:21:24
+
+
+ > message_type[1] > field[0] > name:
+desc_test_complex.proto:21:25
+desc_test_complex.proto:21:28
+
+
+ > message_type[1] > field[0] > number:
+desc_test_complex.proto:21:31
+desc_test_complex.proto:21:32
+
+
+ > message_type[1] > field[0] > options:
+desc_test_complex.proto:21:33
+desc_test_complex.proto:21:54
+
+
+ > message_type[1] > field[0] > json_name:
+desc_test_complex.proto:21:34
+desc_test_complex.proto:21:53
+
+
+ > message_type[1] > field[1]:
+desc_test_complex.proto:22:9
+desc_test_complex.proto:22:34
+
+
+ > message_type[1] > field[1] > label:
+desc_test_complex.proto:22:9
+desc_test_complex.proto:22:17
+
+
+ > message_type[1] > field[1] > type:
+desc_test_complex.proto:22:18
+desc_test_complex.proto:22:23
+
+
+ > message_type[1] > field[1] > name:
+desc_test_complex.proto:22:24
+desc_test_complex.proto:22:29
+
+
+ > message_type[1] > field[1] > number:
+desc_test_complex.proto:22:32
+desc_test_complex.proto:22:33
+
+
+ > message_type[1] > field[2]:
+desc_test_complex.proto:23:9
+desc_test_complex.proto:23:31
+
+
+ > message_type[1] > field[2] > label:
+desc_test_complex.proto:23:9
+desc_test_complex.proto:23:17
+
+
+ > message_type[1] > field[2] > type_name:
+desc_test_complex.proto:23:18
+desc_test_complex.proto:23:24
+
+
+ > message_type[1] > field[2] > name:
+desc_test_complex.proto:23:25
+desc_test_complex.proto:23:26
+
+
+ > message_type[1] > field[2] > number:
+desc_test_complex.proto:23:29
+desc_test_complex.proto:23:30
+
+
+ > message_type[1] > field[3]:
+desc_test_complex.proto:24:9
+desc_test_complex.proto:24:31
+
+
+ > message_type[1] > field[3] > label:
+desc_test_complex.proto:24:9
+desc_test_complex.proto:24:17
+
+
+ > message_type[1] > field[3] > type_name:
+desc_test_complex.proto:24:18
+desc_test_complex.proto:24:24
+
+
+ > message_type[1] > field[3] > name:
+desc_test_complex.proto:24:25
+desc_test_complex.proto:24:26
+
+
+ > message_type[1] > field[3] > number:
+desc_test_complex.proto:24:29
+desc_test_complex.proto:24:30
+
+
+ > message_type[1] > field[4]:
+desc_test_complex.proto:25:9
+desc_test_complex.proto:25:34
+
+
+ > message_type[1] > field[4] > type_name:
+desc_test_complex.proto:25:9
+desc_test_complex.proto:25:27
+
+
+ > message_type[1] > field[4] > name:
+desc_test_complex.proto:25:28
+desc_test_complex.proto:25:29
+
+
+ > message_type[1] > field[4] > number:
+desc_test_complex.proto:25:32
+desc_test_complex.proto:25:33
+
+
+ > message_type[1] > field[5]:
+desc_test_complex.proto:27:9
+desc_test_complex.proto:27:67
+
+
+ > message_type[1] > field[5] > label:
+desc_test_complex.proto:27:9
+desc_test_complex.proto:27:17
+
+
+ > message_type[1] > field[5] > type:
+desc_test_complex.proto:27:18
+desc_test_complex.proto:27:23
+
+
+ > message_type[1] > field[5] > name:
+desc_test_complex.proto:27:24
+desc_test_complex.proto:27:25
+
+
+ > message_type[1] > field[5] > number:
+desc_test_complex.proto:27:28
+desc_test_complex.proto:27:29
+
+
+ > message_type[1] > field[5] > options:
+desc_test_complex.proto:27:30
+desc_test_complex.proto:27:66
+
+
+ > message_type[1] > field[5] > default_value:
+desc_test_complex.proto:27:31
+desc_test_complex.proto:27:65
+
+
+ > message_type[1] > extension_range:
+desc_test_complex.proto:29:9
+desc_test_complex.proto:29:31
+
+
+ > message_type[1] > extension_range[0]:
+desc_test_complex.proto:29:20
+desc_test_complex.proto:29:30
+
+
+ > message_type[1] > extension_range[0] > start:
+desc_test_complex.proto:29:20
+desc_test_complex.proto:29:23
+
+
+ > message_type[1] > extension_range[0] > end:
+desc_test_complex.proto:29:27
+desc_test_complex.proto:29:30
+
+
+ > message_type[1] > extension_range:
+desc_test_complex.proto:31:9
+desc_test_complex.proto:31:62
+
+
+ > message_type[1] > extension_range[1]:
+desc_test_complex.proto:31:20
+desc_test_complex.proto:31:30
+
+
+ > message_type[1] > extension_range[1] > start:
+desc_test_complex.proto:31:20
+desc_test_complex.proto:31:23
+
+
+ > message_type[1] > extension_range[1] > end:
+desc_test_complex.proto:31:27
+desc_test_complex.proto:31:30
+
+
+ > message_type[1] > extension_range[1] > options:
+desc_test_complex.proto:31:43
+desc_test_complex.proto:31:61
+
+
+ > message_type[1] > extension_range[1] > options > label:
+desc_test_complex.proto:31:44
+desc_test_complex.proto:31:60
+
+
+ > message_type[1] > extension_range[2]:
+desc_test_complex.proto:31:32
+desc_test_complex.proto:31:42
+
+
+ > message_type[1] > extension_range[2] > start:
+desc_test_complex.proto:31:32
+desc_test_complex.proto:31:35
+
+
+ > message_type[1] > extension_range[2] > end:
+desc_test_complex.proto:31:39
+desc_test_complex.proto:31:42
+
+
+ > message_type[1] > extension_range[2] > options:
+desc_test_complex.proto:31:43
+desc_test_complex.proto:31:61
+
+
+ > message_type[1] > extension_range[2] > options > label:
+desc_test_complex.proto:31:44
+desc_test_complex.proto:31:60
+
+
+ > message_type[1] > nested_type[0]:
+desc_test_complex.proto:33:9
+desc_test_complex.proto:58:10
+
+
+ > message_type[1] > nested_type[0] > name:
+desc_test_complex.proto:33:17
+desc_test_complex.proto:33:23
+
+
+ > message_type[1] > nested_type[0] > extension:
+desc_test_complex.proto:34:17
+desc_test_complex.proto:36:18
+
+
+ > message_type[1] > nested_type[0] > extension[0]:
+desc_test_complex.proto:35:25
+desc_test_complex.proto:35:56
+
+
+ > message_type[1] > nested_type[0] > extension[0] !!! > extendee:
+desc_test_complex.proto:34:24
+desc_test_complex.proto:34:54
+
+
+ > message_type[1] > nested_type[0] > extension[0] !!! > label:
+desc_test_complex.proto:35:25
+desc_test_complex.proto:35:33
+
+
+ > message_type[1] > nested_type[0] > extension[0] !!! > type:
+desc_test_complex.proto:35:34
+desc_test_complex.proto:35:39
+
+
+ > message_type[1] > nested_type[0] > extension[0] !!! > name:
+desc_test_complex.proto:35:40
+desc_test_complex.proto:35:47
+
+
+ > message_type[1] > nested_type[0] > extension[0] !!! > number:
+desc_test_complex.proto:35:50
+desc_test_complex.proto:35:55
+
+
+ > message_type[1] > nested_type[0] > nested_type[0]:
+desc_test_complex.proto:37:17
+desc_test_complex.proto:57:18
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > name:
+desc_test_complex.proto:37:25
+desc_test_complex.proto:37:38
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0]:
+desc_test_complex.proto:38:25
+desc_test_complex.proto:46:26
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > name:
+desc_test_complex.proto:38:30
+desc_test_complex.proto:38:33
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[0]:
+desc_test_complex.proto:39:33
+desc_test_complex.proto:39:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[0] > name:
+desc_test_complex.proto:39:33
+desc_test_complex.proto:39:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[0] > number:
+desc_test_complex.proto:39:38
+desc_test_complex.proto:39:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[1]:
+desc_test_complex.proto:40:33
+desc_test_complex.proto:40:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[1] > name:
+desc_test_complex.proto:40:33
+desc_test_complex.proto:40:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[1] > number:
+desc_test_complex.proto:40:38
+desc_test_complex.proto:40:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[2]:
+desc_test_complex.proto:41:33
+desc_test_complex.proto:41:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[2] > name:
+desc_test_complex.proto:41:33
+desc_test_complex.proto:41:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[2] > number:
+desc_test_complex.proto:41:38
+desc_test_complex.proto:41:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[3]:
+desc_test_complex.proto:42:33
+desc_test_complex.proto:42:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[3] > name:
+desc_test_complex.proto:42:33
+desc_test_complex.proto:42:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[3] > number:
+desc_test_complex.proto:42:38
+desc_test_complex.proto:42:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[4]:
+desc_test_complex.proto:43:33
+desc_test_complex.proto:43:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[4] > name:
+desc_test_complex.proto:43:33
+desc_test_complex.proto:43:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[4] > number:
+desc_test_complex.proto:43:38
+desc_test_complex.proto:43:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[5]:
+desc_test_complex.proto:44:33
+desc_test_complex.proto:44:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[5] > name:
+desc_test_complex.proto:44:33
+desc_test_complex.proto:44:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[5] > number:
+desc_test_complex.proto:44:38
+desc_test_complex.proto:44:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[6]:
+desc_test_complex.proto:45:33
+desc_test_complex.proto:45:40
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[6] > name:
+desc_test_complex.proto:45:33
+desc_test_complex.proto:45:35
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > enum_type[0] > value[6] > number:
+desc_test_complex.proto:45:38
+desc_test_complex.proto:45:39
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > options:
+desc_test_complex.proto:47:25
+desc_test_complex.proto:47:50
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > options > fooblez:
+desc_test_complex.proto:47:25
+desc_test_complex.proto:47:50
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension:
+desc_test_complex.proto:48:25
+desc_test_complex.proto:50:26
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0]:
+desc_test_complex.proto:49:33
+desc_test_complex.proto:49:64
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0] > extendee:
+desc_test_complex.proto:48:32
+desc_test_complex.proto:48:36
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0] > label:
+desc_test_complex.proto:49:33
+desc_test_complex.proto:49:41
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0] > type:
+desc_test_complex.proto:49:42
+desc_test_complex.proto:49:48
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0] > name:
+desc_test_complex.proto:49:49
+desc_test_complex.proto:49:57
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > extension[0] > number:
+desc_test_complex.proto:49:60
+desc_test_complex.proto:49:63
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > options:
+desc_test_complex.proto:51:25
+desc_test_complex.proto:51:108
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > options > rept[0]:
+desc_test_complex.proto:51:25
+desc_test_complex.proto:51:108
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0]:
+desc_test_complex.proto:52:25
+desc_test_complex.proto:56:26
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > name:
+desc_test_complex.proto:52:33
+desc_test_complex.proto:52:51
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > options:
+desc_test_complex.proto:53:33
+desc_test_complex.proto:53:109
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > options > rept[0]:
+desc_test_complex.proto:53:33
+desc_test_complex.proto:53:109
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > field[0]:
+desc_test_complex.proto:55:33
+desc_test_complex.proto:55:56
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > field[0] > label:
+desc_test_complex.proto:55:33
+desc_test_complex.proto:55:41
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > field[0] > type_name:
+desc_test_complex.proto:55:42
+desc_test_complex.proto:55:46
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > field[0] > name:
+desc_test_complex.proto:55:47
+desc_test_complex.proto:55:51
+
+
+ > message_type[1] > nested_type[0] > nested_type[0] !!! > nested_type[0] > field[0] > number:
+desc_test_complex.proto:55:54
+desc_test_complex.proto:55:55
+
+
+ > enum_type[0]:
+desc_test_complex.proto:61:1
+desc_test_complex.proto:70:2
+
+
+ > enum_type[0] > name:
+desc_test_complex.proto:61:6
+desc_test_complex.proto:61:26
+
+
+ > enum_type[0] > value[0]:
+desc_test_complex.proto:62:9
+desc_test_complex.proto:62:15
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_complex.proto:62:9
+desc_test_complex.proto:62:10
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_complex.proto:62:13
+desc_test_complex.proto:62:14
+
+
+ > enum_type[0] > value[1]:
+desc_test_complex.proto:63:9
+desc_test_complex.proto:63:15
+
+
+ > enum_type[0] > value[1] > name:
+desc_test_complex.proto:63:9
+desc_test_complex.proto:63:10
+
+
+ > enum_type[0] > value[1] > number:
+desc_test_complex.proto:63:13
+desc_test_complex.proto:63:14
+
+
+ > enum_type[0] > value[2]:
+desc_test_complex.proto:64:9
+desc_test_complex.proto:64:15
+
+
+ > enum_type[0] > value[2] > name:
+desc_test_complex.proto:64:9
+desc_test_complex.proto:64:10
+
+
+ > enum_type[0] > value[2] > number:
+desc_test_complex.proto:64:13
+desc_test_complex.proto:64:14
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:65:9
+desc_test_complex.proto:65:30
+
+
+ > enum_type[0] > reserved_range[0]:
+desc_test_complex.proto:65:18
+desc_test_complex.proto:65:29
+
+
+ > enum_type[0] > reserved_range[0] > start:
+desc_test_complex.proto:65:18
+desc_test_complex.proto:65:22
+
+
+ > enum_type[0] > reserved_range[0] > end:
+desc_test_complex.proto:65:26
+desc_test_complex.proto:65:29
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:66:9
+desc_test_complex.proto:66:26
+
+
+ > enum_type[0] > reserved_range[1]:
+desc_test_complex.proto:66:18
+desc_test_complex.proto:66:25
+
+
+ > enum_type[0] > reserved_range[1] > start:
+desc_test_complex.proto:66:18
+desc_test_complex.proto:66:20
+
+
+ > enum_type[0] > reserved_range[1] > end:
+desc_test_complex.proto:66:24
+desc_test_complex.proto:66:25
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:67:9
+desc_test_complex.proto:67:40
+
+
+ > enum_type[0] > reserved_range[2]:
+desc_test_complex.proto:67:18
+desc_test_complex.proto:67:25
+
+
+ > enum_type[0] > reserved_range[2] > start:
+desc_test_complex.proto:67:18
+desc_test_complex.proto:67:19
+
+
+ > enum_type[0] > reserved_range[2] > end:
+desc_test_complex.proto:67:23
+desc_test_complex.proto:67:25
+
+
+ > enum_type[0] > reserved_range[3]:
+desc_test_complex.proto:67:27
+desc_test_complex.proto:67:35
+
+
+ > enum_type[0] > reserved_range[3] > start:
+desc_test_complex.proto:67:27
+desc_test_complex.proto:67:29
+
+
+ > enum_type[0] > reserved_range[3] > end:
+desc_test_complex.proto:67:33
+desc_test_complex.proto:67:35
+
+
+ > enum_type[0] > reserved_range[4]:
+desc_test_complex.proto:67:37
+desc_test_complex.proto:67:39
+
+
+ > enum_type[0] > reserved_range[4] > start:
+desc_test_complex.proto:67:37
+desc_test_complex.proto:67:39
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:68:9
+desc_test_complex.proto:68:27
+
+
+ > enum_type[0] > reserved_range[5]:
+desc_test_complex.proto:68:18
+desc_test_complex.proto:68:26
+
+
+ > enum_type[0] > reserved_range[5] > start:
+desc_test_complex.proto:68:18
+desc_test_complex.proto:68:20
+
+
+ > enum_type[0] > reserved_range[5] > end:
+desc_test_complex.proto:68:24
+desc_test_complex.proto:68:26
+
+
+ > enum_type[0] > reserved_name:
+desc_test_complex.proto:69:9
+desc_test_complex.proto:69:32
+
+
+ > enum_type[0] > reserved_name[0]:
+desc_test_complex.proto:69:18
+desc_test_complex.proto:69:21
+
+
+ > enum_type[0] > reserved_name[1]:
+desc_test_complex.proto:69:23
+desc_test_complex.proto:69:26
+
+
+ > enum_type[0] > reserved_name[2]:
+desc_test_complex.proto:69:28
+desc_test_complex.proto:69:31
+
+
+ > message_type[2]:
+desc_test_complex.proto:72:1
+desc_test_complex.proto:76:2
+
+
+ > message_type[2] > name:
+desc_test_complex.proto:72:9
+desc_test_complex.proto:72:32
+
+
+ > message_type[2] > reserved_range:
+desc_test_complex.proto:73:9
+desc_test_complex.proto:73:40
+
+
+ > message_type[2] > reserved_range[0]:
+desc_test_complex.proto:73:18
+desc_test_complex.proto:73:25
+
+
+ > message_type[2] > reserved_range[0] > start:
+desc_test_complex.proto:73:18
+desc_test_complex.proto:73:19
+
+
+ > message_type[2] > reserved_range[0] > end:
+desc_test_complex.proto:73:23
+desc_test_complex.proto:73:25
+
+
+ > message_type[2] > reserved_range[1]:
+desc_test_complex.proto:73:27
+desc_test_complex.proto:73:35
+
+
+ > message_type[2] > reserved_range[1] > start:
+desc_test_complex.proto:73:27
+desc_test_complex.proto:73:29
+
+
+ > message_type[2] > reserved_range[1] > end:
+desc_test_complex.proto:73:33
+desc_test_complex.proto:73:35
+
+
+ > message_type[2] > reserved_range[2]:
+desc_test_complex.proto:73:37
+desc_test_complex.proto:73:39
+
+
+ > message_type[2] > reserved_range[2] > start:
+desc_test_complex.proto:73:37
+desc_test_complex.proto:73:39
+
+
+ > message_type[2] > reserved_range:
+desc_test_complex.proto:74:9
+desc_test_complex.proto:74:30
+
+
+ > message_type[2] > reserved_range[3]:
+desc_test_complex.proto:74:18
+desc_test_complex.proto:74:29
+
+
+ > message_type[2] > reserved_range[3] > start:
+desc_test_complex.proto:74:18
+desc_test_complex.proto:74:22
+
+
+ > message_type[2] > reserved_range[3] > end:
+desc_test_complex.proto:74:26
+desc_test_complex.proto:74:29
+
+
+ > message_type[2] > reserved_name:
+desc_test_complex.proto:75:9
+desc_test_complex.proto:75:32
+
+
+ > message_type[2] > reserved_name[0]:
+desc_test_complex.proto:75:18
+desc_test_complex.proto:75:21
+
+
+ > message_type[2] > reserved_name[1]:
+desc_test_complex.proto:75:23
+desc_test_complex.proto:75:26
+
+
+ > message_type[2] > reserved_name[2]:
+desc_test_complex.proto:75:28
+desc_test_complex.proto:75:31
+
+
+ > extension:
+desc_test_complex.proto:78:1
+desc_test_complex.proto:82:2
+
+
+ > extension[1]:
+desc_test_complex.proto:79:9
+desc_test_complex.proto:79:36
+
+
+ > extension[1] > extendee:
+desc_test_complex.proto:78:8
+desc_test_complex.proto:78:38
+
+
+ > extension[1] > label:
+desc_test_complex.proto:79:9
+desc_test_complex.proto:79:17
+
+
+ > extension[1] > type_name:
+desc_test_complex.proto:79:18
+desc_test_complex.proto:79:22
+
+
+ > extension[1] > name:
+desc_test_complex.proto:79:23
+desc_test_complex.proto:79:27
+
+
+ > extension[1] > number:
+desc_test_complex.proto:79:30
+desc_test_complex.proto:79:35
+
+
+ > extension[2]:
+desc_test_complex.proto:80:9
+desc_test_complex.proto:80:60
+
+
+ > extension[2] > extendee:
+desc_test_complex.proto:78:8
+desc_test_complex.proto:78:38
+
+
+ > extension[2] > label:
+desc_test_complex.proto:80:9
+desc_test_complex.proto:80:17
+
+
+ > extension[2] > type_name:
+desc_test_complex.proto:80:18
+desc_test_complex.proto:80:47
+
+
+ > extension[2] > name:
+desc_test_complex.proto:80:48
+desc_test_complex.proto:80:51
+
+
+ > extension[2] > number:
+desc_test_complex.proto:80:54
+desc_test_complex.proto:80:59
+
+
+ > extension[3]:
+desc_test_complex.proto:81:9
+desc_test_complex.proto:81:36
+
+
+ > extension[3] > extendee:
+desc_test_complex.proto:78:8
+desc_test_complex.proto:78:38
+
+
+ > extension[3] > label:
+desc_test_complex.proto:81:9
+desc_test_complex.proto:81:17
+
+
+ > extension[3] > type_name:
+desc_test_complex.proto:81:18
+desc_test_complex.proto:81:25
+
+
+ > extension[3] > name:
+desc_test_complex.proto:81:26
+desc_test_complex.proto:81:27
+
+
+ > extension[3] > number:
+desc_test_complex.proto:81:30
+desc_test_complex.proto:81:35
+
+
+ > message_type[3]:
+desc_test_complex.proto:84:1
+desc_test_complex.proto:99:2
+
+
+ > message_type[3] > name:
+desc_test_complex.proto:84:9
+desc_test_complex.proto:84:16
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:85:5
+desc_test_complex.proto:85:130
+
+
+ > message_type[3] > options > rept[0]:
+desc_test_complex.proto:85:5
+desc_test_complex.proto:85:130
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:86:5
+desc_test_complex.proto:86:115
+
+
+ > message_type[3] > options > rept[1]:
+desc_test_complex.proto:86:5
+desc_test_complex.proto:86:115
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:87:5
+desc_test_complex.proto:87:36
+
+
+ > message_type[3] > options > rept[2]:
+desc_test_complex.proto:87:5
+desc_test_complex.proto:87:36
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:88:5
+desc_test_complex.proto:88:23
+
+
+ > message_type[3] > options > eee:
+desc_test_complex.proto:88:5
+desc_test_complex.proto:88:23
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:89:9
+desc_test_complex.proto:89:34
+
+
+ > message_type[3] > options > a:
+desc_test_complex.proto:89:9
+desc_test_complex.proto:89:34
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:90:9
+desc_test_complex.proto:90:86
+
+
+ > message_type[3] > options > a > test:
+desc_test_complex.proto:90:9
+desc_test_complex.proto:90:86
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:91:9
+desc_test_complex.proto:91:37
+
+
+ > message_type[3] > options > a > test > foo:
+desc_test_complex.proto:91:9
+desc_test_complex.proto:91:37
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:92:9
+desc_test_complex.proto:92:41
+
+
+ > message_type[3] > options > a > test > s > name:
+desc_test_complex.proto:92:9
+desc_test_complex.proto:92:41
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:93:5
+desc_test_complex.proto:93:34
+
+
+ > message_type[3] > options > a > test > s > id:
+desc_test_complex.proto:93:5
+desc_test_complex.proto:93:34
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:94:5
+desc_test_complex.proto:94:31
+
+
+ > message_type[3] > options > a > test > array[0]:
+desc_test_complex.proto:94:5
+desc_test_complex.proto:94:31
+
+
+ > message_type[3] > options:
+desc_test_complex.proto:95:5
+desc_test_complex.proto:95:31
+
+
+ > message_type[3] > options > a > test > array[1]:
+desc_test_complex.proto:95:5
+desc_test_complex.proto:95:31
+
+
+ > message_type[3] > field[0]:
+desc_test_complex.proto:97:5
+desc_test_complex.proto:97:28
+
+
+ > message_type[3] > field[0] > label:
+desc_test_complex.proto:97:5
+desc_test_complex.proto:97:13
+
+
+ > message_type[3] > field[0] > type_name:
+desc_test_complex.proto:97:14
+desc_test_complex.proto:97:18
+
+
+ > message_type[3] > field[0] > name:
+desc_test_complex.proto:97:19
+desc_test_complex.proto:97:23
+
+
+ > message_type[3] > field[0] > number:
+desc_test_complex.proto:97:26
+desc_test_complex.proto:97:27
+
+
+ > message_type[3] > field[1]:
+desc_test_complex.proto:98:5
+desc_test_complex.proto:98:67
+
+
+ > message_type[3] > field[1] > label:
+desc_test_complex.proto:98:5
+desc_test_complex.proto:98:13
+
+
+ > message_type[3] > field[1] > type_name:
+desc_test_complex.proto:98:14
+desc_test_complex.proto:98:43
+
+
+ > message_type[3] > field[1] > name:
+desc_test_complex.proto:98:44
+desc_test_complex.proto:98:47
+
+
+ > message_type[3] > field[1] > number:
+desc_test_complex.proto:98:50
+desc_test_complex.proto:98:51
+
+
+ > message_type[3] > field[1] > options:
+desc_test_complex.proto:98:52
+desc_test_complex.proto:98:66
+
+
+ > message_type[3] > field[1] > default_value:
+desc_test_complex.proto:98:53
+desc_test_complex.proto:98:65
+
+
+ > message_type[4]:
+desc_test_complex.proto:101:1
+desc_test_complex.proto:115:2
+
+
+ > message_type[4] > name:
+desc_test_complex.proto:101:9
+desc_test_complex.proto:101:18
+
+
+ > message_type[4] > field[0]:
+desc_test_complex.proto:102:9
+desc_test_complex.proto:102:41
+
+
+ > message_type[4] > field[0] > label:
+desc_test_complex.proto:102:9
+desc_test_complex.proto:102:17
+
+
+ > message_type[4] > field[0] > type:
+desc_test_complex.proto:102:18
+desc_test_complex.proto:102:22
+
+
+ > message_type[4] > field[0] > name:
+desc_test_complex.proto:102:23
+desc_test_complex.proto:102:36
+
+
+ > message_type[4] > field[0] > number:
+desc_test_complex.proto:102:39
+desc_test_complex.proto:102:40
+
+
+ > message_type[4] > enum_type[0]:
+desc_test_complex.proto:104:9
+desc_test_complex.proto:108:10
+
+
+ > message_type[4] > enum_type[0] > name:
+desc_test_complex.proto:104:14
+desc_test_complex.proto:104:20
+
+
+ > message_type[4] > enum_type[0] > value[0]:
+desc_test_complex.proto:105:17
+desc_test_complex.proto:105:27
+
+
+ > message_type[4] > enum_type[0] > value[0] > name:
+desc_test_complex.proto:105:17
+desc_test_complex.proto:105:22
+
+
+ > message_type[4] > enum_type[0] > value[0] > number:
+desc_test_complex.proto:105:25
+desc_test_complex.proto:105:26
+
+
+ > message_type[4] > enum_type[0] > value[1]:
+desc_test_complex.proto:106:17
+desc_test_complex.proto:106:26
+
+
+ > message_type[4] > enum_type[0] > value[1] > name:
+desc_test_complex.proto:106:17
+desc_test_complex.proto:106:21
+
+
+ > message_type[4] > enum_type[0] > value[1] > number:
+desc_test_complex.proto:106:24
+desc_test_complex.proto:106:25
+
+
+ > message_type[4] > enum_type[0] > value[2]:
+desc_test_complex.proto:107:17
+desc_test_complex.proto:107:27
+
+
+ > message_type[4] > enum_type[0] > value[2] > name:
+desc_test_complex.proto:107:17
+desc_test_complex.proto:107:22
+
+
+ > message_type[4] > enum_type[0] > value[2] > number:
+desc_test_complex.proto:107:25
+desc_test_complex.proto:107:26
+
+
+ > message_type[4] > nested_type[0]:
+desc_test_complex.proto:109:9
+desc_test_complex.proto:112:10
+
+
+ > message_type[4] > nested_type[0] > name:
+desc_test_complex.proto:109:17
+desc_test_complex.proto:109:27
+
+
+ > message_type[4] > nested_type[0] > field[0]:
+desc_test_complex.proto:110:17
+desc_test_complex.proto:110:44
+
+
+ > message_type[4] > nested_type[0] > field[0] > label:
+desc_test_complex.proto:110:17
+desc_test_complex.proto:110:25
+
+
+ > message_type[4] > nested_type[0] > field[0] > type_name:
+desc_test_complex.proto:110:26
+desc_test_complex.proto:110:32
+
+
+ > message_type[4] > nested_type[0] > field[0] > name:
+desc_test_complex.proto:110:33
+desc_test_complex.proto:110:39
+
+
+ > message_type[4] > nested_type[0] > field[0] > number:
+desc_test_complex.proto:110:42
+desc_test_complex.proto:110:43
+
+
+ > message_type[4] > nested_type[0] > field[1]:
+desc_test_complex.proto:111:17
+desc_test_complex.proto:111:44
+
+
+ > message_type[4] > nested_type[0] > field[1] > label:
+desc_test_complex.proto:111:17
+desc_test_complex.proto:111:25
+
+
+ > message_type[4] > nested_type[0] > field[1] > type:
+desc_test_complex.proto:111:26
+desc_test_complex.proto:111:32
+
+
+ > message_type[4] > nested_type[0] > field[1] > name:
+desc_test_complex.proto:111:33
+desc_test_complex.proto:111:39
+
+
+ > message_type[4] > nested_type[0] > field[1] > number:
+desc_test_complex.proto:111:42
+desc_test_complex.proto:111:43
+
+
+ > message_type[4] > field[1]:
+desc_test_complex.proto:114:9
+desc_test_complex.proto:114:44
+
+
+ > message_type[4] > field[1] > label:
+desc_test_complex.proto:114:9
+desc_test_complex.proto:114:17
+
+
+ > message_type[4] > field[1] > type_name:
+desc_test_complex.proto:114:18
+desc_test_complex.proto:114:28
+
+
+ > message_type[4] > field[1] > name:
+desc_test_complex.proto:114:29
+desc_test_complex.proto:114:39
+
+
+ > message_type[4] > field[1] > number:
+desc_test_complex.proto:114:42
+desc_test_complex.proto:114:43
+
+
+ > extension:
+desc_test_complex.proto:117:1
+desc_test_complex.proto:119:2
+
+
+ > extension[4]:
+desc_test_complex.proto:118:9
+desc_test_complex.proto:118:46
+
+
+ > extension[4] > extendee:
+desc_test_complex.proto:117:8
+desc_test_complex.proto:117:37
+
+
+ > extension[4] > label:
+desc_test_complex.proto:118:9
+desc_test_complex.proto:118:17
+
+
+ > extension[4] > type_name:
+desc_test_complex.proto:118:18
+desc_test_complex.proto:118:27
+
+
+ > extension[4] > name:
+desc_test_complex.proto:118:28
+desc_test_complex.proto:118:37
+
+
+ > extension[4] > number:
+desc_test_complex.proto:118:40
+desc_test_complex.proto:118:45
+
+
+ > service[0]:
+desc_test_complex.proto:121:1
+desc_test_complex.proto:140:2
+
+
+ > service[0] > name:
+desc_test_complex.proto:121:9
+desc_test_complex.proto:121:24
+
+
+ > service[0] > method[0]:
+desc_test_complex.proto:122:9
+desc_test_complex.proto:130:10
+
+
+ > service[0] > method[0] > name:
+desc_test_complex.proto:122:13
+desc_test_complex.proto:122:21
+
+
+ > service[0] > method[0] > input_type:
+desc_test_complex.proto:122:22
+desc_test_complex.proto:122:26
+
+
+ > service[0] > method[0] > output_type:
+desc_test_complex.proto:122:37
+desc_test_complex.proto:122:41
+
+
+ > service[0] > method[0] > options:
+desc_test_complex.proto:123:17
+desc_test_complex.proto:129:19
+
+
+ > service[0] > method[0] > options > validator:
+desc_test_complex.proto:123:17
+desc_test_complex.proto:129:19
+
+
+ > service[0] > method[1]:
+desc_test_complex.proto:131:9
+desc_test_complex.proto:139:10
+
+
+ > service[0] > method[1] > name:
+desc_test_complex.proto:131:13
+desc_test_complex.proto:131:16
+
+
+ > service[0] > method[1] > input_type:
+desc_test_complex.proto:131:17
+desc_test_complex.proto:131:21
+
+
+ > service[0] > method[1] > output_type:
+desc_test_complex.proto:131:32
+desc_test_complex.proto:131:36
+
+
+ > service[0] > method[1] > options:
+desc_test_complex.proto:132:17
+desc_test_complex.proto:138:19
+
+
+ > service[0] > method[1] > options > validator:
+desc_test_complex.proto:132:17
+desc_test_complex.proto:138:19
+
+
+ > message_type[5]:
+desc_test_complex.proto:142:1
+desc_test_complex.proto:168:2
+
+
+ > message_type[5] > name:
+desc_test_complex.proto:142:9
+desc_test_complex.proto:142:13
+
+
+ > message_type[5] > nested_type[0]:
+desc_test_complex.proto:143:3
+desc_test_complex.proto:148:4
+
+
+ > message_type[5] > nested_type[0] > name:
+desc_test_complex.proto:143:11
+desc_test_complex.proto:143:21
+
+
+ > message_type[5] > nested_type[0] > field[0]:
+desc_test_complex.proto:144:5
+desc_test_complex.proto:144:33
+
+
+ > message_type[5] > nested_type[0] > field[0] > label:
+desc_test_complex.proto:144:5
+desc_test_complex.proto:144:13
+
+
+ > message_type[5] > nested_type[0] > field[0] > type:
+desc_test_complex.proto:144:14
+desc_test_complex.proto:144:20
+
+
+ > message_type[5] > nested_type[0] > field[0] > name:
+desc_test_complex.proto:144:21
+desc_test_complex.proto:144:28
+
+
+ > message_type[5] > nested_type[0] > field[0] > number:
+desc_test_complex.proto:144:31
+desc_test_complex.proto:144:32
+
+
+ > message_type[5] > nested_type[0] > field[1]:
+desc_test_complex.proto:145:5
+desc_test_complex.proto:145:35
+
+
+ > message_type[5] > nested_type[0] > field[1] > label:
+desc_test_complex.proto:145:5
+desc_test_complex.proto:145:13
+
+
+ > message_type[5] > nested_type[0] > field[1] > type:
+desc_test_complex.proto:145:14
+desc_test_complex.proto:145:18
+
+
+ > message_type[5] > nested_type[0] > field[1] > name:
+desc_test_complex.proto:145:19
+desc_test_complex.proto:145:30
+
+
+ > message_type[5] > nested_type[0] > field[1] > number:
+desc_test_complex.proto:145:33
+desc_test_complex.proto:145:34
+
+
+ > message_type[5] > nested_type[0] > field[2]:
+desc_test_complex.proto:146:5
+desc_test_complex.proto:146:32
+
+
+ > message_type[5] > nested_type[0] > field[2] > label:
+desc_test_complex.proto:146:5
+desc_test_complex.proto:146:13
+
+
+ > message_type[5] > nested_type[0] > field[2] > type:
+desc_test_complex.proto:146:14
+desc_test_complex.proto:146:19
+
+
+ > message_type[5] > nested_type[0] > field[2] > name:
+desc_test_complex.proto:146:20
+desc_test_complex.proto:146:27
+
+
+ > message_type[5] > nested_type[0] > field[2] > number:
+desc_test_complex.proto:146:30
+desc_test_complex.proto:146:31
+
+
+ > message_type[5] > nested_type[0] > field[3]:
+desc_test_complex.proto:147:5
+desc_test_complex.proto:147:32
+
+
+ > message_type[5] > nested_type[0] > field[3] > label:
+desc_test_complex.proto:147:5
+desc_test_complex.proto:147:13
+
+
+ > message_type[5] > nested_type[0] > field[3] > type:
+desc_test_complex.proto:147:14
+desc_test_complex.proto:147:19
+
+
+ > message_type[5] > nested_type[0] > field[3] > name:
+desc_test_complex.proto:147:20
+desc_test_complex.proto:147:27
+
+
+ > message_type[5] > nested_type[0] > field[3] > number:
+desc_test_complex.proto:147:30
+desc_test_complex.proto:147:31
+
+
+ > message_type[5] > nested_type[1]:
+desc_test_complex.proto:149:3
+desc_test_complex.proto:152:4
+
+
+ > message_type[5] > nested_type[1] > name:
+desc_test_complex.proto:149:11
+desc_test_complex.proto:149:18
+
+
+ > message_type[5] > nested_type[1] > field[0]:
+desc_test_complex.proto:150:5
+desc_test_complex.proto:150:32
+
+
+ > message_type[5] > nested_type[1] > field[0] > label:
+desc_test_complex.proto:150:5
+desc_test_complex.proto:150:13
+
+
+ > message_type[5] > nested_type[1] > field[0] > type:
+desc_test_complex.proto:150:14
+desc_test_complex.proto:150:19
+
+
+ > message_type[5] > nested_type[1] > field[0] > name:
+desc_test_complex.proto:150:20
+desc_test_complex.proto:150:27
+
+
+ > message_type[5] > nested_type[1] > field[0] > number:
+desc_test_complex.proto:150:30
+desc_test_complex.proto:150:31
+
+
+ > message_type[5] > nested_type[1] > field[1]:
+desc_test_complex.proto:151:5
+desc_test_complex.proto:151:33
+
+
+ > message_type[5] > nested_type[1] > field[1] > label:
+desc_test_complex.proto:151:5
+desc_test_complex.proto:151:13
+
+
+ > message_type[5] > nested_type[1] > field[1] > type:
+desc_test_complex.proto:151:14
+desc_test_complex.proto:151:20
+
+
+ > message_type[5] > nested_type[1] > field[1] > name:
+desc_test_complex.proto:151:21
+desc_test_complex.proto:151:28
+
+
+ > message_type[5] > nested_type[1] > field[1] > number:
+desc_test_complex.proto:151:31
+desc_test_complex.proto:151:32
+
+
+ > message_type[5] > nested_type[2]:
+desc_test_complex.proto:153:3
+desc_test_complex.proto:158:4
+
+
+ > message_type[5] > nested_type[2] > name:
+desc_test_complex.proto:153:11
+desc_test_complex.proto:153:23
+
+
+ > message_type[5] > nested_type[2] > field[0]:
+desc_test_complex.proto:154:5
+desc_test_complex.proto:154:35
+
+
+ > message_type[5] > nested_type[2] > field[0] > label:
+desc_test_complex.proto:154:5
+desc_test_complex.proto:154:13
+
+
+ > message_type[5] > nested_type[2] > field[0] > type:
+desc_test_complex.proto:154:14
+desc_test_complex.proto:154:18
+
+
+ > message_type[5] > nested_type[2] > field[0] > name:
+desc_test_complex.proto:154:19
+desc_test_complex.proto:154:30
+
+
+ > message_type[5] > nested_type[2] > field[0] > number:
+desc_test_complex.proto:154:33
+desc_test_complex.proto:154:34
+
+
+ > message_type[5] > nested_type[2] > field[1]:
+desc_test_complex.proto:155:5
+desc_test_complex.proto:155:34
+
+
+ > message_type[5] > nested_type[2] > field[1] > label:
+desc_test_complex.proto:155:5
+desc_test_complex.proto:155:13
+
+
+ > message_type[5] > nested_type[2] > field[1] > type:
+desc_test_complex.proto:155:14
+desc_test_complex.proto:155:19
+
+
+ > message_type[5] > nested_type[2] > field[1] > name:
+desc_test_complex.proto:155:20
+desc_test_complex.proto:155:29
+
+
+ > message_type[5] > nested_type[2] > field[1] > number:
+desc_test_complex.proto:155:32
+desc_test_complex.proto:155:33
+
+
+ > message_type[5] > nested_type[2] > field[2]:
+desc_test_complex.proto:156:5
+desc_test_complex.proto:156:34
+
+
+ > message_type[5] > nested_type[2] > field[2] > label:
+desc_test_complex.proto:156:5
+desc_test_complex.proto:156:13
+
+
+ > message_type[5] > nested_type[2] > field[2] > type:
+desc_test_complex.proto:156:14
+desc_test_complex.proto:156:19
+
+
+ > message_type[5] > nested_type[2] > field[2] > name:
+desc_test_complex.proto:156:20
+desc_test_complex.proto:156:29
+
+
+ > message_type[5] > nested_type[2] > field[2] > number:
+desc_test_complex.proto:156:32
+desc_test_complex.proto:156:33
+
+
+ > message_type[5] > nested_type[2] > field[3]:
+desc_test_complex.proto:157:5
+desc_test_complex.proto:157:29
+
+
+ > message_type[5] > nested_type[2] > field[3] > label:
+desc_test_complex.proto:157:5
+desc_test_complex.proto:157:13
+
+
+ > message_type[5] > nested_type[2] > field[3] > type_name:
+desc_test_complex.proto:157:14
+desc_test_complex.proto:157:18
+
+
+ > message_type[5] > nested_type[2] > field[3] > name:
+desc_test_complex.proto:157:19
+desc_test_complex.proto:157:24
+
+
+ > message_type[5] > nested_type[2] > field[3] > number:
+desc_test_complex.proto:157:27
+desc_test_complex.proto:157:28
+
+
+ > message_type[5] > oneof_decl[0]:
+desc_test_complex.proto:159:3
+desc_test_complex.proto:167:4
+
+
+ > message_type[5] > oneof_decl[0] > name:
+desc_test_complex.proto:159:9
+desc_test_complex.proto:159:13
+
+
+ > message_type[5] > field[0]:
+desc_test_complex.proto:160:5
+desc_test_complex.proto:160:27
+
+
+ > message_type[5] > field[0] > type_name:
+desc_test_complex.proto:160:5
+desc_test_complex.proto:160:15
+
+
+ > message_type[5] > field[0] > name:
+desc_test_complex.proto:160:16
+desc_test_complex.proto:160:22
+
+
+ > message_type[5] > field[0] > number:
+desc_test_complex.proto:160:25
+desc_test_complex.proto:160:26
+
+
+ > message_type[5] > field[1]:
+desc_test_complex.proto:161:5
+desc_test_complex.proto:161:31
+
+
+ > message_type[5] > field[1] > type_name:
+desc_test_complex.proto:161:5
+desc_test_complex.proto:161:17
+
+
+ > message_type[5] > field[1] > name:
+desc_test_complex.proto:161:18
+desc_test_complex.proto:161:26
+
+
+ > message_type[5] > field[1] > number:
+desc_test_complex.proto:161:29
+desc_test_complex.proto:161:30
+
+
+ > message_type[5] > field[2]:
+desc_test_complex.proto:162:5
+desc_test_complex.proto:162:21
+
+
+ > message_type[5] > field[2] > type_name:
+desc_test_complex.proto:162:5
+desc_test_complex.proto:162:12
+
+
+ > message_type[5] > field[2] > name:
+desc_test_complex.proto:162:13
+desc_test_complex.proto:162:16
+
+
+ > message_type[5] > field[2] > number:
+desc_test_complex.proto:162:19
+desc_test_complex.proto:162:20
+
+
+ > message_type[5] > field[3]:
+desc_test_complex.proto:163:9
+desc_test_complex.proto:166:10
+
+
+ > message_type[5] > field[3] > type:
+desc_test_complex.proto:163:9
+desc_test_complex.proto:163:14
+
+
+ > message_type[5] > field[3] > name:
+desc_test_complex.proto:163:15
+desc_test_complex.proto:163:24
+
+
+ > message_type[5] > field[3] > number:
+desc_test_complex.proto:163:27
+desc_test_complex.proto:163:28
+
+
+ > message_type[5] > nested_type[3]:
+desc_test_complex.proto:163:9
+desc_test_complex.proto:166:10
+
+
+ > message_type[5] > nested_type[3] > name:
+desc_test_complex.proto:163:15
+desc_test_complex.proto:163:24
+
+
+ > message_type[5] > field[3] > type_name:
+desc_test_complex.proto:163:15
+desc_test_complex.proto:163:24
+
+
+ > message_type[5] > nested_type[3] > field[0]:
+desc_test_complex.proto:164:17
+desc_test_complex.proto:164:45
+
+
+ > message_type[5] > nested_type[3] > field[0] > label:
+desc_test_complex.proto:164:17
+desc_test_complex.proto:164:25
+
+
+ > message_type[5] > nested_type[3] > field[0] > type:
+desc_test_complex.proto:164:26
+desc_test_complex.proto:164:32
+
+
+ > message_type[5] > nested_type[3] > field[0] > name:
+desc_test_complex.proto:164:33
+desc_test_complex.proto:164:40
+
+
+ > message_type[5] > nested_type[3] > field[0] > number:
+desc_test_complex.proto:164:43
+desc_test_complex.proto:164:44
+
+
+ > message_type[5] > nested_type[3] > field[1]:
+desc_test_complex.proto:165:17
+desc_test_complex.proto:165:45
+
+
+ > message_type[5] > nested_type[3] > field[1] > label:
+desc_test_complex.proto:165:17
+desc_test_complex.proto:165:25
+
+
+ > message_type[5] > nested_type[3] > field[1] > type:
+desc_test_complex.proto:165:26
+desc_test_complex.proto:165:32
+
+
+ > message_type[5] > nested_type[3] > field[1] > name:
+desc_test_complex.proto:165:33
+desc_test_complex.proto:165:40
+
+
+ > message_type[5] > nested_type[3] > field[1] > number:
+desc_test_complex.proto:165:43
+desc_test_complex.proto:165:44
+
+
+ > extension:
+desc_test_complex.proto:170:1
+desc_test_complex.proto:172:2
+
+
+ > extension[5]:
+desc_test_complex.proto:171:3
+desc_test_complex.proto:171:30
+
+
+ > extension[5] > extendee:
+desc_test_complex.proto:170:8
+desc_test_complex.proto:170:36
+
+
+ > extension[5] > label:
+desc_test_complex.proto:171:3
+desc_test_complex.proto:171:11
+
+
+ > extension[5] > type_name:
+desc_test_complex.proto:171:12
+desc_test_complex.proto:171:16
+
+
+ > extension[5] > name:
+desc_test_complex.proto:171:17
+desc_test_complex.proto:171:22
+
+
+ > extension[5] > number:
+desc_test_complex.proto:171:25
+desc_test_complex.proto:171:29
+
+
+ > message_type[6]:
+desc_test_complex.proto:174:1
+desc_test_complex.proto:180:2
+
+
+ > message_type[6] > name:
+desc_test_complex.proto:174:9
+desc_test_complex.proto:174:24
+
+
+ > message_type[6] > field[0]:
+desc_test_complex.proto:175:5
+desc_test_complex.proto:179:11
+
+
+ > message_type[6] > field[0] > label:
+desc_test_complex.proto:175:5
+desc_test_complex.proto:175:13
+
+
+ > message_type[6] > field[0] > type:
+desc_test_complex.proto:175:14
+desc_test_complex.proto:175:20
+
+
+ > message_type[6] > field[0] > name:
+desc_test_complex.proto:175:21
+desc_test_complex.proto:175:29
+
+
+ > message_type[6] > field[0] > number:
+desc_test_complex.proto:175:32
+desc_test_complex.proto:175:33
+
+
+ > message_type[6] > field[0] > options:
+desc_test_complex.proto:176:7
+desc_test_complex.proto:179:10
+
+
+ > message_type[6] > field[0] > options > rules > repeated:
+desc_test_complex.proto:176:8
+desc_test_complex.proto:179:9
+
+
+ > message_type[7]:
+desc_test_complex.proto:184:1
+desc_test_complex.proto:220:2
+ Leading detached comment [0]:
+ tests cases where field names collide with keywords
+
+
+
+ > message_type[7] > name:
+desc_test_complex.proto:184:9
+desc_test_complex.proto:184:26
+
+
+ > message_type[7] > field[0]:
+desc_test_complex.proto:185:9
+desc_test_complex.proto:185:34
+
+
+ > message_type[7] > field[0] > label:
+desc_test_complex.proto:185:9
+desc_test_complex.proto:185:17
+
+
+ > message_type[7] > field[0] > type:
+desc_test_complex.proto:185:18
+desc_test_complex.proto:185:22
+
+
+ > message_type[7] > field[0] > name:
+desc_test_complex.proto:185:23
+desc_test_complex.proto:185:29
+
+
+ > message_type[7] > field[0] > number:
+desc_test_complex.proto:185:32
+desc_test_complex.proto:185:33
+
+
+ > message_type[7] > field[1]:
+desc_test_complex.proto:186:9
+desc_test_complex.proto:186:34
+
+
+ > message_type[7] > field[1] > label:
+desc_test_complex.proto:186:9
+desc_test_complex.proto:186:17
+
+
+ > message_type[7] > field[1] > type:
+desc_test_complex.proto:186:18
+desc_test_complex.proto:186:22
+
+
+ > message_type[7] > field[1] > name:
+desc_test_complex.proto:186:23
+desc_test_complex.proto:186:29
+
+
+ > message_type[7] > field[1] > number:
+desc_test_complex.proto:186:32
+desc_test_complex.proto:186:33
+
+
+ > message_type[7] > field[2]:
+desc_test_complex.proto:187:9
+desc_test_complex.proto:187:34
+
+
+ > message_type[7] > field[2] > label:
+desc_test_complex.proto:187:9
+desc_test_complex.proto:187:17
+
+
+ > message_type[7] > field[2] > type:
+desc_test_complex.proto:187:18
+desc_test_complex.proto:187:22
+
+
+ > message_type[7] > field[2] > name:
+desc_test_complex.proto:187:23
+desc_test_complex.proto:187:29
+
+
+ > message_type[7] > field[2] > number:
+desc_test_complex.proto:187:32
+desc_test_complex.proto:187:33
+
+
+ > message_type[7] > field[3]:
+desc_test_complex.proto:188:9
+desc_test_complex.proto:188:32
+
+
+ > message_type[7] > field[3] > label:
+desc_test_complex.proto:188:9
+desc_test_complex.proto:188:17
+
+
+ > message_type[7] > field[3] > type:
+desc_test_complex.proto:188:18
+desc_test_complex.proto:188:22
+
+
+ > message_type[7] > field[3] > name:
+desc_test_complex.proto:188:23
+desc_test_complex.proto:188:27
+
+
+ > message_type[7] > field[3] > number:
+desc_test_complex.proto:188:30
+desc_test_complex.proto:188:31
+
+
+ > message_type[7] > field[4]:
+desc_test_complex.proto:189:9
+desc_test_complex.proto:189:35
+
+
+ > message_type[7] > field[4] > label:
+desc_test_complex.proto:189:9
+desc_test_complex.proto:189:17
+
+
+ > message_type[7] > field[4] > type:
+desc_test_complex.proto:189:18
+desc_test_complex.proto:189:22
+
+
+ > message_type[7] > field[4] > name:
+desc_test_complex.proto:189:23
+desc_test_complex.proto:189:30
+
+
+ > message_type[7] > field[4] > number:
+desc_test_complex.proto:189:33
+desc_test_complex.proto:189:34
+
+
+ > message_type[7] > field[5]:
+desc_test_complex.proto:190:9
+desc_test_complex.proto:190:36
+
+
+ > message_type[7] > field[5] > label:
+desc_test_complex.proto:190:9
+desc_test_complex.proto:190:17
+
+
+ > message_type[7] > field[5] > type:
+desc_test_complex.proto:190:18
+desc_test_complex.proto:190:24
+
+
+ > message_type[7] > field[5] > name:
+desc_test_complex.proto:190:25
+desc_test_complex.proto:190:31
+
+
+ > message_type[7] > field[5] > number:
+desc_test_complex.proto:190:34
+desc_test_complex.proto:190:35
+
+
+ > message_type[7] > field[6]:
+desc_test_complex.proto:191:9
+desc_test_complex.proto:191:34
+
+
+ > message_type[7] > field[6] > label:
+desc_test_complex.proto:191:9
+desc_test_complex.proto:191:17
+
+
+ > message_type[7] > field[6] > type:
+desc_test_complex.proto:191:18
+desc_test_complex.proto:191:23
+
+
+ > message_type[7] > field[6] > name:
+desc_test_complex.proto:191:24
+desc_test_complex.proto:191:29
+
+
+ > message_type[7] > field[6] > number:
+desc_test_complex.proto:191:32
+desc_test_complex.proto:191:33
+
+
+ > message_type[7] > field[7]:
+desc_test_complex.proto:192:9
+desc_test_complex.proto:192:34
+
+
+ > message_type[7] > field[7] > label:
+desc_test_complex.proto:192:9
+desc_test_complex.proto:192:17
+
+
+ > message_type[7] > field[7] > type:
+desc_test_complex.proto:192:18
+desc_test_complex.proto:192:23
+
+
+ > message_type[7] > field[7] > name:
+desc_test_complex.proto:192:24
+desc_test_complex.proto:192:29
+
+
+ > message_type[7] > field[7] > number:
+desc_test_complex.proto:192:32
+desc_test_complex.proto:192:33
+
+
+ > message_type[7] > field[8]:
+desc_test_complex.proto:193:9
+desc_test_complex.proto:193:34
+
+
+ > message_type[7] > field[8] > label:
+desc_test_complex.proto:193:9
+desc_test_complex.proto:193:17
+
+
+ > message_type[7] > field[8] > type:
+desc_test_complex.proto:193:18
+desc_test_complex.proto:193:23
+
+
+ > message_type[7] > field[8] > name:
+desc_test_complex.proto:193:24
+desc_test_complex.proto:193:29
+
+
+ > message_type[7] > field[8] > number:
+desc_test_complex.proto:193:32
+desc_test_complex.proto:193:33
+
+
+ > message_type[7] > field[9]:
+desc_test_complex.proto:194:9
+desc_test_complex.proto:194:37
+
+
+ > message_type[7] > field[9] > label:
+desc_test_complex.proto:194:9
+desc_test_complex.proto:194:17
+
+
+ > message_type[7] > field[9] > type:
+desc_test_complex.proto:194:18
+desc_test_complex.proto:194:24
+
+
+ > message_type[7] > field[9] > name:
+desc_test_complex.proto:194:25
+desc_test_complex.proto:194:31
+
+
+ > message_type[7] > field[9] > number:
+desc_test_complex.proto:194:34
+desc_test_complex.proto:194:36
+
+
+ > message_type[7] > field[10]:
+desc_test_complex.proto:195:9
+desc_test_complex.proto:195:37
+
+
+ > message_type[7] > field[10] > label:
+desc_test_complex.proto:195:9
+desc_test_complex.proto:195:17
+
+
+ > message_type[7] > field[10] > type:
+desc_test_complex.proto:195:18
+desc_test_complex.proto:195:24
+
+
+ > message_type[7] > field[10] > name:
+desc_test_complex.proto:195:25
+desc_test_complex.proto:195:31
+
+
+ > message_type[7] > field[10] > number:
+desc_test_complex.proto:195:34
+desc_test_complex.proto:195:36
+
+
+ > message_type[7] > field[11]:
+desc_test_complex.proto:196:9
+desc_test_complex.proto:196:37
+
+
+ > message_type[7] > field[11] > label:
+desc_test_complex.proto:196:9
+desc_test_complex.proto:196:17
+
+
+ > message_type[7] > field[11] > type:
+desc_test_complex.proto:196:18
+desc_test_complex.proto:196:24
+
+
+ > message_type[7] > field[11] > name:
+desc_test_complex.proto:196:25
+desc_test_complex.proto:196:31
+
+
+ > message_type[7] > field[11] > number:
+desc_test_complex.proto:196:34
+desc_test_complex.proto:196:36
+
+
+ > message_type[7] > field[12]:
+desc_test_complex.proto:197:9
+desc_test_complex.proto:197:37
+
+
+ > message_type[7] > field[12] > label:
+desc_test_complex.proto:197:9
+desc_test_complex.proto:197:17
+
+
+ > message_type[7] > field[12] > type:
+desc_test_complex.proto:197:18
+desc_test_complex.proto:197:24
+
+
+ > message_type[7] > field[12] > name:
+desc_test_complex.proto:197:25
+desc_test_complex.proto:197:31
+
+
+ > message_type[7] > field[12] > number:
+desc_test_complex.proto:197:34
+desc_test_complex.proto:197:36
+
+
+ > message_type[7] > field[13]:
+desc_test_complex.proto:198:9
+desc_test_complex.proto:198:39
+
+
+ > message_type[7] > field[13] > label:
+desc_test_complex.proto:198:9
+desc_test_complex.proto:198:17
+
+
+ > message_type[7] > field[13] > type:
+desc_test_complex.proto:198:18
+desc_test_complex.proto:198:25
+
+
+ > message_type[7] > field[13] > name:
+desc_test_complex.proto:198:26
+desc_test_complex.proto:198:33
+
+
+ > message_type[7] > field[13] > number:
+desc_test_complex.proto:198:36
+desc_test_complex.proto:198:38
+
+
+ > message_type[7] > field[14]:
+desc_test_complex.proto:199:9
+desc_test_complex.proto:199:39
+
+
+ > message_type[7] > field[14] > label:
+desc_test_complex.proto:199:9
+desc_test_complex.proto:199:17
+
+
+ > message_type[7] > field[14] > type:
+desc_test_complex.proto:199:18
+desc_test_complex.proto:199:25
+
+
+ > message_type[7] > field[14] > name:
+desc_test_complex.proto:199:26
+desc_test_complex.proto:199:33
+
+
+ > message_type[7] > field[14] > number:
+desc_test_complex.proto:199:36
+desc_test_complex.proto:199:38
+
+
+ > message_type[7] > field[15]:
+desc_test_complex.proto:200:9
+desc_test_complex.proto:200:41
+
+
+ > message_type[7] > field[15] > label:
+desc_test_complex.proto:200:9
+desc_test_complex.proto:200:17
+
+
+ > message_type[7] > field[15] > type:
+desc_test_complex.proto:200:18
+desc_test_complex.proto:200:26
+
+
+ > message_type[7] > field[15] > name:
+desc_test_complex.proto:200:27
+desc_test_complex.proto:200:35
+
+
+ > message_type[7] > field[15] > number:
+desc_test_complex.proto:200:38
+desc_test_complex.proto:200:40
+
+
+ > message_type[7] > field[16]:
+desc_test_complex.proto:201:9
+desc_test_complex.proto:201:41
+
+
+ > message_type[7] > field[16] > label:
+desc_test_complex.proto:201:9
+desc_test_complex.proto:201:17
+
+
+ > message_type[7] > field[16] > type:
+desc_test_complex.proto:201:18
+desc_test_complex.proto:201:26
+
+
+ > message_type[7] > field[16] > name:
+desc_test_complex.proto:201:27
+desc_test_complex.proto:201:35
+
+
+ > message_type[7] > field[16] > number:
+desc_test_complex.proto:201:38
+desc_test_complex.proto:201:40
+
+
+ > message_type[7] > field[17]:
+desc_test_complex.proto:202:9
+desc_test_complex.proto:202:33
+
+
+ > message_type[7] > field[17] > label:
+desc_test_complex.proto:202:9
+desc_test_complex.proto:202:17
+
+
+ > message_type[7] > field[17] > type:
+desc_test_complex.proto:202:18
+desc_test_complex.proto:202:22
+
+
+ > message_type[7] > field[17] > name:
+desc_test_complex.proto:202:23
+desc_test_complex.proto:202:27
+
+
+ > message_type[7] > field[17] > number:
+desc_test_complex.proto:202:30
+desc_test_complex.proto:202:32
+
+
+ > message_type[7] > field[18]:
+desc_test_complex.proto:203:9
+desc_test_complex.proto:203:35
+
+
+ > message_type[7] > field[18] > label:
+desc_test_complex.proto:203:9
+desc_test_complex.proto:203:17
+
+
+ > message_type[7] > field[18] > type:
+desc_test_complex.proto:203:18
+desc_test_complex.proto:203:23
+
+
+ > message_type[7] > field[18] > name:
+desc_test_complex.proto:203:24
+desc_test_complex.proto:203:29
+
+
+ > message_type[7] > field[18] > number:
+desc_test_complex.proto:203:32
+desc_test_complex.proto:203:34
+
+
+ > message_type[7] > field[19]:
+desc_test_complex.proto:204:9
+desc_test_complex.proto:204:37
+
+
+ > message_type[7] > field[19] > label:
+desc_test_complex.proto:204:9
+desc_test_complex.proto:204:17
+
+
+ > message_type[7] > field[19] > type:
+desc_test_complex.proto:204:18
+desc_test_complex.proto:204:24
+
+
+ > message_type[7] > field[19] > name:
+desc_test_complex.proto:204:25
+desc_test_complex.proto:204:31
+
+
+ > message_type[7] > field[19] > number:
+desc_test_complex.proto:204:34
+desc_test_complex.proto:204:36
+
+
+ > message_type[7] > field[20]:
+desc_test_complex.proto:205:9
+desc_test_complex.proto:205:37
+
+
+ > message_type[7] > field[20] > label:
+desc_test_complex.proto:205:9
+desc_test_complex.proto:205:17
+
+
+ > message_type[7] > field[20] > type:
+desc_test_complex.proto:205:18
+desc_test_complex.proto:205:22
+
+
+ > message_type[7] > field[20] > name:
+desc_test_complex.proto:205:23
+desc_test_complex.proto:205:31
+
+
+ > message_type[7] > field[20] > number:
+desc_test_complex.proto:205:34
+desc_test_complex.proto:205:36
+
+
+ > message_type[7] > field[21]:
+desc_test_complex.proto:206:9
+desc_test_complex.proto:206:37
+
+
+ > message_type[7] > field[21] > label:
+desc_test_complex.proto:206:9
+desc_test_complex.proto:206:17
+
+
+ > message_type[7] > field[21] > type:
+desc_test_complex.proto:206:18
+desc_test_complex.proto:206:22
+
+
+ > message_type[7] > field[21] > name:
+desc_test_complex.proto:206:23
+desc_test_complex.proto:206:31
+
+
+ > message_type[7] > field[21] > number:
+desc_test_complex.proto:206:34
+desc_test_complex.proto:206:36
+
+
+ > message_type[7] > field[22]:
+desc_test_complex.proto:207:9
+desc_test_complex.proto:207:37
+
+
+ > message_type[7] > field[22] > label:
+desc_test_complex.proto:207:9
+desc_test_complex.proto:207:17
+
+
+ > message_type[7] > field[22] > type:
+desc_test_complex.proto:207:18
+desc_test_complex.proto:207:22
+
+
+ > message_type[7] > field[22] > name:
+desc_test_complex.proto:207:23
+desc_test_complex.proto:207:31
+
+
+ > message_type[7] > field[22] > number:
+desc_test_complex.proto:207:34
+desc_test_complex.proto:207:36
+
+
+ > message_type[7] > field[23]:
+desc_test_complex.proto:208:9
+desc_test_complex.proto:208:36
+
+
+ > message_type[7] > field[23] > label:
+desc_test_complex.proto:208:9
+desc_test_complex.proto:208:17
+
+
+ > message_type[7] > field[23] > type:
+desc_test_complex.proto:208:18
+desc_test_complex.proto:208:22
+
+
+ > message_type[7] > field[23] > name:
+desc_test_complex.proto:208:23
+desc_test_complex.proto:208:30
+
+
+ > message_type[7] > field[23] > number:
+desc_test_complex.proto:208:33
+desc_test_complex.proto:208:35
+
+
+ > message_type[7] > field[24]:
+desc_test_complex.proto:209:9
+desc_test_complex.proto:209:33
+
+
+ > message_type[7] > field[24] > label:
+desc_test_complex.proto:209:9
+desc_test_complex.proto:209:17
+
+
+ > message_type[7] > field[24] > type:
+desc_test_complex.proto:209:18
+desc_test_complex.proto:209:22
+
+
+ > message_type[7] > field[24] > name:
+desc_test_complex.proto:209:23
+desc_test_complex.proto:209:27
+
+
+ > message_type[7] > field[24] > number:
+desc_test_complex.proto:209:30
+desc_test_complex.proto:209:32
+
+
+ > message_type[7] > field[25]:
+desc_test_complex.proto:210:9
+desc_test_complex.proto:210:36
+
+
+ > message_type[7] > field[25] > label:
+desc_test_complex.proto:210:9
+desc_test_complex.proto:210:17
+
+
+ > message_type[7] > field[25] > type:
+desc_test_complex.proto:210:18
+desc_test_complex.proto:210:22
+
+
+ > message_type[7] > field[25] > name:
+desc_test_complex.proto:210:23
+desc_test_complex.proto:210:30
+
+
+ > message_type[7] > field[25] > number:
+desc_test_complex.proto:210:33
+desc_test_complex.proto:210:35
+
+
+ > message_type[7] > field[26]:
+desc_test_complex.proto:211:9
+desc_test_complex.proto:211:32
+
+
+ > message_type[7] > field[26] > label:
+desc_test_complex.proto:211:9
+desc_test_complex.proto:211:17
+
+
+ > message_type[7] > field[26] > type:
+desc_test_complex.proto:211:18
+desc_test_complex.proto:211:22
+
+
+ > message_type[7] > field[26] > name:
+desc_test_complex.proto:211:23
+desc_test_complex.proto:211:26
+
+
+ > message_type[7] > field[26] > number:
+desc_test_complex.proto:211:29
+desc_test_complex.proto:211:31
+
+
+ > message_type[7] > field[27]:
+desc_test_complex.proto:212:9
+desc_test_complex.proto:212:35
+
+
+ > message_type[7] > field[27] > label:
+desc_test_complex.proto:212:9
+desc_test_complex.proto:212:17
+
+
+ > message_type[7] > field[27] > type:
+desc_test_complex.proto:212:18
+desc_test_complex.proto:212:22
+
+
+ > message_type[7] > field[27] > name:
+desc_test_complex.proto:212:23
+desc_test_complex.proto:212:29
+
+
+ > message_type[7] > field[27] > number:
+desc_test_complex.proto:212:32
+desc_test_complex.proto:212:34
+
+
+ > message_type[7] > field[28]:
+desc_test_complex.proto:213:9
+desc_test_complex.proto:213:35
+
+
+ > message_type[7] > field[28] > label:
+desc_test_complex.proto:213:9
+desc_test_complex.proto:213:17
+
+
+ > message_type[7] > field[28] > type:
+desc_test_complex.proto:213:18
+desc_test_complex.proto:213:22
+
+
+ > message_type[7] > field[28] > name:
+desc_test_complex.proto:213:23
+desc_test_complex.proto:213:29
+
+
+ > message_type[7] > field[28] > number:
+desc_test_complex.proto:213:32
+desc_test_complex.proto:213:34
+
+
+ > message_type[7] > field[29]:
+desc_test_complex.proto:214:9
+desc_test_complex.proto:214:39
+
+
+ > message_type[7] > field[29] > label:
+desc_test_complex.proto:214:9
+desc_test_complex.proto:214:17
+
+
+ > message_type[7] > field[29] > type:
+desc_test_complex.proto:214:18
+desc_test_complex.proto:214:22
+
+
+ > message_type[7] > field[29] > name:
+desc_test_complex.proto:214:23
+desc_test_complex.proto:214:33
+
+
+ > message_type[7] > field[29] > number:
+desc_test_complex.proto:214:36
+desc_test_complex.proto:214:38
+
+
+ > message_type[7] > field[30]:
+desc_test_complex.proto:215:9
+desc_test_complex.proto:215:37
+
+
+ > message_type[7] > field[30] > label:
+desc_test_complex.proto:215:9
+desc_test_complex.proto:215:17
+
+
+ > message_type[7] > field[30] > type:
+desc_test_complex.proto:215:18
+desc_test_complex.proto:215:22
+
+
+ > message_type[7] > field[30] > name:
+desc_test_complex.proto:215:23
+desc_test_complex.proto:215:31
+
+
+ > message_type[7] > field[30] > number:
+desc_test_complex.proto:215:34
+desc_test_complex.proto:215:36
+
+
+ > message_type[7] > field[31]:
+desc_test_complex.proto:216:9
+desc_test_complex.proto:216:31
+
+
+ > message_type[7] > field[31] > label:
+desc_test_complex.proto:216:9
+desc_test_complex.proto:216:17
+
+
+ > message_type[7] > field[31] > type:
+desc_test_complex.proto:216:18
+desc_test_complex.proto:216:22
+
+
+ > message_type[7] > field[31] > name:
+desc_test_complex.proto:216:23
+desc_test_complex.proto:216:25
+
+
+ > message_type[7] > field[31] > number:
+desc_test_complex.proto:216:28
+desc_test_complex.proto:216:30
+
+
+ > message_type[7] > field[32]:
+desc_test_complex.proto:217:9
+desc_test_complex.proto:217:34
+
+
+ > message_type[7] > field[32] > label:
+desc_test_complex.proto:217:9
+desc_test_complex.proto:217:17
+
+
+ > message_type[7] > field[32] > type:
+desc_test_complex.proto:217:18
+desc_test_complex.proto:217:23
+
+
+ > message_type[7] > field[32] > name:
+desc_test_complex.proto:217:24
+desc_test_complex.proto:217:28
+
+
+ > message_type[7] > field[32] > number:
+desc_test_complex.proto:217:31
+desc_test_complex.proto:217:33
+
+
+ > message_type[7] > field[33]:
+desc_test_complex.proto:218:9
+desc_test_complex.proto:218:35
+
+
+ > message_type[7] > field[33] > label:
+desc_test_complex.proto:218:9
+desc_test_complex.proto:218:17
+
+
+ > message_type[7] > field[33] > type:
+desc_test_complex.proto:218:18
+desc_test_complex.proto:218:23
+
+
+ > message_type[7] > field[33] > name:
+desc_test_complex.proto:218:24
+desc_test_complex.proto:218:29
+
+
+ > message_type[7] > field[33] > number:
+desc_test_complex.proto:218:32
+desc_test_complex.proto:218:34
+
+
+ > message_type[7] > field[34]:
+desc_test_complex.proto:219:9
+desc_test_complex.proto:219:37
+
+
+ > message_type[7] > field[34] > label:
+desc_test_complex.proto:219:9
+desc_test_complex.proto:219:17
+
+
+ > message_type[7] > field[34] > type:
+desc_test_complex.proto:219:18
+desc_test_complex.proto:219:23
+
+
+ > message_type[7] > field[34] > name:
+desc_test_complex.proto:219:24
+desc_test_complex.proto:219:31
+
+
+ > message_type[7] > field[34] > number:
+desc_test_complex.proto:219:34
+desc_test_complex.proto:219:36
+
+
+ > extension:
+desc_test_complex.proto:222:1
+desc_test_complex.proto:259:2
+
+
+ > extension[6]:
+desc_test_complex.proto:223:9
+desc_test_complex.proto:223:38
+
+
+ > extension[6] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[6] > label:
+desc_test_complex.proto:223:9
+desc_test_complex.proto:223:17
+
+
+ > extension[6] > type:
+desc_test_complex.proto:223:18
+desc_test_complex.proto:223:22
+
+
+ > extension[6] > name:
+desc_test_complex.proto:223:23
+desc_test_complex.proto:223:29
+
+
+ > extension[6] > number:
+desc_test_complex.proto:223:32
+desc_test_complex.proto:223:37
+
+
+ > extension[7]:
+desc_test_complex.proto:224:9
+desc_test_complex.proto:224:38
+
+
+ > extension[7] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[7] > label:
+desc_test_complex.proto:224:9
+desc_test_complex.proto:224:17
+
+
+ > extension[7] > type:
+desc_test_complex.proto:224:18
+desc_test_complex.proto:224:22
+
+
+ > extension[7] > name:
+desc_test_complex.proto:224:23
+desc_test_complex.proto:224:29
+
+
+ > extension[7] > number:
+desc_test_complex.proto:224:32
+desc_test_complex.proto:224:37
+
+
+ > extension[8]:
+desc_test_complex.proto:225:9
+desc_test_complex.proto:225:38
+
+
+ > extension[8] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[8] > label:
+desc_test_complex.proto:225:9
+desc_test_complex.proto:225:17
+
+
+ > extension[8] > type:
+desc_test_complex.proto:225:18
+desc_test_complex.proto:225:22
+
+
+ > extension[8] > name:
+desc_test_complex.proto:225:23
+desc_test_complex.proto:225:29
+
+
+ > extension[8] > number:
+desc_test_complex.proto:225:32
+desc_test_complex.proto:225:37
+
+
+ > extension[9]:
+desc_test_complex.proto:226:9
+desc_test_complex.proto:226:36
+
+
+ > extension[9] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[9] > label:
+desc_test_complex.proto:226:9
+desc_test_complex.proto:226:17
+
+
+ > extension[9] > type:
+desc_test_complex.proto:226:18
+desc_test_complex.proto:226:22
+
+
+ > extension[9] > name:
+desc_test_complex.proto:226:23
+desc_test_complex.proto:226:27
+
+
+ > extension[9] > number:
+desc_test_complex.proto:226:30
+desc_test_complex.proto:226:35
+
+
+ > extension[10]:
+desc_test_complex.proto:227:9
+desc_test_complex.proto:227:39
+
+
+ > extension[10] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[10] > label:
+desc_test_complex.proto:227:9
+desc_test_complex.proto:227:17
+
+
+ > extension[10] > type:
+desc_test_complex.proto:227:18
+desc_test_complex.proto:227:22
+
+
+ > extension[10] > name:
+desc_test_complex.proto:227:23
+desc_test_complex.proto:227:30
+
+
+ > extension[10] > number:
+desc_test_complex.proto:227:33
+desc_test_complex.proto:227:38
+
+
+ > extension[11]:
+desc_test_complex.proto:228:9
+desc_test_complex.proto:228:40
+
+
+ > extension[11] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[11] > label:
+desc_test_complex.proto:228:9
+desc_test_complex.proto:228:17
+
+
+ > extension[11] > type:
+desc_test_complex.proto:228:18
+desc_test_complex.proto:228:24
+
+
+ > extension[11] > name:
+desc_test_complex.proto:228:25
+desc_test_complex.proto:228:31
+
+
+ > extension[11] > number:
+desc_test_complex.proto:228:34
+desc_test_complex.proto:228:39
+
+
+ > extension[12]:
+desc_test_complex.proto:229:9
+desc_test_complex.proto:229:38
+
+
+ > extension[12] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[12] > label:
+desc_test_complex.proto:229:9
+desc_test_complex.proto:229:17
+
+
+ > extension[12] > type:
+desc_test_complex.proto:229:18
+desc_test_complex.proto:229:23
+
+
+ > extension[12] > name:
+desc_test_complex.proto:229:24
+desc_test_complex.proto:229:29
+
+
+ > extension[12] > number:
+desc_test_complex.proto:229:32
+desc_test_complex.proto:229:37
+
+
+ > extension[13]:
+desc_test_complex.proto:230:9
+desc_test_complex.proto:230:38
+
+
+ > extension[13] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[13] > label:
+desc_test_complex.proto:230:9
+desc_test_complex.proto:230:17
+
+
+ > extension[13] > type:
+desc_test_complex.proto:230:18
+desc_test_complex.proto:230:23
+
+
+ > extension[13] > name:
+desc_test_complex.proto:230:24
+desc_test_complex.proto:230:29
+
+
+ > extension[13] > number:
+desc_test_complex.proto:230:32
+desc_test_complex.proto:230:37
+
+
+ > extension[14]:
+desc_test_complex.proto:231:9
+desc_test_complex.proto:231:38
+
+
+ > extension[14] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[14] > label:
+desc_test_complex.proto:231:9
+desc_test_complex.proto:231:17
+
+
+ > extension[14] > type:
+desc_test_complex.proto:231:18
+desc_test_complex.proto:231:23
+
+
+ > extension[14] > name:
+desc_test_complex.proto:231:24
+desc_test_complex.proto:231:29
+
+
+ > extension[14] > number:
+desc_test_complex.proto:231:32
+desc_test_complex.proto:231:37
+
+
+ > extension[15]:
+desc_test_complex.proto:232:9
+desc_test_complex.proto:232:40
+
+
+ > extension[15] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[15] > label:
+desc_test_complex.proto:232:9
+desc_test_complex.proto:232:17
+
+
+ > extension[15] > type:
+desc_test_complex.proto:232:18
+desc_test_complex.proto:232:24
+
+
+ > extension[15] > name:
+desc_test_complex.proto:232:25
+desc_test_complex.proto:232:31
+
+
+ > extension[15] > number:
+desc_test_complex.proto:232:34
+desc_test_complex.proto:232:39
+
+
+ > extension[16]:
+desc_test_complex.proto:233:9
+desc_test_complex.proto:233:40
+
+
+ > extension[16] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[16] > label:
+desc_test_complex.proto:233:9
+desc_test_complex.proto:233:17
+
+
+ > extension[16] > type:
+desc_test_complex.proto:233:18
+desc_test_complex.proto:233:24
+
+
+ > extension[16] > name:
+desc_test_complex.proto:233:25
+desc_test_complex.proto:233:31
+
+
+ > extension[16] > number:
+desc_test_complex.proto:233:34
+desc_test_complex.proto:233:39
+
+
+ > extension[17]:
+desc_test_complex.proto:234:9
+desc_test_complex.proto:234:40
+
+
+ > extension[17] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[17] > label:
+desc_test_complex.proto:234:9
+desc_test_complex.proto:234:17
+
+
+ > extension[17] > type:
+desc_test_complex.proto:234:18
+desc_test_complex.proto:234:24
+
+
+ > extension[17] > name:
+desc_test_complex.proto:234:25
+desc_test_complex.proto:234:31
+
+
+ > extension[17] > number:
+desc_test_complex.proto:234:34
+desc_test_complex.proto:234:39
+
+
+ > extension[18]:
+desc_test_complex.proto:235:9
+desc_test_complex.proto:235:40
+
+
+ > extension[18] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[18] > label:
+desc_test_complex.proto:235:9
+desc_test_complex.proto:235:17
+
+
+ > extension[18] > type:
+desc_test_complex.proto:235:18
+desc_test_complex.proto:235:24
+
+
+ > extension[18] > name:
+desc_test_complex.proto:235:25
+desc_test_complex.proto:235:31
+
+
+ > extension[18] > number:
+desc_test_complex.proto:235:34
+desc_test_complex.proto:235:39
+
+
+ > extension[19]:
+desc_test_complex.proto:236:9
+desc_test_complex.proto:236:42
+
+
+ > extension[19] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[19] > label:
+desc_test_complex.proto:236:9
+desc_test_complex.proto:236:17
+
+
+ > extension[19] > type:
+desc_test_complex.proto:236:18
+desc_test_complex.proto:236:25
+
+
+ > extension[19] > name:
+desc_test_complex.proto:236:26
+desc_test_complex.proto:236:33
+
+
+ > extension[19] > number:
+desc_test_complex.proto:236:36
+desc_test_complex.proto:236:41
+
+
+ > extension[20]:
+desc_test_complex.proto:237:9
+desc_test_complex.proto:237:42
+
+
+ > extension[20] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[20] > label:
+desc_test_complex.proto:237:9
+desc_test_complex.proto:237:17
+
+
+ > extension[20] > type:
+desc_test_complex.proto:237:18
+desc_test_complex.proto:237:25
+
+
+ > extension[20] > name:
+desc_test_complex.proto:237:26
+desc_test_complex.proto:237:33
+
+
+ > extension[20] > number:
+desc_test_complex.proto:237:36
+desc_test_complex.proto:237:41
+
+
+ > extension[21]:
+desc_test_complex.proto:238:9
+desc_test_complex.proto:238:44
+
+
+ > extension[21] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[21] > label:
+desc_test_complex.proto:238:9
+desc_test_complex.proto:238:17
+
+
+ > extension[21] > type:
+desc_test_complex.proto:238:18
+desc_test_complex.proto:238:26
+
+
+ > extension[21] > name:
+desc_test_complex.proto:238:27
+desc_test_complex.proto:238:35
+
+
+ > extension[21] > number:
+desc_test_complex.proto:238:38
+desc_test_complex.proto:238:43
+
+
+ > extension[22]:
+desc_test_complex.proto:239:9
+desc_test_complex.proto:239:44
+
+
+ > extension[22] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[22] > label:
+desc_test_complex.proto:239:9
+desc_test_complex.proto:239:17
+
+
+ > extension[22] > type:
+desc_test_complex.proto:239:18
+desc_test_complex.proto:239:26
+
+
+ > extension[22] > name:
+desc_test_complex.proto:239:27
+desc_test_complex.proto:239:35
+
+
+ > extension[22] > number:
+desc_test_complex.proto:239:38
+desc_test_complex.proto:239:43
+
+
+ > extension[23]:
+desc_test_complex.proto:240:9
+desc_test_complex.proto:240:36
+
+
+ > extension[23] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[23] > label:
+desc_test_complex.proto:240:9
+desc_test_complex.proto:240:17
+
+
+ > extension[23] > type:
+desc_test_complex.proto:240:18
+desc_test_complex.proto:240:22
+
+
+ > extension[23] > name:
+desc_test_complex.proto:240:23
+desc_test_complex.proto:240:27
+
+
+ > extension[23] > number:
+desc_test_complex.proto:240:30
+desc_test_complex.proto:240:35
+
+
+ > extension[24]:
+desc_test_complex.proto:241:9
+desc_test_complex.proto:241:38
+
+
+ > extension[24] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[24] > label:
+desc_test_complex.proto:241:9
+desc_test_complex.proto:241:17
+
+
+ > extension[24] > type:
+desc_test_complex.proto:241:18
+desc_test_complex.proto:241:23
+
+
+ > extension[24] > name:
+desc_test_complex.proto:241:24
+desc_test_complex.proto:241:29
+
+
+ > extension[24] > number:
+desc_test_complex.proto:241:32
+desc_test_complex.proto:241:37
+
+
+ > extension[25]:
+desc_test_complex.proto:242:9
+desc_test_complex.proto:242:40
+
+
+ > extension[25] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[25] > label:
+desc_test_complex.proto:242:9
+desc_test_complex.proto:242:17
+
+
+ > extension[25] > type:
+desc_test_complex.proto:242:18
+desc_test_complex.proto:242:24
+
+
+ > extension[25] > name:
+desc_test_complex.proto:242:25
+desc_test_complex.proto:242:31
+
+
+ > extension[25] > number:
+desc_test_complex.proto:242:34
+desc_test_complex.proto:242:39
+
+
+ > extension[26]:
+desc_test_complex.proto:243:9
+desc_test_complex.proto:243:40
+
+
+ > extension[26] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[26] > label:
+desc_test_complex.proto:243:9
+desc_test_complex.proto:243:17
+
+
+ > extension[26] > type:
+desc_test_complex.proto:243:18
+desc_test_complex.proto:243:22
+
+
+ > extension[26] > name:
+desc_test_complex.proto:243:23
+desc_test_complex.proto:243:31
+
+
+ > extension[26] > number:
+desc_test_complex.proto:243:34
+desc_test_complex.proto:243:39
+
+
+ > extension[27]:
+desc_test_complex.proto:244:9
+desc_test_complex.proto:244:40
+
+
+ > extension[27] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[27] > label:
+desc_test_complex.proto:244:9
+desc_test_complex.proto:244:17
+
+
+ > extension[27] > type:
+desc_test_complex.proto:244:18
+desc_test_complex.proto:244:22
+
+
+ > extension[27] > name:
+desc_test_complex.proto:244:23
+desc_test_complex.proto:244:31
+
+
+ > extension[27] > number:
+desc_test_complex.proto:244:34
+desc_test_complex.proto:244:39
+
+
+ > extension[28]:
+desc_test_complex.proto:245:9
+desc_test_complex.proto:245:40
+
+
+ > extension[28] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[28] > label:
+desc_test_complex.proto:245:9
+desc_test_complex.proto:245:17
+
+
+ > extension[28] > type:
+desc_test_complex.proto:245:18
+desc_test_complex.proto:245:22
+
+
+ > extension[28] > name:
+desc_test_complex.proto:245:23
+desc_test_complex.proto:245:31
+
+
+ > extension[28] > number:
+desc_test_complex.proto:245:34
+desc_test_complex.proto:245:39
+
+
+ > extension[29]:
+desc_test_complex.proto:246:9
+desc_test_complex.proto:246:39
+
+
+ > extension[29] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[29] > label:
+desc_test_complex.proto:246:9
+desc_test_complex.proto:246:17
+
+
+ > extension[29] > type:
+desc_test_complex.proto:246:18
+desc_test_complex.proto:246:22
+
+
+ > extension[29] > name:
+desc_test_complex.proto:246:23
+desc_test_complex.proto:246:30
+
+
+ > extension[29] > number:
+desc_test_complex.proto:246:33
+desc_test_complex.proto:246:38
+
+
+ > extension[30]:
+desc_test_complex.proto:247:9
+desc_test_complex.proto:247:36
+
+
+ > extension[30] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[30] > label:
+desc_test_complex.proto:247:9
+desc_test_complex.proto:247:17
+
+
+ > extension[30] > type:
+desc_test_complex.proto:247:18
+desc_test_complex.proto:247:22
+
+
+ > extension[30] > name:
+desc_test_complex.proto:247:23
+desc_test_complex.proto:247:27
+
+
+ > extension[30] > number:
+desc_test_complex.proto:247:30
+desc_test_complex.proto:247:35
+
+
+ > extension[31]:
+desc_test_complex.proto:248:9
+desc_test_complex.proto:248:39
+
+
+ > extension[31] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[31] > label:
+desc_test_complex.proto:248:9
+desc_test_complex.proto:248:17
+
+
+ > extension[31] > type:
+desc_test_complex.proto:248:18
+desc_test_complex.proto:248:22
+
+
+ > extension[31] > name:
+desc_test_complex.proto:248:23
+desc_test_complex.proto:248:30
+
+
+ > extension[31] > number:
+desc_test_complex.proto:248:33
+desc_test_complex.proto:248:38
+
+
+ > extension[32]:
+desc_test_complex.proto:249:9
+desc_test_complex.proto:249:35
+
+
+ > extension[32] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[32] > label:
+desc_test_complex.proto:249:9
+desc_test_complex.proto:249:17
+
+
+ > extension[32] > type:
+desc_test_complex.proto:249:18
+desc_test_complex.proto:249:22
+
+
+ > extension[32] > name:
+desc_test_complex.proto:249:23
+desc_test_complex.proto:249:26
+
+
+ > extension[32] > number:
+desc_test_complex.proto:249:29
+desc_test_complex.proto:249:34
+
+
+ > extension[33]:
+desc_test_complex.proto:250:9
+desc_test_complex.proto:250:38
+
+
+ > extension[33] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[33] > label:
+desc_test_complex.proto:250:9
+desc_test_complex.proto:250:17
+
+
+ > extension[33] > type:
+desc_test_complex.proto:250:18
+desc_test_complex.proto:250:22
+
+
+ > extension[33] > name:
+desc_test_complex.proto:250:23
+desc_test_complex.proto:250:29
+
+
+ > extension[33] > number:
+desc_test_complex.proto:250:32
+desc_test_complex.proto:250:37
+
+
+ > extension[34]:
+desc_test_complex.proto:251:9
+desc_test_complex.proto:251:38
+
+
+ > extension[34] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[34] > label:
+desc_test_complex.proto:251:9
+desc_test_complex.proto:251:17
+
+
+ > extension[34] > type:
+desc_test_complex.proto:251:18
+desc_test_complex.proto:251:22
+
+
+ > extension[34] > name:
+desc_test_complex.proto:251:23
+desc_test_complex.proto:251:29
+
+
+ > extension[34] > number:
+desc_test_complex.proto:251:32
+desc_test_complex.proto:251:37
+
+
+ > extension[35]:
+desc_test_complex.proto:252:9
+desc_test_complex.proto:252:42
+
+
+ > extension[35] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[35] > label:
+desc_test_complex.proto:252:9
+desc_test_complex.proto:252:17
+
+
+ > extension[35] > type:
+desc_test_complex.proto:252:18
+desc_test_complex.proto:252:22
+
+
+ > extension[35] > name:
+desc_test_complex.proto:252:23
+desc_test_complex.proto:252:33
+
+
+ > extension[35] > number:
+desc_test_complex.proto:252:36
+desc_test_complex.proto:252:41
+
+
+ > extension[36]:
+desc_test_complex.proto:253:9
+desc_test_complex.proto:253:40
+
+
+ > extension[36] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[36] > label:
+desc_test_complex.proto:253:9
+desc_test_complex.proto:253:17
+
+
+ > extension[36] > type:
+desc_test_complex.proto:253:18
+desc_test_complex.proto:253:22
+
+
+ > extension[36] > name:
+desc_test_complex.proto:253:23
+desc_test_complex.proto:253:31
+
+
+ > extension[36] > number:
+desc_test_complex.proto:253:34
+desc_test_complex.proto:253:39
+
+
+ > extension[37]:
+desc_test_complex.proto:254:9
+desc_test_complex.proto:254:34
+
+
+ > extension[37] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[37] > label:
+desc_test_complex.proto:254:9
+desc_test_complex.proto:254:17
+
+
+ > extension[37] > type:
+desc_test_complex.proto:254:18
+desc_test_complex.proto:254:22
+
+
+ > extension[37] > name:
+desc_test_complex.proto:254:23
+desc_test_complex.proto:254:25
+
+
+ > extension[37] > number:
+desc_test_complex.proto:254:28
+desc_test_complex.proto:254:33
+
+
+ > extension[38]:
+desc_test_complex.proto:255:9
+desc_test_complex.proto:255:37
+
+
+ > extension[38] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[38] > label:
+desc_test_complex.proto:255:9
+desc_test_complex.proto:255:17
+
+
+ > extension[38] > type:
+desc_test_complex.proto:255:18
+desc_test_complex.proto:255:23
+
+
+ > extension[38] > name:
+desc_test_complex.proto:255:24
+desc_test_complex.proto:255:28
+
+
+ > extension[38] > number:
+desc_test_complex.proto:255:31
+desc_test_complex.proto:255:36
+
+
+ > extension[39]:
+desc_test_complex.proto:256:9
+desc_test_complex.proto:256:38
+
+
+ > extension[39] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[39] > label:
+desc_test_complex.proto:256:9
+desc_test_complex.proto:256:17
+
+
+ > extension[39] > type:
+desc_test_complex.proto:256:18
+desc_test_complex.proto:256:23
+
+
+ > extension[39] > name:
+desc_test_complex.proto:256:24
+desc_test_complex.proto:256:29
+
+
+ > extension[39] > number:
+desc_test_complex.proto:256:32
+desc_test_complex.proto:256:37
+
+
+ > extension[40]:
+desc_test_complex.proto:257:9
+desc_test_complex.proto:257:40
+
+
+ > extension[40] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[40] > label:
+desc_test_complex.proto:257:9
+desc_test_complex.proto:257:17
+
+
+ > extension[40] > type:
+desc_test_complex.proto:257:18
+desc_test_complex.proto:257:23
+
+
+ > extension[40] > name:
+desc_test_complex.proto:257:24
+desc_test_complex.proto:257:31
+
+
+ > extension[40] > number:
+desc_test_complex.proto:257:34
+desc_test_complex.proto:257:39
+
+
+ > extension[41]:
+desc_test_complex.proto:258:9
+desc_test_complex.proto:258:49
+
+
+ > extension[41] > extendee:
+desc_test_complex.proto:222:8
+desc_test_complex.proto:222:36
+
+
+ > extension[41] > label:
+desc_test_complex.proto:258:9
+desc_test_complex.proto:258:17
+
+
+ > extension[41] > type_name:
+desc_test_complex.proto:258:18
+desc_test_complex.proto:258:35
+
+
+ > extension[41] > name:
+desc_test_complex.proto:258:36
+desc_test_complex.proto:258:40
+
+
+ > extension[41] > number:
+desc_test_complex.proto:258:43
+desc_test_complex.proto:258:48
+
+
+ > message_type[8]:
+desc_test_complex.proto:261:1
+desc_test_complex.proto:286:2
+
+
+ > message_type[8] > name:
+desc_test_complex.proto:261:9
+desc_test_complex.proto:261:32
+
+
+ > message_type[8] > field[0]:
+desc_test_complex.proto:262:9
+desc_test_complex.proto:272:11
+
+
+ > message_type[8] > field[0] > label:
+desc_test_complex.proto:262:9
+desc_test_complex.proto:262:17
+
+
+ > message_type[8] > field[0] > type:
+desc_test_complex.proto:262:18
+desc_test_complex.proto:262:24
+
+
+ > message_type[8] > field[0] > name:
+desc_test_complex.proto:262:25
+desc_test_complex.proto:262:27
+
+
+ > message_type[8] > field[0] > number:
+desc_test_complex.proto:262:30
+desc_test_complex.proto:262:31
+
+
+ > message_type[8] > field[0] > options:
+desc_test_complex.proto:262:32
+desc_test_complex.proto:272:10
+
+
+ > message_type[8] > field[0] > options > syntax:
+desc_test_complex.proto:263:17
+desc_test_complex.proto:263:32
+
+
+ > message_type[8] > field[0] > options > import:
+desc_test_complex.proto:263:34
+desc_test_complex.proto:263:49
+
+
+ > message_type[8] > field[0] > options > public:
+desc_test_complex.proto:263:51
+desc_test_complex.proto:263:66
+
+
+ > message_type[8] > field[0] > options > weak:
+desc_test_complex.proto:263:68
+desc_test_complex.proto:263:81
+
+
+ > message_type[8] > field[0] > options > package:
+desc_test_complex.proto:263:83
+desc_test_complex.proto:263:99
+
+
+ > message_type[8] > field[0] > options > string:
+desc_test_complex.proto:264:17
+desc_test_complex.proto:264:36
+
+
+ > message_type[8] > field[0] > options > bytes:
+desc_test_complex.proto:264:38
+desc_test_complex.proto:264:55
+
+
+ > message_type[8] > field[0] > options > bool:
+desc_test_complex.proto:264:57
+desc_test_complex.proto:264:70
+
+
+ > message_type[8] > field[0] > options > float:
+desc_test_complex.proto:265:17
+desc_test_complex.proto:265:31
+
+
+ > message_type[8] > field[0] > options > double:
+desc_test_complex.proto:265:33
+desc_test_complex.proto:265:51
+
+
+ > message_type[8] > field[0] > options > int32:
+desc_test_complex.proto:266:17
+desc_test_complex.proto:266:29
+
+
+ > message_type[8] > field[0] > options > int64:
+desc_test_complex.proto:266:31
+desc_test_complex.proto:266:43
+
+
+ > message_type[8] > field[0] > options > uint32:
+desc_test_complex.proto:266:45
+desc_test_complex.proto:266:60
+
+
+ > message_type[8] > field[0] > options > uint64:
+desc_test_complex.proto:266:62
+desc_test_complex.proto:266:77
+
+
+ > message_type[8] > field[0] > options > sint32:
+desc_test_complex.proto:266:79
+desc_test_complex.proto:266:93
+
+
+ > message_type[8] > field[0] > options > sint64:
+desc_test_complex.proto:266:95
+desc_test_complex.proto:266:109
+
+
+ > message_type[8] > field[0] > options > fixed32:
+desc_test_complex.proto:267:17
+desc_test_complex.proto:267:33
+
+
+ > message_type[8] > field[0] > options > fixed64:
+desc_test_complex.proto:267:35
+desc_test_complex.proto:267:51
+
+
+ > message_type[8] > field[0] > options > sfixed32:
+desc_test_complex.proto:267:53
+desc_test_complex.proto:267:71
+
+
+ > message_type[8] > field[0] > options > sfixed64:
+desc_test_complex.proto:267:73
+desc_test_complex.proto:267:91
+
+
+ > message_type[8] > field[0] > options > optional:
+desc_test_complex.proto:268:17
+desc_test_complex.proto:268:34
+
+
+ > message_type[8] > field[0] > options > repeated:
+desc_test_complex.proto:268:36
+desc_test_complex.proto:268:53
+
+
+ > message_type[8] > field[0] > options > required:
+desc_test_complex.proto:268:55
+desc_test_complex.proto:268:72
+
+
+ > message_type[8] > field[0] > options > message:
+desc_test_complex.proto:269:17
+desc_test_complex.proto:269:33
+
+
+ > message_type[8] > field[0] > options > enum:
+desc_test_complex.proto:269:35
+desc_test_complex.proto:269:48
+
+
+ > message_type[8] > field[0] > options > service:
+desc_test_complex.proto:269:50
+desc_test_complex.proto:269:66
+
+
+ > message_type[8] > field[0] > options > rpc:
+desc_test_complex.proto:269:68
+desc_test_complex.proto:269:80
+
+
+ > message_type[8] > field[0] > options > option:
+desc_test_complex.proto:270:17
+desc_test_complex.proto:270:32
+
+
+ > message_type[8] > field[0] > options > extend:
+desc_test_complex.proto:270:34
+desc_test_complex.proto:270:49
+
+
+ > message_type[8] > field[0] > options > extensions:
+desc_test_complex.proto:270:51
+desc_test_complex.proto:270:70
+
+
+ > message_type[8] > field[0] > options > reserved:
+desc_test_complex.proto:270:72
+desc_test_complex.proto:270:89
+
+
+ > message_type[8] > field[0] > options > to:
+desc_test_complex.proto:271:17
+desc_test_complex.proto:271:28
+
+
+ > message_type[8] > field[0] > options > true:
+desc_test_complex.proto:271:30
+desc_test_complex.proto:271:42
+
+
+ > message_type[8] > field[0] > options > false:
+desc_test_complex.proto:271:44
+desc_test_complex.proto:271:58
+
+
+ > message_type[8] > field[0] > options > default:
+desc_test_complex.proto:271:60
+desc_test_complex.proto:271:75
+
+
+ > message_type[8] > field[1]:
+desc_test_complex.proto:273:9
+desc_test_complex.proto:285:11
+
+
+ > message_type[8] > field[1] > label:
+desc_test_complex.proto:273:9
+desc_test_complex.proto:273:17
+
+
+ > message_type[8] > field[1] > type:
+desc_test_complex.proto:273:18
+desc_test_complex.proto:273:24
+
+
+ > message_type[8] > field[1] > name:
+desc_test_complex.proto:273:25
+desc_test_complex.proto:273:29
+
+
+ > message_type[8] > field[1] > number:
+desc_test_complex.proto:273:32
+desc_test_complex.proto:273:33
+
+
+ > message_type[8] > field[1] > options:
+desc_test_complex.proto:273:34
+desc_test_complex.proto:285:10
+
+
+ > message_type[8] > field[1] > options > boom:
+desc_test_complex.proto:274:17
+desc_test_complex.proto:284:18
+---- desc_test_options.proto ----
+
+
+:
+desc_test_options.proto:1:1
+desc_test_options.proto:63:2
+
+
+ > syntax:
+desc_test_options.proto:1:1
+desc_test_options.proto:1:19
+
+
+ > options:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > options > go_package:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > package:
+desc_test_options.proto:5:1
+desc_test_options.proto:5:20
+
+
+ > dependency[0]:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > extension:
+desc_test_options.proto:9:1
+desc_test_options.proto:11:2
+
+
+ > extension[0]:
+desc_test_options.proto:10:9
+desc_test_options.proto:10:38
+
+
+ > extension[0] > extendee:
+desc_test_options.proto:9:8
+desc_test_options.proto:9:38
+
+
+ > extension[0] > label:
+desc_test_options.proto:10:9
+desc_test_options.proto:10:17
+
+
+ > extension[0] > type:
+desc_test_options.proto:10:18
+desc_test_options.proto:10:22
+
+
+ > extension[0] > name:
+desc_test_options.proto:10:23
+desc_test_options.proto:10:29
+
+
+ > extension[0] > number:
+desc_test_options.proto:10:32
+desc_test_options.proto:10:37
+
+
+ > extension:
+desc_test_options.proto:13:1
+desc_test_options.proto:16:2
+
+
+ > extension[1]:
+desc_test_options.proto:14:9
+desc_test_options.proto:14:40
+
+
+ > extension[1] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[1] > label:
+desc_test_options.proto:14:9
+desc_test_options.proto:14:17
+
+
+ > extension[1] > type:
+desc_test_options.proto:14:18
+desc_test_options.proto:14:24
+
+
+ > extension[1] > name:
+desc_test_options.proto:14:25
+desc_test_options.proto:14:31
+
+
+ > extension[1] > number:
+desc_test_options.proto:14:34
+desc_test_options.proto:14:39
+
+
+ > extension[2]:
+desc_test_options.proto:15:9
+desc_test_options.proto:15:40
+
+
+ > extension[2] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[2] > label:
+desc_test_options.proto:15:9
+desc_test_options.proto:15:17
+
+
+ > extension[2] > type:
+desc_test_options.proto:15:18
+desc_test_options.proto:15:23
+
+
+ > extension[2] > name:
+desc_test_options.proto:15:24
+desc_test_options.proto:15:31
+
+
+ > extension[2] > number:
+desc_test_options.proto:15:34
+desc_test_options.proto:15:39
+
+
+ > extension:
+desc_test_options.proto:18:1
+desc_test_options.proto:24:2
+
+
+ > extension[3]:
+desc_test_options.proto:19:9
+desc_test_options.proto:19:39
+
+
+ > extension[3] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[3] > label:
+desc_test_options.proto:19:9
+desc_test_options.proto:19:17
+
+
+ > extension[3] > type:
+desc_test_options.proto:19:18
+desc_test_options.proto:19:23
+
+
+ > extension[3] > name:
+desc_test_options.proto:19:24
+desc_test_options.proto:19:30
+
+
+ > extension[3] > number:
+desc_test_options.proto:19:33
+desc_test_options.proto:19:38
+
+
+ > extension[4]:
+desc_test_options.proto:20:9
+desc_test_options.proto:20:41
+
+
+ > extension[4] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[4] > label:
+desc_test_options.proto:20:9
+desc_test_options.proto:20:17
+
+
+ > extension[4] > type:
+desc_test_options.proto:20:18
+desc_test_options.proto:20:24
+
+
+ > extension[4] > name:
+desc_test_options.proto:20:25
+desc_test_options.proto:20:32
+
+
+ > extension[4] > number:
+desc_test_options.proto:20:35
+desc_test_options.proto:20:40
+
+
+ > extension[5]:
+desc_test_options.proto:21:9
+desc_test_options.proto:21:44
+
+
+ > extension[5] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[5] > label:
+desc_test_options.proto:21:9
+desc_test_options.proto:21:17
+
+
+ > extension[5] > type:
+desc_test_options.proto:21:18
+desc_test_options.proto:21:26
+
+
+ > extension[5] > name:
+desc_test_options.proto:21:27
+desc_test_options.proto:21:35
+
+
+ > extension[5] > number:
+desc_test_options.proto:21:38
+desc_test_options.proto:21:43
+
+
+ > extension[6]:
+desc_test_options.proto:22:9
+desc_test_options.proto:22:41
+
+
+ > extension[6] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[6] > label:
+desc_test_options.proto:22:9
+desc_test_options.proto:22:17
+
+
+ > extension[6] > type:
+desc_test_options.proto:22:18
+desc_test_options.proto:22:24
+
+
+ > extension[6] > name:
+desc_test_options.proto:22:25
+desc_test_options.proto:22:32
+
+
+ > extension[6] > number:
+desc_test_options.proto:22:35
+desc_test_options.proto:22:40
+
+
+ > extension[7]:
+desc_test_options.proto:23:9
+desc_test_options.proto:23:43
+
+
+ > extension[7] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[7] > label:
+desc_test_options.proto:23:9
+desc_test_options.proto:23:17
+
+
+ > extension[7] > type:
+desc_test_options.proto:23:18
+desc_test_options.proto:23:25
+
+
+ > extension[7] > name:
+desc_test_options.proto:23:26
+desc_test_options.proto:23:34
+
+
+ > extension[7] > number:
+desc_test_options.proto:23:37
+desc_test_options.proto:23:42
+
+
+ > extension:
+desc_test_options.proto:26:1
+desc_test_options.proto:32:2
+
+
+ > extension[8]:
+desc_test_options.proto:27:9
+desc_test_options.proto:27:40
+
+
+ > extension[8] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[8] > label:
+desc_test_options.proto:27:9
+desc_test_options.proto:27:17
+
+
+ > extension[8] > type:
+desc_test_options.proto:27:18
+desc_test_options.proto:27:23
+
+
+ > extension[8] > name:
+desc_test_options.proto:27:24
+desc_test_options.proto:27:31
+
+
+ > extension[8] > number:
+desc_test_options.proto:27:34
+desc_test_options.proto:27:39
+
+
+ > extension[9]:
+desc_test_options.proto:28:9
+desc_test_options.proto:28:42
+
+
+ > extension[9] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[9] > label:
+desc_test_options.proto:28:9
+desc_test_options.proto:28:17
+
+
+ > extension[9] > type:
+desc_test_options.proto:28:18
+desc_test_options.proto:28:24
+
+
+ > extension[9] > name:
+desc_test_options.proto:28:25
+desc_test_options.proto:28:33
+
+
+ > extension[9] > number:
+desc_test_options.proto:28:36
+desc_test_options.proto:28:41
+
+
+ > extension[10]:
+desc_test_options.proto:29:9
+desc_test_options.proto:29:45
+
+
+ > extension[10] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[10] > label:
+desc_test_options.proto:29:9
+desc_test_options.proto:29:17
+
+
+ > extension[10] > type:
+desc_test_options.proto:29:18
+desc_test_options.proto:29:26
+
+
+ > extension[10] > name:
+desc_test_options.proto:29:27
+desc_test_options.proto:29:36
+
+
+ > extension[10] > number:
+desc_test_options.proto:29:39
+desc_test_options.proto:29:44
+
+
+ > extension[11]:
+desc_test_options.proto:30:9
+desc_test_options.proto:30:42
+
+
+ > extension[11] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[11] > label:
+desc_test_options.proto:30:9
+desc_test_options.proto:30:17
+
+
+ > extension[11] > type:
+desc_test_options.proto:30:18
+desc_test_options.proto:30:24
+
+
+ > extension[11] > name:
+desc_test_options.proto:30:25
+desc_test_options.proto:30:33
+
+
+ > extension[11] > number:
+desc_test_options.proto:30:36
+desc_test_options.proto:30:41
+
+
+ > extension[12]:
+desc_test_options.proto:31:9
+desc_test_options.proto:31:44
+
+
+ > extension[12] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[12] > label:
+desc_test_options.proto:31:9
+desc_test_options.proto:31:17
+
+
+ > extension[12] > type:
+desc_test_options.proto:31:18
+desc_test_options.proto:31:25
+
+
+ > extension[12] > name:
+desc_test_options.proto:31:26
+desc_test_options.proto:31:35
+
+
+ > extension[12] > number:
+desc_test_options.proto:31:38
+desc_test_options.proto:31:43
+
+
+ > extension:
+desc_test_options.proto:34:1
+desc_test_options.proto:37:2
+
+
+ > extension[13]:
+desc_test_options.proto:35:9
+desc_test_options.proto:35:53
+
+
+ > extension[13] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[13] > label:
+desc_test_options.proto:35:9
+desc_test_options.proto:35:17
+
+
+ > extension[13] > type_name:
+desc_test_options.proto:35:18
+desc_test_options.proto:35:37
+
+
+ > extension[13] > name:
+desc_test_options.proto:35:38
+desc_test_options.proto:35:44
+
+
+ > extension[13] > number:
+desc_test_options.proto:35:47
+desc_test_options.proto:35:52
+
+
+ > extension[14]:
+desc_test_options.proto:36:9
+desc_test_options.proto:36:51
+
+
+ > extension[14] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[14] > label:
+desc_test_options.proto:36:9
+desc_test_options.proto:36:17
+
+
+ > extension[14] > type_name:
+desc_test_options.proto:36:18
+desc_test_options.proto:36:34
+
+
+ > extension[14] > name:
+desc_test_options.proto:36:35
+desc_test_options.proto:36:42
+
+
+ > extension[14] > number:
+desc_test_options.proto:36:45
+desc_test_options.proto:36:50
+
+
+ > extension:
+desc_test_options.proto:39:1
+desc_test_options.proto:42:2
+
+
+ > extension[15]:
+desc_test_options.proto:40:9
+desc_test_options.proto:40:40
+
+
+ > extension[15] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[15] > label:
+desc_test_options.proto:40:9
+desc_test_options.proto:40:17
+
+
+ > extension[15] > type:
+desc_test_options.proto:40:18
+desc_test_options.proto:40:23
+
+
+ > extension[15] > name:
+desc_test_options.proto:40:24
+desc_test_options.proto:40:31
+
+
+ > extension[15] > number:
+desc_test_options.proto:40:34
+desc_test_options.proto:40:39
+
+
+ > extension[16]:
+desc_test_options.proto:41:9
+desc_test_options.proto:41:42
+
+
+ > extension[16] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[16] > label:
+desc_test_options.proto:41:9
+desc_test_options.proto:41:17
+
+
+ > extension[16] > type:
+desc_test_options.proto:41:18
+desc_test_options.proto:41:24
+
+
+ > extension[16] > name:
+desc_test_options.proto:41:25
+desc_test_options.proto:41:33
+
+
+ > extension[16] > number:
+desc_test_options.proto:41:36
+desc_test_options.proto:41:41
+
+
+ > message_type[0]:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+ Leading comments:
+ Test message used by custom options
+
+
+
+ > message_type[0] > name:
+desc_test_options.proto:45:9
+desc_test_options.proto:45:28
+
+
+ > message_type[0] > field[0]:
+desc_test_options.proto:46:9
+desc_test_options.proto:46:32
+
+
+ > message_type[0] > field[0] > label:
+desc_test_options.proto:46:9
+desc_test_options.proto:46:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_options.proto:46:18
+desc_test_options.proto:46:24
+
+
+ > message_type[0] > field[0] > name:
+desc_test_options.proto:46:25
+desc_test_options.proto:46:27
+
+
+ > message_type[0] > field[0] > number:
+desc_test_options.proto:46:30
+desc_test_options.proto:46:31
+
+
+ > message_type[0] > field[1]:
+desc_test_options.proto:47:9
+desc_test_options.proto:47:34
+
+
+ > message_type[0] > field[1] > label:
+desc_test_options.proto:47:9
+desc_test_options.proto:47:17
+
+
+ > message_type[0] > field[1] > type:
+desc_test_options.proto:47:18
+desc_test_options.proto:47:24
+
+
+ > message_type[0] > field[1] > name:
+desc_test_options.proto:47:25
+desc_test_options.proto:47:29
+
+
+ > message_type[0] > field[1] > number:
+desc_test_options.proto:47:32
+desc_test_options.proto:47:33
+
+
+ > enum_type[0]:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+ Leading comments:
+ Test enum used by custom options
+
+
+
+ > enum_type[0] > name:
+desc_test_options.proto:51:6
+desc_test_options.proto:51:22
+
+
+ > enum_type[0] > value[0]:
+desc_test_options.proto:52:9
+desc_test_options.proto:52:19
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_options.proto:52:9
+desc_test_options.proto:52:14
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_options.proto:52:17
+desc_test_options.proto:52:18
+
+
+ > extension:
+desc_test_options.proto:55:1
+desc_test_options.proto:58:2
+
+
+ > extension[17]:
+desc_test_options.proto:56:9
+desc_test_options.proto:56:41
+
+
+ > extension[17] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[17] > label:
+desc_test_options.proto:56:9
+desc_test_options.proto:56:17
+
+
+ > extension[17] > type:
+desc_test_options.proto:56:18
+desc_test_options.proto:56:24
+
+
+ > extension[17] > name:
+desc_test_options.proto:56:25
+desc_test_options.proto:56:32
+
+
+ > extension[17] > number:
+desc_test_options.proto:56:35
+desc_test_options.proto:56:40
+
+
+ > extension[18]:
+desc_test_options.proto:57:9
+desc_test_options.proto:57:41
+
+
+ > extension[18] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[18] > label:
+desc_test_options.proto:57:9
+desc_test_options.proto:57:17
+
+
+ > extension[18] > type:
+desc_test_options.proto:57:18
+desc_test_options.proto:57:23
+
+
+ > extension[18] > name:
+desc_test_options.proto:57:24
+desc_test_options.proto:57:32
+
+
+ > extension[18] > number:
+desc_test_options.proto:57:35
+desc_test_options.proto:57:40
+
+
+ > extension:
+desc_test_options.proto:60:1
+desc_test_options.proto:63:2
+
+
+ > extension[19]:
+desc_test_options.proto:61:9
+desc_test_options.proto:61:41
+
+
+ > extension[19] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[19] > label:
+desc_test_options.proto:61:9
+desc_test_options.proto:61:17
+
+
+ > extension[19] > type:
+desc_test_options.proto:61:18
+desc_test_options.proto:61:24
+
+
+ > extension[19] > name:
+desc_test_options.proto:61:25
+desc_test_options.proto:61:32
+
+
+ > extension[19] > number:
+desc_test_options.proto:61:35
+desc_test_options.proto:61:40
+
+
+ > extension[20]:
+desc_test_options.proto:62:9
+desc_test_options.proto:62:41
+
+
+ > extension[20] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[20] > label:
+desc_test_options.proto:62:9
+desc_test_options.proto:62:17
+
+
+ > extension[20] > type:
+desc_test_options.proto:62:18
+desc_test_options.proto:62:23
+
+
+ > extension[20] > name:
+desc_test_options.proto:62:24
+desc_test_options.proto:62:32
+
+
+ > extension[20] > number:
+desc_test_options.proto:62:35
+desc_test_options.proto:62:40
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
new file mode 100644
index 0000000..b56e8ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
@@ -0,0 +1,7 @@
+// Package protoprint provides a mechanism to generate protobuf source code
+// from descriptors.
+//
+// This can be useful to turn file descriptor sets (produced by protoc) back
+// into proto IDL code. Combined with the protoreflect/builder package, it can
+// also be used to perform code generation of proto source code.
+package protoprint
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
new file mode 100644
index 0000000..0363cf0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
@@ -0,0 +1,2417 @@
+package protoprint
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
+// Printer knows how to format file descriptors as proto source code. Its fields
+// provide some control over how the resulting source file is constructed and
+// formatted.
+//
+// NOTE(review): printing normalizes some fields in place (see printProto: an
+// empty Indent is replaced with two spaces, non-tab indent runes are converted
+// to spaces, and OmitDetachedComments is folded into OmitComments), so a single
+// Printer value should not be shared across goroutines concurrently.
+type Printer struct {
+	// If true, comments are rendered using "/*" style comments. Otherwise, they
+	// are printed using "//" style line comments.
+	PreferMultiLineStyleComments bool
+
+	// If true, elements are sorted into a canonical order.
+	//
+	// The canonical order for elements in a file follows:
+	//   1. Syntax
+	//   2. Package
+	//   3. Imports (sorted lexically)
+	//   4. Options (sorted by name, standard options before custom options)
+	//   5. Messages (sorted by name)
+	//   6. Enums (sorted by name)
+	//   7. Services (sorted by name)
+	//   8. Extensions (grouped by extendee, sorted by extendee+tag)
+	//
+	// The canonical order of elements in a message follows:
+	//   1. Options (sorted by name, standard options before custom options)
+	//   2. Fields and One-Ofs (sorted by tag; one-ofs interleaved based on the
+	//      minimum tag therein)
+	//   3. Nested Messages (sorted by name)
+	//   4. Nested Enums (sorted by name)
+	//   5. Extension ranges (sorted by starting tag number)
+	//   6. Nested Extensions (grouped by extendee, sorted by extendee+tag)
+	//   7. Reserved ranges (sorted by starting tag number)
+	//   8. Reserved names (sorted lexically)
+	//
+	// Methods are sorted within a service by name and appear after any service
+	// options (which are sorted by name, standard options before custom ones).
+	// Enum values are sorted within an enum, first by numeric value then by
+	// name, and also appear after any enum options.
+	//
+	// Options for fields, enum values, and extension ranges are sorted by name,
+	// standard options before custom ones.
+	SortElements bool
+
+	// The indentation used. Any characters other than spaces or tabs will be
+	// replaced with spaces. If unset/empty, two spaces will be used.
+	Indent string
+
+	// If true, detached comments (between elements) will be ignored.
+	//
+	// Deprecated: Use OmitComments bitmask instead.
+	OmitDetachedComments bool
+
+	// A bitmask of comment types to omit. If unset, all comments will be
+	// included. Use CommentsAll to not print any comments.
+	OmitComments CommentType
+
+	// If true, trailing comments that typically appear on the same line as an
+	// element (option, field, enum value, method) will be printed on a separate
+	// line instead.
+	//
+	// So, with this set, you'll get output like so:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1;
+	//    // trailing comment
+	//
+	// If left false, the printer will try to emit trailing comments on the same
+	// line instead:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1; // trailing comment
+	//
+	// If the trailing comment has more than one line, it will automatically be
+	// forced to the next line. Also, elements that end with "}" instead of ";"
+	// will have trailing comments rendered on the subsequent line.
+	TrailingCommentsOnSeparateLine bool
+
+	// If true, the printed output will eschew any blank lines, which otherwise
+	// appear between descriptor elements and comment blocks. Note that if
+	// detached comments are being printed, this will cause them to be merged
+	// into the subsequent leading comments. Similarly, any element trailing
+	// comments will be merged into the subsequent leading comments.
+	Compact bool
+
+	// If true, all references to messages, extensions, and enums (such as in
+	// options, field types, and method request and response types) will be
+	// fully-qualified. When left unset, the referenced elements will contain
+	// only as much qualifier as is required.
+	//
+	// For example, if a message is in the same package as the reference, the
+	// simple name can be used. If a message shares some context with the
+	// reference, only the unshared context needs to be included. For example:
+	//
+	//    message Foo {
+	//      message Bar {
+	//        enum Baz {
+	//          ZERO = 0;
+	//          ONE = 1;
+	//        }
+	//      }
+	//
+	//      // This field shares some context as the enum it references: they are
+	//      // both inside of the namespace Foo:
+	//      //    field is "Foo.my_baz"
+	//      //    enum is "Foo.Bar.Baz"
+	//      // So we only need to qualify the reference with the context that they
+	//      // do NOT have in common:
+	//      Bar.Baz my_baz = 1;
+	//    }
+	//
+	// When printing fully-qualified names, they will be preceded by a dot, to
+	// avoid any ambiguity that they might be relative vs. fully-qualified.
+	ForceFullyQualifiedNames bool
+}
+
+// CommentType is a kind of comments in a proto source file. This can be used
+// as a bitmask: combine kinds with bitwise OR (as CommentsNonDoc does below).
+type CommentType int
+
+const (
+	// CommentsDetached refers to comments that are not "attached" to any
+	// source element. They are attributed to the subsequent element in the
+	// file as "detached" comments.
+	CommentsDetached CommentType = 1 << iota
+	// CommentsTrailing refers to a comment block immediately following an
+	// element in the source file. If another element immediately follows
+	// the trailing comment, it is instead considered a leading comment for
+	// that subsequent element.
+	CommentsTrailing
+	// CommentsLeading refers to a comment block immediately preceding an
+	// element in the source file. For high-level elements (those that have
+	// their own descriptor), these are used as doc comments for that element.
+	CommentsLeading
+	// CommentsTokens refers to any comments (leading, trailing, or detached)
+	// on low-level elements in the file. "High-level" elements have their own
+	// descriptors, e.g. messages, enums, fields, services, and methods. But
+	// comments can appear anywhere (such as around identifiers and keywords,
+	// sprinkled inside the declarations of a high-level element). This class
+	// of comments are for those extra comments sprinkled into the file.
+	CommentsTokens
+
+	// CommentsNonDoc refers to comments that are *not* doc comments. This is a
+	// bitwise union of everything other than CommentsLeading. If you configure
+	// a printer to omit this, only doc comments on descriptor elements will be
+	// included in the printed output.
+	CommentsNonDoc = CommentsDetached | CommentsTrailing | CommentsTokens
+	// CommentsAll indicates all kinds of comments. If you configure a printer
+	// to omit this, no comments will appear in the printed output, even if the
+	// input descriptors had source info and comments. (-1 has every bit set,
+	// so it also covers any comment kinds added in the future.)
+	CommentsAll = -1
+)
+
+// PrintProtoFiles prints all of the given file descriptors. The given open
+// function is given a file name and is responsible for creating the outputs and
+// returning the corresponding writer.
+//
+// Printing stops at the first failure, and the error returned identifies the
+// file that could not be opened or written.
+func (p *Printer) PrintProtoFiles(fds []*desc.FileDescriptor, open func(name string) (io.WriteCloser, error)) error {
+	for _, fd := range fds {
+		w, err := open(fd.GetName())
+		if err != nil {
+			return fmt.Errorf("failed to open %s: %v", fd.GetName(), err)
+		}
+		err = func() (err error) {
+			// Close the writer even if printing fails or panics. A Close
+			// error (e.g. a failed flush of buffered data) is reported unless
+			// printing already produced its own error, so that write failures
+			// surfaced only at Close time are not silently dropped.
+			defer func() {
+				if cerr := w.Close(); cerr != nil && err == nil {
+					err = cerr
+				}
+			}()
+			return p.PrintProtoFile(fd, w)
+		}()
+		if err != nil {
+			return fmt.Errorf("failed to write %s: %v", fd.GetName(), err)
+		}
+	}
+	return nil
+}
+
+// PrintProtosToFileSystem prints all of the given file descriptors to files in
+// the given directory. If file names in the given descriptors include path
+// information, they will be relative to the given root.
+func (p *Printer) PrintProtosToFileSystem(fds []*desc.FileDescriptor, rootDir string) error {
+	openFn := func(name string) (io.WriteCloser, error) {
+		fullPath := filepath.Join(rootDir, name)
+		// Make sure the parent directory chain exists before creating the file.
+		if err := os.MkdirAll(filepath.Dir(fullPath), os.ModePerm); err != nil {
+			return nil, err
+		}
+		return os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	}
+	return p.PrintProtoFiles(fds, openFn)
+}
+
+// pkg represents a package name. (It is a distinct string type, presumably so
+// printed elements can be distinguished by dynamic type — TODO confirm against
+// the sorting/printing code.)
+type pkg string
+
+// imp represents an imported file name
+type imp string
+
+// ident represents an identifier
+type ident string
+
+// option represents a resolved descriptor option: the option's rendered name
+// and the value to be printed for it.
+type option struct {
+	name string
+	val  interface{}
+}
+
+// reservedRange represents a reserved range from a message or enum
+type reservedRange struct {
+	start, end int32
+}
+
+// PrintProtoFile prints the given single file descriptor to the given writer.
+// It is a convenience wrapper around the descriptor-generic printProto.
+func (p *Printer) PrintProtoFile(fd *desc.FileDescriptor, out io.Writer) error {
+	return p.printProto(fd, out)
+}
+
+// PrintProtoToString prints the given descriptor and returns the resulting
+// string. This can be used to print proto files, but it can also be used to
+// get the proto "source form" for any kind of descriptor, which can be a more
+// user-friendly way to present descriptors that are intended for human
+// consumption.
+func (p *Printer) PrintProtoToString(dsc desc.Descriptor) (string, error) {
+	var sb strings.Builder
+	err := p.printProto(dsc, &sb)
+	if err != nil {
+		return "", err
+	}
+	return sb.String(), nil
+}
+
+// printProto renders any kind of descriptor to out, dispatching on the
+// descriptor's concrete type. It also normalizes the printer's configuration
+// (Indent, OmitComments), so it mutates the receiver.
+func (p *Printer) printProto(dsc desc.Descriptor, out io.Writer) error {
+	w := newWriter(out)
+
+	if p.Indent == "" {
+		// default indent to two spaces
+		p.Indent = "  "
+	} else {
+		// indent must be all spaces or tabs, so convert other chars to spaces
+		ind := make([]rune, 0, len(p.Indent))
+		for _, r := range p.Indent {
+			if r == '\t' {
+				ind = append(ind, r)
+			} else {
+				ind = append(ind, ' ')
+			}
+		}
+		p.Indent = string(ind)
+	}
+	// Fold the deprecated boolean flag into the bitmask that superseded it.
+	if p.OmitDetachedComments {
+		p.OmitComments |= CommentsDetached
+	}
+
+	// Register all extensions from the descriptor's file (recursively, i.e.
+	// including its imports) so custom options can be decoded and rendered.
+	er := dynamic.ExtensionRegistry{}
+	er.AddExtensionsFromFileRecursively(dsc.GetFile())
+	mf := dynamic.NewMessageFactoryWithExtensionRegistry(&er)
+	fdp := dsc.GetFile().AsFileDescriptorProto()
+	sourceInfo := internal.CreateSourceInfoMap(fdp)
+	extendOptionLocations(sourceInfo, fdp.GetSourceCodeInfo().GetLocation())
+
+	// path is the source-info path from the file root down to dsc, used to
+	// look up source locations and comments for the element being printed.
+	path := findElement(dsc)
+	switch d := dsc.(type) {
+	case *desc.FileDescriptor:
+		p.printFile(d, mf, w, sourceInfo)
+	case *desc.MessageDescriptor:
+		p.printMessage(d, mf, w, sourceInfo, path, 0)
+	case *desc.FieldDescriptor:
+		// scope is the namespace from which type references are resolved:
+		// the enclosing message if there is one, else the file's package.
+		var scope string
+		if md, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			scope = md.GetFullyQualifiedName()
+		} else {
+			scope = d.GetFile().GetPackage()
+		}
+		if d.IsExtension() {
+			// A lone extension field is wrapped in a synthetic
+			// "extend <Extendee> { ... }" block so the output is valid proto.
+			fmt.Fprint(w, "extend ")
+			extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
+			p.printElementString(extNameSi, w, 0, p.qualifyName(d.GetFile().GetPackage(), scope, d.GetOwner().GetFullyQualifiedName()))
+			fmt.Fprintln(w, "{")
+
+			p.printField(d, mf, w, sourceInfo, path, scope, 1)
+
+			fmt.Fprintln(w, "}")
+		} else {
+			p.printField(d, mf, w, sourceInfo, path, scope, 0)
+		}
+	case *desc.OneOfDescriptor:
+		// Printing a one-of requires addresses for all of the owning
+		// message's fields, since the one-of's members are message fields.
+		md := d.GetOwner()
+		elements := elementAddrs{dsc: md}
+		for i := range md.GetFields() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+		}
+		// The last path element is the one-of's index within its parent.
+		p.printOneOf(d, elements, 0, mf, w, sourceInfo, path[:len(path)-1], 0, path[len(path)-1])
+	case *desc.EnumDescriptor:
+		p.printEnum(d, mf, w, sourceInfo, path, 0)
+	case *desc.EnumValueDescriptor:
+		p.printEnumValue(d, mf, w, sourceInfo, path, 0)
+	case *desc.ServiceDescriptor:
+		p.printService(d, mf, w, sourceInfo, path, 0)
+	case *desc.MethodDescriptor:
+		p.printMethod(d, mf, w, sourceInfo, path, 0)
+	}
+
+	// w records the first write error encountered, if any.
+	return w.err
+}
+
// findElement computes the source-info path (alternating field tags and
// element indexes, as used by SourceCodeInfo locations) from the root of the
// file to the given descriptor. A file descriptor itself yields nil.
func findElement(dsc desc.Descriptor) []int32 {
	if dsc.GetParent() == nil {
		// reached the file descriptor; path is assembled on the way back out
		return nil
	}
	path := findElement(dsc.GetParent())
	switch d := dsc.(type) {
	case *desc.MessageDescriptor:
		// nested messages live under their parent message; top-level
		// messages live under the file
		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
			return append(path, internal.Message_nestedMessagesTag, getMessageIndex(d, pm.GetNestedMessageTypes()))
		}
		return append(path, internal.File_messagesTag, getMessageIndex(d, d.GetFile().GetMessageTypes()))

	case *desc.FieldDescriptor:
		// extensions are indexed among their declaring scope's extensions;
		// normal fields among their owner message's fields
		if d.IsExtension() {
			if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
				return append(path, internal.Message_extensionsTag, getFieldIndex(d, pm.GetNestedExtensions()))
			}
			return append(path, internal.File_extensionsTag, getFieldIndex(d, d.GetFile().GetExtensions()))
		}
		return append(path, internal.Message_fieldsTag, getFieldIndex(d, d.GetOwner().GetFields()))

	case *desc.OneOfDescriptor:
		return append(path, internal.Message_oneOfsTag, getOneOfIndex(d, d.GetOwner().GetOneOfs()))

	case *desc.EnumDescriptor:
		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
			return append(path, internal.Message_enumsTag, getEnumIndex(d, pm.GetNestedEnumTypes()))
		}
		return append(path, internal.File_enumsTag, getEnumIndex(d, d.GetFile().GetEnumTypes()))

	case *desc.EnumValueDescriptor:
		return append(path, internal.Enum_valuesTag, getEnumValueIndex(d, d.GetEnum().GetValues()))

	case *desc.ServiceDescriptor:
		return append(path, internal.File_servicesTag, getServiceIndex(d, d.GetFile().GetServices()))

	case *desc.MethodDescriptor:
		return append(path, internal.Service_methodsTag, getMethodIndex(d, d.GetService().GetMethods()))

	default:
		panic(fmt.Sprintf("unexpected descriptor type: %T", dsc))
	}
}
+
+func getMessageIndex(md *desc.MessageDescriptor, list []*desc.MessageDescriptor) int32 {
+ for i := range list {
+ if md == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of message %s", md.GetFullyQualifiedName()))
+}
+
+func getFieldIndex(fd *desc.FieldDescriptor, list []*desc.FieldDescriptor) int32 {
+ for i := range list {
+ if fd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of field %s", fd.GetFullyQualifiedName()))
+}
+
+func getOneOfIndex(ood *desc.OneOfDescriptor, list []*desc.OneOfDescriptor) int32 {
+ for i := range list {
+ if ood == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of oneof %s", ood.GetFullyQualifiedName()))
+}
+
+func getEnumIndex(ed *desc.EnumDescriptor, list []*desc.EnumDescriptor) int32 {
+ for i := range list {
+ if ed == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of enum %s", ed.GetFullyQualifiedName()))
+}
+
+func getEnumValueIndex(evd *desc.EnumValueDescriptor, list []*desc.EnumValueDescriptor) int32 {
+ for i := range list {
+ if evd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of enum value %s", evd.GetFullyQualifiedName()))
+}
+
+func getServiceIndex(sd *desc.ServiceDescriptor, list []*desc.ServiceDescriptor) int32 {
+ for i := range list {
+ if sd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of service %s", sd.GetFullyQualifiedName()))
+}
+
+func getMethodIndex(mtd *desc.MethodDescriptor, list []*desc.MethodDescriptor) int32 {
+ for i := range list {
+ if mtd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of method %s", mtd.GetFullyQualifiedName()))
+}
+
+func (p *Printer) newLine(w io.Writer) {
+ if !p.Compact {
+ fmt.Fprintln(w)
+ }
+}
+
+func (p *Printer) printFile(fd *desc.FileDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap) {
+ opts, err := p.extractOptions(fd, fd.GetOptions(), mf)
+ if err != nil {
+ return
+ }
+
+ fdp := fd.AsFileDescriptorProto()
+ path := make([]int32, 1)
+
+ path[0] = internal.File_packageTag
+ sourceInfo.PutIfAbsent(append(path, 0), sourceInfo.Get(path))
+
+ path[0] = internal.File_syntaxTag
+ si := sourceInfo.Get(path)
+ p.printElement(false, si, w, 0, func(w *writer) {
+ syn := fdp.GetSyntax()
+ if syn == "" {
+ syn = "proto2"
+ }
+ fmt.Fprintf(w, "syntax = %q;", syn)
+ })
+ p.newLine(w)
+
+ skip := map[interface{}]bool{}
+
+ elements := elementAddrs{dsc: fd, opts: opts}
+ if fdp.Package != nil {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_packageTag, elementIndex: 0, order: -3})
+ }
+ for i := range fd.AsFileDescriptorProto().GetDependency() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_dependencyTag, elementIndex: i, order: -2})
+ }
+ elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.File_optionsTag, -1, opts)...)
+ for i := range fd.GetMessageTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_messagesTag, elementIndex: i})
+ }
+ for i := range fd.GetEnumTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_enumsTag, elementIndex: i})
+ }
+ for i := range fd.GetServices() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_servicesTag, elementIndex: i})
+ }
+ exts := p.computeExtensions(sourceInfo, fd.GetExtensions(), []int32{internal.File_extensionsTag})
+ for i, extd := range fd.GetExtensions() {
+ if extd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP {
+ // we don't emit nested messages for groups since
+ // they get special treatment
+ skip[extd.GetMessageType()] = true
+ }
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_extensionsTag, elementIndex: i})
+ }
+
+ p.sort(elements, sourceInfo, nil)
+
+ pkgName := fd.GetPackage()
+
+ for i, el := range elements.addrs {
+ d := elements.at(el)
+
+ // skip[d] will panic if d is a slice (which it could be for []option),
+ // so just ignore it since we don't try to skip options
+ if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+ // skip this element
+ continue
+ }
+
+ if i > 0 {
+ p.newLine(w)
+ }
+
+ path = []int32{el.elementType, int32(el.elementIndex)}
+
+ switch d := d.(type) {
+ case pkg:
+ si := sourceInfo.Get(path)
+ p.printElement(false, si, w, 0, func(w *writer) {
+ fmt.Fprintf(w, "package %s;", d)
+ })
+ case imp:
+ si := sourceInfo.Get(path)
+ p.printElement(false, si, w, 0, func(w *writer) {
+ fmt.Fprintf(w, "import %q;", d)
+ })
+ case []option:
+ p.printOptionsLong(d, w, sourceInfo, path, 0)
+ case *desc.MessageDescriptor:
+ p.printMessage(d, mf, w, sourceInfo, path, 0)
+ case *desc.EnumDescriptor:
+ p.printEnum(d, mf, w, sourceInfo, path, 0)
+ case *desc.ServiceDescriptor:
+ p.printService(d, mf, w, sourceInfo, path, 0)
+ case *desc.FieldDescriptor:
+ extDecl := exts[d]
+ p.printExtensions(extDecl, exts, elements, i, mf, w, sourceInfo, nil, internal.File_extensionsTag, pkgName, pkgName, 0)
+ // we printed all extensions in the group, so we can skip the others
+ for _, fld := range extDecl.fields {
+ skip[fld] = true
+ }
+ }
+ }
+}
+
+func findExtSi(fieldSi *descriptor.SourceCodeInfo_Location, extSis []*descriptor.SourceCodeInfo_Location) *descriptor.SourceCodeInfo_Location {
+ if len(fieldSi.GetSpan()) == 0 {
+ return nil
+ }
+ for _, extSi := range extSis {
+ if isSpanWithin(fieldSi.Span, extSi.Span) {
+ return extSi
+ }
+ }
+ return nil
+}
+
// isSpanWithin reports whether span begins inside the region described by
// enclosing. Spans use SourceCodeInfo conventions: three elements mean
// [start line, start col, end col] (single-line), four mean
// [start line, start col, end line, end col].
func isSpanWithin(span, enclosing []int32) bool {
	startLine := enclosing[0]
	endLine := enclosing[0]
	if len(enclosing) != 3 {
		endLine = enclosing[2]
	}
	switch {
	case span[0] < startLine || span[0] > endLine:
		return false
	case span[0] == startLine:
		// on the first line: must start at or after the enclosing start column
		return span[1] >= enclosing[1]
	case span[0] == endLine:
		// on the last line: must start at or before the enclosing end column
		return span[1] <= enclosing[len(enclosing)-1]
	default:
		return true
	}
}
+
// extensionDecl represents a single "extend" block: one or more extension
// fields that target the same extendee and share the same source location.
type extensionDecl struct {
	extendee   string // fully-qualified name of the extended message
	sourceInfo *descriptor.SourceCodeInfo_Location // location of the extend block; may be nil when unknown
	fields     []*desc.FieldDescriptor // the extension fields declared in this block
}

// extensions maps each extension field to the extend block that declares it.
type extensions map[*desc.FieldDescriptor]*extensionDecl
+
+func (p *Printer) computeExtensions(sourceInfo internal.SourceInfoMap, exts []*desc.FieldDescriptor, path []int32) extensions {
+ extsMap := map[string]map[*descriptor.SourceCodeInfo_Location]*extensionDecl{}
+ extSis := sourceInfo.GetAll(path)
+ for _, extd := range exts {
+ name := extd.GetOwner().GetFullyQualifiedName()
+ extSi := findExtSi(extd.GetSourceInfo(), extSis)
+ extsBySi := extsMap[name]
+ if extsBySi == nil {
+ extsBySi = map[*descriptor.SourceCodeInfo_Location]*extensionDecl{}
+ extsMap[name] = extsBySi
+ }
+ extDecl := extsBySi[extSi]
+ if extDecl == nil {
+ extDecl = &extensionDecl{
+ sourceInfo: extSi,
+ extendee: name,
+ }
+ extsBySi[extSi] = extDecl
+ }
+ extDecl.fields = append(extDecl.fields, extd)
+ }
+
+ ret := extensions{}
+ for _, extsBySi := range extsMap {
+ for _, extDecl := range extsBySi {
+ for _, extd := range extDecl.fields {
+ ret[extd] = extDecl
+ }
+ }
+ }
+ return ret
+}
+
+func (p *Printer) sort(elements elementAddrs, sourceInfo internal.SourceInfoMap, path []int32) {
+ if p.SortElements {
+ // canonical sorted order
+ sort.Stable(elements)
+ } else {
+ // use source order (per location information in SourceCodeInfo); or
+ // if that isn't present use declaration order, but grouped by type
+ sort.Stable(elementSrcOrder{
+ elementAddrs: elements,
+ sourceInfo: sourceInfo,
+ prefix: path,
+ })
+ }
+}
+
+func (p *Printer) qualifyName(pkg, scope string, fqn string) string {
+ if p.ForceFullyQualifiedNames {
+ // forcing fully-qualified names; make sure to include preceding dot
+ if fqn[0] == '.' {
+ return fqn
+ }
+ return fmt.Sprintf(".%s", fqn)
+ }
+
+ // compute relative name (so no leading dot)
+ if fqn[0] == '.' {
+ fqn = fqn[1:]
+ }
+ if len(scope) > 0 && scope[len(scope)-1] != '.' {
+ scope = scope + "."
+ }
+ for scope != "" {
+ if strings.HasPrefix(fqn, scope) {
+ return fqn[len(scope):]
+ }
+ if scope == pkg+"." {
+ break
+ }
+ pos := strings.LastIndex(scope[:len(scope)-1], ".")
+ scope = scope[:pos+1]
+ }
+ return fqn
+}
+
+func (p *Printer) typeString(fld *desc.FieldDescriptor, scope string) string {
+ if fld.IsMap() {
+ return fmt.Sprintf("map<%s, %s>", p.typeString(fld.GetMapKeyType(), scope), p.typeString(fld.GetMapValueType(), scope))
+ }
+ switch fld.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_INT32:
+ return "int32"
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ return "int64"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ return "uint32"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ return "uint64"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ return "sint32"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return "sint64"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ return "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ return "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ return "sfixed32"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ return "sfixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ return "float"
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return "double"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return "bool"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return "string"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetEnumType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetMessageType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ return fld.GetMessageType().GetName()
+ }
+ panic(fmt.Sprintf("invalid type: %v", fld.GetType()))
+}
+
// printMessage prints a complete message declaration — leading comments,
// "message <name> {", body, and closing brace — at the given indent level.
func (p *Printer) printMessage(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "message ")
		nameSi := sourceInfo.Get(append(path, internal.Message_nameTag))
		p.printElementString(nameSi, w, indent, md.GetName())
		fmt.Fprintln(w, "{")

		// body is indented one more level; the closing brace returns to
		// the message's own indent
		p.printMessageBody(md, mf, w, sourceInfo, path, indent+1)
		p.indent(w, indent)
		fmt.Fprintln(w, "}")
	})
}
+
// printMessageBody prints everything between a message's braces: options,
// reserved ranges and names, extension ranges, fields (including one-ofs,
// groups, and map fields), nested messages and enums, and nested extend
// blocks. Elements are emitted in canonical or source order (see
// Printer.SortElements). Option-extraction errors are recorded on w.err.
func (p *Printer) printMessageBody(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	opts, err := p.extractOptions(md, md.GetOptions(), mf)
	if err != nil {
		if w.err == nil {
			w.err = err
		}
		return
	}

	// elements already rendered as part of another element (map-entry and
	// group messages, one-of member fields, collapsed ranges/names)
	skip := map[interface{}]bool{}

	elements := elementAddrs{dsc: md, opts: opts}
	elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Message_optionsTag, -1, opts)...)
	for i := range md.AsDescriptorProto().GetReservedRange() {
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedRangeTag, elementIndex: i})
	}
	for i := range md.AsDescriptorProto().GetReservedName() {
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedNameTag, elementIndex: i})
	}
	for i := range md.AsDescriptorProto().GetExtensionRange() {
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionRangeTag, elementIndex: i})
	}
	for i, fld := range md.GetFields() {
		if fld.IsMap() || fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP {
			// we don't emit nested messages for map types or groups since
			// they get special treatment
			skip[fld.GetMessageType()] = true
		}
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
	}
	for i := range md.GetNestedMessageTypes() {
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_nestedMessagesTag, elementIndex: i})
	}
	for i := range md.GetNestedEnumTypes() {
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_enumsTag, elementIndex: i})
	}
	exts := p.computeExtensions(sourceInfo, md.GetNestedExtensions(), append(path, internal.Message_extensionsTag))
	for i, extd := range md.GetNestedExtensions() {
		if extd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP {
			// we don't emit nested messages for groups since
			// they get special treatment
			skip[extd.GetMessageType()] = true
		}
		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionsTag, elementIndex: i})
	}

	p.sort(elements, sourceInfo, path)

	pkg := md.GetFile().GetPackage()
	scope := md.GetFullyQualifiedName()

	for i, el := range elements.addrs {
		d := elements.at(el)

		// skip[d] will panic if d is a slice (which it could be for []option),
		// so just ignore it since we don't try to skip options
		if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
			// skip this element
			continue
		}

		if i > 0 {
			p.newLine(w)
		}

		childPath := append(path, el.elementType, int32(el.elementIndex))

		switch d := d.(type) {
		case []option:
			p.printOptionsLong(d, w, sourceInfo, childPath, indent)
		case *desc.FieldDescriptor:
			if d.IsExtension() {
				// print the whole extend block this field belongs to, then
				// mark its siblings so they aren't printed again
				extDecl := exts[d]
				p.printExtensions(extDecl, exts, elements, i, mf, w, sourceInfo, path, internal.Message_extensionsTag, pkg, scope, indent)
				// we printed all extensions in the group, so we can skip the others
				for _, fld := range extDecl.fields {
					skip[fld] = true
				}
			} else {
				ood := d.GetOneOf()
				if ood == nil {
					p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
				} else {
					// print the one-of, including all of its fields
					p.printOneOf(ood, elements, i, mf, w, sourceInfo, path, indent, d.AsFieldDescriptorProto().GetOneofIndex())
					for _, fld := range ood.GetChoices() {
						skip[fld] = true
					}
				}
			}
		case *desc.MessageDescriptor:
			p.printMessage(d, mf, w, sourceInfo, childPath, indent)
		case *desc.EnumDescriptor:
			p.printEnum(d, mf, w, sourceInfo, childPath, indent)
		case *descriptor.DescriptorProto_ExtensionRange:
			// collapse ranges into a single "extensions" block
			ranges := []*descriptor.DescriptorProto_ExtensionRange{d}
			addrs := []elementAddr{el}
			for idx := i + 1; idx < len(elements.addrs); idx++ {
				elnext := elements.addrs[idx]
				if elnext.elementType != el.elementType {
					break
				}
				extr := elements.at(elnext).(*descriptor.DescriptorProto_ExtensionRange)
				// only ranges with identical options may share a declaration
				if !areEqual(d.Options, extr.Options, mf) {
					break
				}
				ranges = append(ranges, extr)
				addrs = append(addrs, elnext)
				skip[extr] = true
			}
			p.printExtensionRanges(md, ranges, addrs, mf, w, sourceInfo, path, indent)
		case reservedRange:
			// collapse reserved ranges into a single "reserved" block
			ranges := []reservedRange{d}
			addrs := []elementAddr{el}
			for idx := i + 1; idx < len(elements.addrs); idx++ {
				elnext := elements.addrs[idx]
				if elnext.elementType != el.elementType {
					break
				}
				rr := elements.at(elnext).(reservedRange)
				ranges = append(ranges, rr)
				addrs = append(addrs, elnext)
				skip[rr] = true
			}
			p.printReservedRanges(ranges, false, addrs, w, sourceInfo, path, indent)
		case string: // reserved name
			// collapse reserved names into a single "reserved" block
			names := []string{d}
			addrs := []elementAddr{el}
			for idx := i + 1; idx < len(elements.addrs); idx++ {
				elnext := elements.addrs[idx]
				if elnext.elementType != el.elementType {
					break
				}
				rn := elements.at(elnext).(string)
				names = append(names, rn)
				addrs = append(addrs, elnext)
				skip[rn] = true
			}
			p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
		}
	}
}
+
+func areEqual(a, b proto.Message, mf *dynamic.MessageFactory) bool {
+ // proto.Equal doesn't handle unknown extensions very well :(
+ // so we convert to a dynamic message (which should know about all extensions via
+ // extension registry) and then compare
+ return dynamic.MessagesEqual(asDynamicIfPossible(a, mf), asDynamicIfPossible(b, mf))
+}
+
+func asDynamicIfPossible(msg proto.Message, mf *dynamic.MessageFactory) proto.Message {
+ if dm, ok := msg.(*dynamic.Message); ok {
+ return dm
+ } else {
+ md, err := desc.LoadMessageDescriptorForMessage(msg)
+ if err == nil {
+ dm := mf.NewDynamicMessage(md)
+ if dm.ConvertFrom(msg) == nil {
+ return dm
+ }
+ }
+ }
+ return msg
+}
+
// printField prints a single field declaration (label, type, name, number,
// and compact options), or the "group" form for group fields, at the given
// indent. scope is the fully-qualified name used to relativize type names.
func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, scope string, indent int) {
	var groupPath []int32
	var si *descriptor.SourceCodeInfo_Location
	if isGroup(fld) {
		// compute path to group message type: drop the trailing
		// (field tag, index) pair and append the message's tag and index
		groupPath = make([]int32, len(path)-2)
		copy(groupPath, path)

		var candidates []*desc.MessageDescriptor
		var parentTag int32
		switch parent := fld.GetParent().(type) {
		case *desc.MessageDescriptor:
			// group in a message
			candidates = parent.GetNestedMessageTypes()
			parentTag = internal.Message_nestedMessagesTag
		case *desc.FileDescriptor:
			// group that is a top-level extension
			candidates = parent.GetMessageTypes()
			parentTag = internal.File_messagesTag
		}

		var groupMsgIndex int32
		for i, nmd := range candidates {
			if nmd == fld.GetMessageType() {
				// found it
				groupMsgIndex = int32(i)
				break
			}
		}
		groupPath = append(groupPath, parentTag, groupMsgIndex)

		// the group message is where the field's comments and position are stored
		si = sourceInfo.Get(groupPath)
	} else {
		si = sourceInfo.Get(path)
	}

	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)
		if shouldEmitLabel(fld) {
			locSi := sourceInfo.Get(append(path, internal.Field_labelTag))
			p.printElementString(locSi, w, indent, labelString(fld.GetLabel()))
		}

		if isGroup(fld) {
			// groups print as: group <Name> = <num> { <body> }
			fmt.Fprint(w, "group ")

			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))
			fmt.Fprint(w, "= ")

			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))

			fmt.Fprintln(w, "{")
			p.printMessageBody(fld.GetMessageType(), mf, w, sourceInfo, groupPath, indent+1)

			p.indent(w, indent)
			fmt.Fprintln(w, "}")
		} else {
			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))

			nameSi := sourceInfo.Get(append(path, internal.Field_nameTag))
			p.printElementString(nameSi, w, indent, fld.GetName())
			fmt.Fprint(w, "= ")

			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))

			opts, err := p.extractOptions(fld, fld.GetOptions(), mf)
			if err != nil {
				if w.err == nil {
					w.err = err
				}
				return
			}

			// we use negative values for "extras" keys so they can't collide
			// with legit option tags

			if !fld.GetFile().IsProto3() && fld.AsFieldDescriptorProto().DefaultValue != nil {
				// synthesize a [default = ...] option for proto2 defaults;
				// enum defaults are rendered via their value descriptor
				defVal := fld.GetDefaultValue()
				if fld.GetEnumType() != nil {
					defVal = fld.GetEnumType().FindValueByNumber(defVal.(int32))
				}
				opts[-internal.Field_defaultTag] = []option{{name: "default", val: defVal}}
			}

			// only emit json_name when it differs from the auto-computed one
			jsn := fld.AsFieldDescriptorProto().GetJsonName()
			if jsn != "" && jsn != internal.JsonName(fld.GetName()) {
				opts[-internal.Field_jsonNameTag] = []option{{name: "json_name", val: jsn}}
			}

			elements := elementAddrs{dsc: fld, opts: opts}
			elements.addrs = optionsAsElementAddrs(internal.Field_optionsTag, 0, opts)
			p.sort(elements, sourceInfo, path)
			p.printOptionElementsShort(elements, w, sourceInfo, path, indent)

			fmt.Fprint(w, ";")
		}
	})
}
+
+func shouldEmitLabel(fld *desc.FieldDescriptor) bool {
+ return !fld.IsMap() && fld.GetOneOf() == nil && (fld.GetLabel() != descriptor.FieldDescriptorProto_LABEL_OPTIONAL || !fld.GetFile().IsProto3())
+}
+
+func labelString(lbl descriptor.FieldDescriptorProto_Label) string {
+ switch lbl {
+ case descriptor.FieldDescriptorProto_LABEL_OPTIONAL:
+ return "optional"
+ case descriptor.FieldDescriptorProto_LABEL_REQUIRED:
+ return "required"
+ case descriptor.FieldDescriptorProto_LABEL_REPEATED:
+ return "repeated"
+ }
+ panic(fmt.Sprintf("invalid label: %v", lbl))
+}
+
// isGroup reports whether the given field has group type (and so is printed
// with "group" syntax rather than as a normal field declaration).
func isGroup(fld *desc.FieldDescriptor) bool {
	return fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
}
+
// printOneOf prints a one-of declaration, including its options and all of
// its member fields. parentElements holds the owning message's element
// addresses (scanned from startFieldIndex to find the members); ooIndex is
// the one-of's index within its message.
func (p *Printer) printOneOf(ood *desc.OneOfDescriptor, parentElements elementAddrs, startFieldIndex int, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, ooIndex int32) {
	oopath := append(parentPath, internal.Message_oneOfsTag, ooIndex)
	oosi := sourceInfo.Get(oopath)
	p.printElement(true, oosi, w, indent, func(w *writer) {
		p.indent(w, indent)
		fmt.Fprint(w, "oneof ")
		extNameSi := sourceInfo.Get(append(oopath, internal.OneOf_nameTag))
		p.printElementString(extNameSi, w, indent, ood.GetName())
		fmt.Fprintln(w, "{")

		indent++
		opts, err := p.extractOptions(ood, ood.GetOptions(), mf)
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		elements := elementAddrs{dsc: ood, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.OneOf_optionsTag, -1, opts)...)

		// gather the one-of's member fields from the parent message's
		// element list; stop once all choices have been found
		count := len(ood.GetChoices())
		for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
			el := parentElements.addrs[idx]
			if el.elementType != internal.Message_fieldsTag {
				continue
			}
			if parentElements.at(el).(*desc.FieldDescriptor).GetOneOf() == ood {
				// negative tag indicates that this element is actually a sibling, not a child
				elements.addrs = append(elements.addrs, elementAddr{elementType: -internal.Message_fieldsTag, elementIndex: el.elementIndex})
				count--
			}
		}

		// the fields are already sorted, but we have to re-sort in order to
		// interleave the options (in the event that we are using file location
		// order and the option locations are interleaved with the fields)
		p.sort(elements, sourceInfo, oopath)
		scope := ood.GetOwner().GetFullyQualifiedName()

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			switch d := elements.at(el).(type) {
			case []option:
				childPath := append(oopath, el.elementType, int32(el.elementIndex))
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.FieldDescriptor:
				// undo the negative sibling tag to build the field's real path
				childPath := append(parentPath, -el.elementType, int32(el.elementIndex))
				p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
// printExtensions prints a single "extend" block containing all extension
// fields in exts. parentElements is scanned from startFieldIndex for fields
// whose entry in allExts matches exts; extTag identifies extension elements
// in the parent's address list.
func (p *Printer) printExtensions(exts *extensionDecl, allExts extensions, parentElements elementAddrs, startFieldIndex int, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, extTag int32, pkg, scope string, indent int) {
	path := append(parentPath, extTag)
	p.printLeadingComments(exts.sourceInfo, w, indent)
	p.indent(w, indent)
	fmt.Fprint(w, "extend ")
	extNameSi := sourceInfo.Get(append(path, 0, internal.Field_extendeeTag))
	p.printElementString(extNameSi, w, indent, p.qualifyName(pkg, scope, exts.extendee))
	fmt.Fprintln(w, "{")

	// print every field belonging to this extend block, in parent order;
	// stop once all of them have been emitted
	count := len(exts.fields)
	first := true
	for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
		el := parentElements.addrs[idx]
		if el.elementType != extTag {
			continue
		}
		fld := parentElements.at(el).(*desc.FieldDescriptor)
		if allExts[fld] == exts {
			if first {
				first = false
			} else {
				p.newLine(w)
			}
			childPath := append(path, int32(el.elementIndex))
			p.printField(fld, mf, w, sourceInfo, childPath, scope, indent+1)
			count--
		}
	}

	p.indent(w, indent)
	fmt.Fprintln(w, "}")
	p.printTrailingComments(exts.sourceInfo, w, indent)
	if indent >= 0 && !w.newline {
		// if we're not printing inline but element did not have trailing newline, add one now
		fmt.Fprintln(w)
	}
}
+
// printExtensionRanges prints a comma-separated "extensions" declaration for
// the given ranges (which the caller has already grouped by equal options),
// followed by any compact options. addrs parallels ranges, supplying each
// range's element address for source-info lookup.
func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges []*descriptor.DescriptorProto_ExtensionRange, addrs []elementAddr, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
	p.indent(w, indent)
	fmt.Fprint(w, "extensions ")

	var opts *descriptor.ExtensionRangeOptions
	var elPath []int32
	first := true
	for i, extr := range ranges {
		if first {
			first = false
		} else {
			fmt.Fprint(w, ", ")
		}
		// all ranges in the group have equal options, so the last one seen
		// is as good as any for printing the shared options below
		opts = extr.Options
		el := addrs[i]
		elPath = append(parentPath, el.elementType, int32(el.elementIndex))
		si := sourceInfo.Get(elPath)
		p.printElement(true, si, w, inline(indent), func(w *writer) {
			// descriptor ranges are half-open, so print end-1 as the
			// inclusive upper bound
			if extr.GetStart() == extr.GetEnd()-1 {
				fmt.Fprintf(w, "%d ", extr.GetStart())
			} else if extr.GetEnd()-1 == internal.MaxTag {
				fmt.Fprintf(w, "%d to max ", extr.GetStart())
			} else {
				fmt.Fprintf(w, "%d to %d ", extr.GetStart(), extr.GetEnd()-1)
			}
		})
	}
	dsc := extensionRange{owner: parent, extRange: ranges[0]}
	p.printOptionsShort(dsc, opts, mf, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent)

	fmt.Fprintln(w, ";")
}
+
+func (p *Printer) printReservedRanges(ranges []reservedRange, isEnum bool, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+ p.indent(w, indent)
+ fmt.Fprint(w, "reserved ")
+
+ first := true
+ for i, rr := range ranges {
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(w, ", ")
+ }
+ el := addrs[i]
+ si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
+ p.printElement(false, si, w, inline(indent), func(w *writer) {
+ if rr.start == rr.end {
+ fmt.Fprintf(w, "%d ", rr.start)
+ } else if (rr.end == internal.MaxTag && !isEnum) ||
+ (rr.end == math.MaxInt32 && isEnum) {
+ fmt.Fprintf(w, "%d to max ", rr.start)
+ } else {
+ fmt.Fprintf(w, "%d to %d ", rr.start, rr.end)
+ }
+ })
+ }
+
+ fmt.Fprintln(w, ";")
+}
+
+func (p *Printer) printReservedNames(names []string, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+ p.indent(w, indent)
+ fmt.Fprint(w, "reserved ")
+
+ first := true
+ for i, name := range names {
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(w, ", ")
+ }
+ el := addrs[i]
+ si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
+ p.printElementString(si, w, indent, quotedString(name))
+ }
+
+ fmt.Fprintln(w, ";")
+}
+
// printEnum prints a complete enum declaration: options, values, and
// reserved ranges/names, in canonical or source order (see
// Printer.SortElements). Option-extraction errors are recorded on w.err.
func (p *Printer) printEnum(ed *desc.EnumDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "enum ")
		nameSi := sourceInfo.Get(append(path, internal.Enum_nameTag))
		p.printElementString(nameSi, w, indent, ed.GetName())
		fmt.Fprintln(w, "{")

		indent++
		opts, err := p.extractOptions(ed, ed.GetOptions(), mf)
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		// reserved ranges/names already folded into an earlier "reserved" block
		skip := map[interface{}]bool{}

		elements := elementAddrs{dsc: ed, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Enum_optionsTag, -1, opts)...)
		for i := range ed.GetValues() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_valuesTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedRange() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedRangeTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedName() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedNameTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			d := elements.at(el)

			// skip[d] will panic if d is a slice (which it could be for []option),
			// so just ignore it since we don't try to skip options
			if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
				// skip this element
				continue
			}

			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := d.(type) {
			case []option:
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.EnumValueDescriptor:
				p.printEnumValue(d, mf, w, sourceInfo, childPath, indent)
			case reservedRange:
				// collapse reserved ranges into a single "reserved" block
				ranges := []reservedRange{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rr := elements.at(elnext).(reservedRange)
					ranges = append(ranges, rr)
					addrs = append(addrs, elnext)
					skip[rr] = true
				}
				p.printReservedRanges(ranges, true, addrs, w, sourceInfo, path, indent)
			case string: // reserved name
				// collapse reserved names into a single "reserved" block
				names := []string{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rn := elements.at(elnext).(string)
					names = append(names, rn)
					addrs = append(addrs, elnext)
					skip[rn] = true
				}
				p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
// printEnumValue renders a single enum value entry, e.g. `FOO = 1 [deprecated = true];`,
// including any comments attached to it via source info. path addresses this value
// within the file's source info; indent is the current nesting depth.
func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		// name, then "=", then the numeric value, each element carrying its own source info
		nameSi := sourceInfo.Get(append(path, internal.EnumVal_nameTag))
		p.printElementString(nameSi, w, indent, evd.GetName())
		fmt.Fprint(w, "= ")

		numSi := sourceInfo.Get(append(path, internal.EnumVal_numberTag))
		p.printElementString(numSi, w, indent, fmt.Sprintf("%d", evd.GetNumber()))

		// options, if any, are printed inline in bracketed form
		p.printOptionsShort(evd, evd.GetOptions(), mf, internal.EnumVal_optionsTag, w, sourceInfo, path, indent)

		fmt.Fprint(w, ";")
	})
}
+
// printService renders a service declaration and all of its contained elements
// (options and methods), sorted via p.sort, with comments from source info.
func (p *Printer) printService(sd *desc.ServiceDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "service ")
		nameSi := sourceInfo.Get(append(path, internal.Service_nameTag))
		p.printElementString(nameSi, w, indent, sd.GetName())
		fmt.Fprintln(w, "{")

		indent++

		opts, err := p.extractOptions(sd, sd.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and stop printing this element
			if w.err == nil {
				w.err = err
			}
			return
		}

		// gather addresses of all child elements (options first, then methods),
		// then sort them into the order in which they should be printed
		elements := elementAddrs{dsc: sd, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Service_optionsTag, -1, opts)...)
		for i := range sd.GetMethods() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Service_methodsTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := elements.at(el).(type) {
			case []option:
				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
			case *desc.MethodDescriptor:
				p.printMethod(d, mf, w, sourceInfo, childPath, indent)
			}
		}

		p.indent(w, indent-1)
		fmt.Fprintln(w, "}")
	})
}
+
// printMethod renders an rpc declaration, including "stream" qualifiers on the
// request/response types. Method options, when present, are printed one per
// line inside a braced block; otherwise the declaration ends with a semicolon.
func (p *Printer) printMethod(mtd *desc.MethodDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	pkg := mtd.GetFile().GetPackage()
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		fmt.Fprint(w, "rpc ")
		nameSi := sourceInfo.Get(append(path, internal.Method_nameTag))
		p.printElementString(nameSi, w, indent, mtd.GetName())

		// request type, qualified relative to this file's package
		fmt.Fprint(w, "( ")
		inSi := sourceInfo.Get(append(path, internal.Method_inputTag))
		inName := p.qualifyName(pkg, pkg, mtd.GetInputType().GetFullyQualifiedName())
		if mtd.IsClientStreaming() {
			inName = "stream " + inName
		}
		p.printElementString(inSi, w, indent, inName)

		fmt.Fprint(w, ") returns ( ")

		// response type, also package-qualified
		outSi := sourceInfo.Get(append(path, internal.Method_outputTag))
		outName := p.qualifyName(pkg, pkg, mtd.GetOutputType().GetFullyQualifiedName())
		if mtd.IsServerStreaming() {
			outName = "stream " + outName
		}
		p.printElementString(outSi, w, indent, outName)
		fmt.Fprint(w, ") ")

		opts, err := p.extractOptions(mtd, mtd.GetOptions(), mf)
		if err != nil {
			// record the first error on the writer and stop printing this element
			if w.err == nil {
				w.err = err
			}
			return
		}

		if len(opts) > 0 {
			fmt.Fprintln(w, "{")
			indent++

			elements := elementAddrs{dsc: mtd, opts: opts}
			elements.addrs = optionsAsElementAddrs(internal.Method_optionsTag, 0, opts)
			p.sort(elements, sourceInfo, path)
			path = append(path, internal.Method_optionsTag)

			for i, addr := range elements.addrs {
				if i > 0 {
					p.newLine(w)
				}
				o := elements.at(addr).([]option)
				p.printOptionsLong(o, w, sourceInfo, path, indent)
			}

			p.indent(w, indent-1)
			fmt.Fprintln(w, "}")
		} else {
			fmt.Fprint(w, ";")
		}
	})
}
+
// printOptionsLong prints options in "long" form — one `option name = value;`
// statement per line — as used inside file, message, enum, and service bodies.
func (p *Printer) printOptionsLong(opts []option, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	p.printOptions(opts, w, indent,
		// source info for the i-th option lives at path + i
		func(i int32) *descriptor.SourceCodeInfo_Location {
			return sourceInfo.Get(append(path, i))
		},
		func(w *writer, indent int, opt option) {
			p.indent(w, indent)
			fmt.Fprint(w, "option ")
			p.printOption(opt.name, opt.val, w, indent)
			fmt.Fprint(w, ";")
		})
}
+
// printOptionsShort extracts the options from optsMsg and prints them in
// "short" (inline, bracketed) form, as used on fields, enum values, and
// methods. dsc is either a desc.Descriptor or an extensionRange.
func (p *Printer) printOptionsShort(dsc interface{}, optsMsg proto.Message, mf *dynamic.MessageFactory, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	d, ok := dsc.(desc.Descriptor)
	if !ok {
		// the only non-descriptor value callers pass is an extensionRange,
		// whose owning message supplies the descriptor context
		d = dsc.(extensionRange).owner
	}
	opts, err := p.extractOptions(d, optsMsg, mf)
	if err != nil {
		// record the first error on the writer and bail
		if w.err == nil {
			w.err = err
		}
		return
	}

	elements := elementAddrs{dsc: dsc, opts: opts}
	elements.addrs = optionsAsElementAddrs(optsTag, 0, opts)
	p.sort(elements, sourceInfo, path)
	p.printOptionElementsShort(elements, w, sourceInfo, path, indent)
}
+
// printOptionElementsShort prints the given, already-sorted option elements as
// a single bracketed list: `[name = value, name2 = value2]`. It prints nothing
// when there are no options.
func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	if len(addrs.addrs) == 0 {
		return
	}
	first := true
	fmt.Fprint(w, "[")
	for _, addr := range addrs.addrs {
		opts := addrs.at(addr).([]option)
		var childPath []int32
		if addr.elementIndex < 0 {
			// pseudo-option: the negated index is the actual source-info tag
			childPath = append(path, int32(-addr.elementIndex))
		} else {
			childPath = append(path, addr.elementType, int32(addr.elementIndex))
		}
		p.printOptions(opts, w, inline(indent),
			func(i int32) *descriptor.SourceCodeInfo_Location {
				p := childPath
				if addr.elementIndex >= 0 {
					p = append(p, i)
				}
				return sourceInfo.Get(p)
			},
			func(w *writer, indent int, opt option) {
				// comma-separate every option after the first
				if first {
					first = false
				} else {
					fmt.Fprint(w, ", ")
				}
				p.printOption(opt.name, opt.val, w, indent)
				fmt.Fprint(w, " ") // trailing space
			})
	}
	fmt.Fprint(w, "]")
}
+
// printOptions invokes fn for each option, wrapping the call in printElement
// so that comments from the location returned by siFetch surround it.
func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptor.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option)) {
	for i, opt := range opts {
		si := siFetch(int32(i))
		p.printElement(false, si, w, indent, func(w *writer) {
			fn(w, indent, opt)
		})
	}
}
+
// inline converts a normal (non-negative) indent level into its "inline"
// encoding. Negative indents signal inline mode to the printing helpers; a
// value that is already negative is returned unchanged.
func inline(indent int) int {
	if indent >= 0 {
		// encode as negative; the extra 2 levels keep wrapped values
		// indented further than their parent
		return -indent - 2
	}
	// already in inline form
	return indent
}
+
// sortedKeys implements sort.Interface over a slice of map keys whose dynamic
// types are one of the scalar types protobuf permits as map keys.
type sortedKeys []interface{}

func (k sortedKeys) Len() int      { return len(k) }
func (k sortedKeys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }

func (k sortedKeys) Less(i, j int) bool {
	switch vi := k[i].(type) {
	case int32:
		return vi < k[j].(int32)
	case uint32:
		return vi < k[j].(uint32)
	case int64:
		return vi < k[j].(int64)
	case uint64:
		return vi < k[j].(uint64)
	case string:
		return vi < k[j].(string)
	case bool:
		// false sorts before true
		return !vi && k[j].(bool)
	default:
		panic(fmt.Sprintf("invalid type for map key: %T", vi))
	}
}

// sortKeys returns the keys of m in a deterministic, sorted order. All keys
// must share one of the protobuf map-key scalar types; any other key type
// causes a panic.
func sortKeys(m map[interface{}]interface{}) []interface{} {
	keys := make(sortedKeys, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Sort(keys)
	return []interface{}(keys)
}
+
// printOption prints a single option name and value, e.g. `deprecated = true`.
// The value's dynamic type drives the rendering: integers and bools via Go
// formatting verbs, strings/bytes as proto text-format quoted literals, idents
// and enum values bare, and message values in compact text format inside
// braces. An unknown value type indicates a programming error and panics.
func (p *Printer) printOption(name string, optVal interface{}, w *writer, indent int) {
	fmt.Fprintf(w, "%s = ", name)

	switch optVal := optVal.(type) {
	case int32, uint32, int64, uint64:
		fmt.Fprintf(w, "%d", optVal)
	case float32, float64:
		fmt.Fprintf(w, "%f", optVal)
	case string:
		fmt.Fprintf(w, "%s", quotedString(optVal))
	case []byte:
		fmt.Fprintf(w, "%s", quotedString(string(optVal)))
	case bool:
		fmt.Fprintf(w, "%v", optVal)
	case ident:
		fmt.Fprintf(w, "%s", optVal)
	case *desc.EnumValueDescriptor:
		fmt.Fprintf(w, "%s", optVal.GetName())
	case proto.Message:
		// TODO: if value is too long, marshal to text format with indentation to
		// make output prettier (also requires correctly indenting subsequent lines)

		// TODO: alternate approach so we can apply p.ForceFullyQualifiedNames
		// inside the resulting value?

		fmt.Fprintf(w, "{ %s }", proto.CompactTextString(optVal))
	default:
		panic(fmt.Sprintf("unknown type of value %T for field %s", optVal, name))
	}
}
+
// edgeKind enumerates the node kinds of the state machine below, mirroring the
// containment hierarchy of a file descriptor (a file contains messages and
// services, messages contain fields, and so on).
type edgeKind int

const (
	edgeKindOption edgeKind = iota
	edgeKindFile
	edgeKindMessage
	edgeKindField
	edgeKindOneOf
	edgeKindExtensionRange
	edgeKindEnum
	edgeKindEnumVal
	edgeKindService
	edgeKindMethod
)
+
// edges in simple state machine for matching options paths
// whose prefix should be included in source info to handle
// the way options are printed (which cannot always include
// the full path from original source)
//
// Each inner map maps a descriptor-proto field tag to the kind of element that
// tag leads to; walking a source-info path through this table finds the paths
// that terminate in options.
var edges = map[edgeKind]map[int32]edgeKind{
	edgeKindFile: {
		internal.File_optionsTag:    edgeKindOption,
		internal.File_messagesTag:   edgeKindMessage,
		internal.File_enumsTag:      edgeKindEnum,
		internal.File_extensionsTag: edgeKindField,
		internal.File_servicesTag:   edgeKindService,
	},
	edgeKindMessage: {
		internal.Message_optionsTag:        edgeKindOption,
		internal.Message_fieldsTag:         edgeKindField,
		internal.Message_oneOfsTag:         edgeKindOneOf,
		internal.Message_nestedMessagesTag: edgeKindMessage,
		internal.Message_enumsTag:          edgeKindEnum,
		internal.Message_extensionsTag:     edgeKindField,
		internal.Message_extensionRangeTag: edgeKindExtensionRange,
		// TODO: reserved range tag
	},
	edgeKindField: {
		internal.Field_optionsTag: edgeKindOption,
	},
	edgeKindOneOf: {
		internal.OneOf_optionsTag: edgeKindOption,
	},
	edgeKindExtensionRange: {
		internal.ExtensionRange_optionsTag: edgeKindOption,
	},
	edgeKindEnum: {
		internal.Enum_optionsTag: edgeKindOption,
		internal.Enum_valuesTag:  edgeKindEnumVal,
	},
	edgeKindEnumVal: {
		internal.EnumVal_optionsTag: edgeKindOption,
	},
	edgeKindService: {
		internal.Service_optionsTag: edgeKindOption,
		internal.Service_methodsTag: edgeKindMethod,
	},
	edgeKindMethod: {
		internal.Method_optionsTag: edgeKindOption,
	},
}
+
// extendOptionLocations augments sc with extra entries for option paths.
// Options are not always printed with the same path structure they had in the
// original source, so for every source location whose path leads to an option
// (per the edges state machine) this registers normalized variants of the path
// (prefix+tag+index, prefix+tag+0, and prefix+tag) pointing at the same
// location, so lookups during printing and sorting succeed.
func extendOptionLocations(sc internal.SourceInfoMap, locs []*descriptor.SourceCodeInfo_Location) {
	// we iterate in the order that locations appear in descriptor
	// for determinism (if we ranged over the map, order and thus
	// potentially results are non-deterministic)
	for _, loc := range locs {
		allowed := edges[edgeKindFile]
		// paths alternate (tag, index) pairs, so walk two elements at a time
		for i := 0; i+1 < len(loc.Path); i += 2 {
			nextKind, ok := allowed[loc.Path[i]]
			if !ok {
				break
			}
			if nextKind == edgeKindOption {
				// We've found an option entry. This could be arbitrarily
				// deep (for options that nested messages) or it could end
				// abruptly (for non-repeated fields). But we need a path
				// that is exactly the path-so-far plus two: the option tag
				// and an optional index for repeated option fields (zero
				// for non-repeated option fields). This is used for
				// querying source info when printing options.
				// for sorting elements
				newPath := make([]int32, i+3)
				copy(newPath, loc.Path)
				sc.PutIfAbsent(newPath, loc)
				// we do another path of path-so-far plus two, but with
				// explicit zero index -- just in case this actual path has
				// an extra path element, but it's not an index (e.g the
				// option field is not repeated, but the source info we are
				// looking at indicates a tag of a nested field)
				newPath[len(newPath)-1] = 0
				sc.PutIfAbsent(newPath, loc)
				// finally, we need the path-so-far plus one, just the option
				// tag, for sorting option groups
				newPath = newPath[:len(newPath)-1]
				sc.PutIfAbsent(newPath, loc)

				break
			} else {
				allowed = edges[nextKind]
			}
		}
	}
}
+
+func (p *Printer) extractOptions(dsc desc.Descriptor, opts proto.Message, mf *dynamic.MessageFactory) (map[int32][]option, error) {
+ md, err := desc.LoadMessageDescriptorForMessage(opts)
+ if err != nil {
+ return nil, err
+ }
+ dm := mf.NewDynamicMessage(md)
+ if err = dm.ConvertFrom(opts); err != nil {
+ return nil, fmt.Errorf("failed convert %s to dynamic message: %v", md.GetFullyQualifiedName(), err)
+ }
+
+ pkg := dsc.GetFile().GetPackage()
+ var scope string
+ if _, ok := dsc.(*desc.FileDescriptor); ok {
+ scope = pkg
+ } else {
+ scope = dsc.GetFullyQualifiedName()
+ }
+
+ options := map[int32][]option{}
+ var uninterpreted []interface{}
+ for _, fldset := range [][]*desc.FieldDescriptor{md.GetFields(), mf.GetExtensionRegistry().AllExtensionsForType(md.GetFullyQualifiedName())} {
+ for _, fld := range fldset {
+ if dm.HasField(fld) {
+ val := dm.GetField(fld)
+ var opts []option
+ var name string
+ if fld.IsExtension() {
+ name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+ } else {
+ name = fld.GetName()
+ }
+ switch val := val.(type) {
+ case []interface{}:
+ if fld.GetNumber() == internal.UninterpretedOptionsTag {
+ // we handle uninterpreted options differently
+ uninterpreted = val
+ continue
+ }
+
+ for _, e := range val {
+ if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := fld.GetEnumType().FindValueByNumber(e.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ e = ev
+ }
+ var name string
+ if fld.IsExtension() {
+ name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+ } else {
+ name = fld.GetName()
+ }
+ opts = append(opts, option{name: name, val: e})
+ }
+ case map[interface{}]interface{}:
+ for k := range sortKeys(val) {
+ v := val[k]
+ vf := fld.GetMapValueType()
+ if vf.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := vf.GetEnumType().FindValueByNumber(v.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ v = ev
+ }
+ entry := mf.NewDynamicMessage(fld.GetMessageType())
+ entry.SetFieldByNumber(1, k)
+ entry.SetFieldByNumber(2, v)
+ opts = append(opts, option{name: name, val: entry})
+ }
+ default:
+ if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ ev := fld.GetEnumType().FindValueByNumber(val.(int32))
+ if ev == nil {
+ // have to skip unknown enum values :(
+ continue
+ }
+ val = ev
+ }
+ opts = append(opts, option{name: name, val: val})
+ }
+ if len(opts) > 0 {
+ options[fld.GetNumber()] = opts
+ }
+ }
+ }
+ }
+
+ // if there are uninterpreted options, add those too
+ if len(uninterpreted) > 0 {
+ opts := make([]option, len(uninterpreted))
+ for i, u := range uninterpreted {
+ var unint *descriptor.UninterpretedOption
+ if un, ok := u.(*descriptor.UninterpretedOption); ok {
+ unint = un
+ } else {
+ dm := u.(*dynamic.Message)
+ unint = &descriptor.UninterpretedOption{}
+ if err := dm.ConvertTo(unint); err != nil {
+ return nil, err
+ }
+ }
+
+ var buf bytes.Buffer
+ for ni, n := range unint.Name {
+ if ni > 0 {
+ buf.WriteByte('.')
+ }
+ if n.GetIsExtension() {
+ fmt.Fprintf(&buf, "(%s)", n.GetNamePart())
+ } else {
+ buf.WriteString(n.GetNamePart())
+ }
+ }
+
+ var v interface{}
+ switch {
+ case unint.IdentifierValue != nil:
+ v = ident(unint.GetIdentifierValue())
+ case unint.StringValue != nil:
+ v = string(unint.GetStringValue())
+ case unint.DoubleValue != nil:
+ v = unint.GetDoubleValue()
+ case unint.PositiveIntValue != nil:
+ v = unint.GetPositiveIntValue()
+ case unint.NegativeIntValue != nil:
+ v = unint.GetNegativeIntValue()
+ case unint.AggregateValue != nil:
+ v = ident(unint.GetAggregateValue())
+ }
+
+ opts[i] = option{name: buf.String(), val: v}
+ }
+ options[internal.UninterpretedOptionsTag] = opts
+ }
+
+ return options, nil
+}
+
// optionsAsElementAddrs builds a pseudo-address for each distinct option field
// tag present in opts, all sharing the given options tag and explicit order,
// and returns them sorted by option name (extensions last) for deterministic
// output.
func optionsAsElementAddrs(optionsTag int32, order int, opts map[int32][]option) []elementAddr {
	var optAddrs []elementAddr
	for tag := range opts {
		optAddrs = append(optAddrs, elementAddr{elementType: optionsTag, elementIndex: int(tag), order: order})
	}
	sort.Sort(optionsByName{addrs: optAddrs, opts: opts})
	return optAddrs
}
+
// quotedString implements the text format for string literals for protocol
// buffers. This form is also acceptable for string literals in option values
// by the protocol buffer compiler, protoc.
func quotedString(s string) string {
	var b bytes.Buffer
	b.WriteByte('"')
	// Loop over the bytes, not the runes.
	for i := 0; i < len(s); i++ {
		// Divergence from C++: we don't escape apostrophes.
		// There's no need to escape them, and the C++ parser
		// copes with a naked apostrophe.
		switch c := s[i]; c {
		case '\n':
			b.WriteString("\\n")
		case '\r':
			b.WriteString("\\r")
		case '\t':
			b.WriteString("\\t")
		case '"':
			// BUG FIX: must emit the backslash AND the quote; the previous
			// code wrote only "\\", which dropped the quote character and
			// produced an unbalanced literal
			b.WriteString("\\\"")
		case '\\':
			b.WriteString("\\\\")
		default:
			if c >= 0x20 && c < 0x7f {
				// printable ASCII passes through unchanged
				b.WriteByte(c)
			} else {
				// everything else becomes a 3-digit octal escape
				fmt.Fprintf(&b, "\\%03o", c)
			}
		}
	}
	b.WriteByte('"')

	return b.String()
}
+
// elementAddr identifies one child element within an enclosing descriptor:
// elementType is the descriptor-proto field tag for that kind of child and
// elementIndex is its index within that repeated field. A negative
// elementIndex denotes a pseudo-option; a negative elementType is used to
// address a one-of's fields through its parent message. order, when set,
// imposes an explicit ordering ahead of all other sort criteria.
type elementAddr struct {
	elementType  int32
	elementIndex int
	order        int
}

// elementAddrs is a sortable collection of element addresses, all relative to
// the same enclosing descriptor dsc. opts maps option field tags to their
// extracted option values, for addresses that refer to options.
type elementAddrs struct {
	addrs []elementAddr
	dsc   interface{}
	opts  map[int32][]option
}
+
// Len implements sort.Interface, reporting the number of addresses.
func (a elementAddrs) Len() int {
	return len(a.addrs)
}
+
// Less implements sort.Interface. Addresses compare first by explicit order,
// then by element type, and finally by a rule specific to the element's kind:
// fields by tag number (regular fields before extensions, extensions grouped
// by extendee), enum values by number then name, ranges by start, reserved
// names / packages / imports lexically, option groups via optionLess, and all
// other descriptors by name.
func (a elementAddrs) Less(i, j int) bool {
	// explicit order is considered first
	if a.addrs[i].order < a.addrs[j].order {
		return true
	} else if a.addrs[i].order > a.addrs[j].order {
		return false
	}
	// if order is equal, sort by element type
	if a.addrs[i].elementType < a.addrs[j].elementType {
		return true
	} else if a.addrs[i].elementType > a.addrs[j].elementType {
		return false
	}

	// same element type: dereference both and compare by kind-specific rule
	di := a.at(a.addrs[i])
	dj := a.at(a.addrs[j])

	switch vi := di.(type) {
	case *desc.FieldDescriptor:
		// fields are ordered by tag number
		vj := dj.(*desc.FieldDescriptor)
		// regular fields before extensions; extensions grouped by extendee
		if !vi.IsExtension() && vj.IsExtension() {
			return true
		} else if vi.IsExtension() && !vj.IsExtension() {
			return false
		} else if vi.IsExtension() && vj.IsExtension() {
			if vi.GetOwner() != vj.GetOwner() {
				return vi.GetOwner().GetFullyQualifiedName() < vj.GetOwner().GetFullyQualifiedName()
			}
		}
		return vi.GetNumber() < vj.GetNumber()

	case *desc.EnumValueDescriptor:
		// enum values ordered by number then name
		vj := dj.(*desc.EnumValueDescriptor)
		if vi.GetNumber() == vj.GetNumber() {
			return vi.GetName() < vj.GetName()
		}
		return vi.GetNumber() < vj.GetNumber()

	case *descriptor.DescriptorProto_ExtensionRange:
		// extension ranges ordered by tag
		return vi.GetStart() < dj.(*descriptor.DescriptorProto_ExtensionRange).GetStart()

	case reservedRange:
		// reserved ranges ordered by tag, too
		return vi.start < dj.(reservedRange).start

	case string:
		// reserved names lexically sorted
		return vi < dj.(string)

	case pkg:
		// package names lexically sorted
		return vi < dj.(pkg)

	case imp:
		// import paths lexically sorted
		return vi < dj.(imp)

	case []option:
		// options sorted by name, extensions last
		return optionLess(vi, dj.([]option))

	default:
		// all other descriptors ordered by name
		return di.(desc.Descriptor).GetName() < dj.(desc.Descriptor).GetName()
	}
}
+
// Swap implements sort.Interface.
func (a elementAddrs) Swap(i, j int) {
	a.addrs[i], a.addrs[j] = a.addrs[j], a.addrs[i]
}
+
// at dereferences an element address, returning the actual child element it
// refers to within the enclosing descriptor a.dsc: a descriptor, an option
// slice from a.opts, a reservedRange, a reserved-name string, a pkg, or an
// imp. It panics for element types unknown for the given descriptor, which
// would indicate a programming error.
func (a elementAddrs) at(addr elementAddr) interface{} {
	switch dsc := a.dsc.(type) {
	case *desc.FileDescriptor:
		switch addr.elementType {
		case internal.File_packageTag:
			return pkg(dsc.GetPackage())
		case internal.File_dependencyTag:
			return imp(dsc.AsFileDescriptorProto().GetDependency()[addr.elementIndex])
		case internal.File_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.File_messagesTag:
			return dsc.GetMessageTypes()[addr.elementIndex]
		case internal.File_enumsTag:
			return dsc.GetEnumTypes()[addr.elementIndex]
		case internal.File_servicesTag:
			return dsc.GetServices()[addr.elementIndex]
		case internal.File_extensionsTag:
			return dsc.GetExtensions()[addr.elementIndex]
		}
	case *desc.MessageDescriptor:
		switch addr.elementType {
		case internal.Message_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Message_fieldsTag:
			return dsc.GetFields()[addr.elementIndex]
		case internal.Message_nestedMessagesTag:
			return dsc.GetNestedMessageTypes()[addr.elementIndex]
		case internal.Message_enumsTag:
			return dsc.GetNestedEnumTypes()[addr.elementIndex]
		case internal.Message_extensionsTag:
			return dsc.GetNestedExtensions()[addr.elementIndex]
		case internal.Message_extensionRangeTag:
			return dsc.AsDescriptorProto().GetExtensionRange()[addr.elementIndex]
		case internal.Message_reservedRangeTag:
			// message reserved ranges have an exclusive end in the descriptor
			// proto, hence the -1 to convert to the inclusive form we print
			rng := dsc.AsDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd() - 1}
		case internal.Message_reservedNameTag:
			return dsc.AsDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.FieldDescriptor:
		if addr.elementType == internal.Field_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.OneOfDescriptor:
		switch addr.elementType {
		case internal.OneOf_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case -internal.Message_fieldsTag:
			// negative tag: a field of the one-of, addressed via its owner
			return dsc.GetOwner().GetFields()[addr.elementIndex]
		}
	case *desc.EnumDescriptor:
		switch addr.elementType {
		case internal.Enum_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Enum_valuesTag:
			return dsc.GetValues()[addr.elementIndex]
		case internal.Enum_reservedRangeTag:
			// enum reserved ranges are already inclusive; no end adjustment
			rng := dsc.AsEnumDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd()}
		case internal.Enum_reservedNameTag:
			return dsc.AsEnumDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.EnumValueDescriptor:
		if addr.elementType == internal.EnumVal_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.ServiceDescriptor:
		switch addr.elementType {
		case internal.Service_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Service_methodsTag:
			return dsc.GetMethods()[addr.elementIndex]
		}
	case *desc.MethodDescriptor:
		if addr.elementType == internal.Method_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case extensionRange:
		if addr.elementType == internal.ExtensionRange_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	}

	panic(fmt.Sprintf("location for unknown field %d of %T", addr.elementType, a.dsc))
}
+
// extensionRange pairs an extension range with the message that declares it,
// so option extraction can recover descriptor context from the range.
type extensionRange struct {
	owner    *desc.MessageDescriptor
	extRange *descriptor.DescriptorProto_ExtensionRange
}

// elementSrcOrder sorts element addresses by where the elements appear in the
// original source, using the given source info; prefix is the source-info
// path of the enclosing element.
type elementSrcOrder struct {
	elementAddrs
	sourceInfo internal.SourceInfoMap
	prefix     []int32
}
+
// Less implements sort.Interface, ordering two element addresses by the
// source spans recorded for them in sourceInfo. Elements lacking source info
// are placed heuristically: for files, the package, imports, and options sort
// toward the top (in that order); for other descriptors, options sort first;
// anything else without source info sorts after known elements, relying on
// sort stability to preserve relative order.
func (a elementSrcOrder) Less(i, j int) bool {
	ti := a.addrs[i].elementType
	ei := a.addrs[i].elementIndex

	tj := a.addrs[j].elementType
	ej := a.addrs[j].elementIndex

	// look up each element's source location; a negative index is a
	// pseudo-option (the negated index is the tag), and a negative type
	// addresses children through the grandparent, so the last two prefix
	// components are dropped (copy fills only len(prefix)-2 elements)
	var si, sj *descriptor.SourceCodeInfo_Location
	if ei < 0 {
		si = a.sourceInfo.Get(append(a.prefix, -int32(ei)))
	} else if ti < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		si = a.sourceInfo.Get(append(p, ti, int32(ei)))
	} else {
		si = a.sourceInfo.Get(append(a.prefix, ti, int32(ei)))
	}
	if ej < 0 {
		sj = a.sourceInfo.Get(append(a.prefix, -int32(ej)))
	} else if tj < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		sj = a.sourceInfo.Get(append(p, tj, int32(ej)))
	} else {
		sj = a.sourceInfo.Get(append(a.prefix, tj, int32(ej)))
	}

	if (si == nil) != (sj == nil) {
		// generally, we put unknown elements after known ones;
		// except package, imports, and option elements go first

		// i will be unknown and j will be known
		swapped := false
		if si != nil {
			si, sj = sj, si
			ti, tj = tj, ti
			swapped = true
		}
		switch a.dsc.(type) {
		case *desc.FileDescriptor:
			// NB: These comparisons are *trying* to get things ordered so that
			// 1) If the package element has no source info, it appears _first_.
			// 2) If any import element has no source info, it appears _after_
			//    the package element but _before_ any other element.
			// 3) If any option element has no source info, it appears _after_
			//    the package and import elements but _before_ any other element.
			// If the package, imports, and options are all missing source info,
			// this will sort them all to the top in expected order. But if they
			// are mixed (some _do_ have source info, some do not), and elements
			// with source info have spans that positions them _after_ other
			// elements in the file, then this Less function will be unstable
			// since the above dual objectives for imports and options ("before
			// this but after that") may be in conflict with one another. This
			// should not cause any problems, other than elements being possibly
			// sorted in a confusing order.
			//
			// Well-formed descriptors should instead have consistent source
			// info: either all elements have source info or none do. So this
			// should not be an issue in practice.
			if ti == internal.File_packageTag {
				return !swapped
			}
			if ti == internal.File_dependencyTag {
				if tj == internal.File_packageTag {
					// imports will come *after* package
					return swapped
				}
				return !swapped
			}
			if ti == internal.File_optionsTag {
				if tj == internal.File_packageTag || tj == internal.File_dependencyTag {
					// options will come *after* package and imports
					return swapped
				}
				return !swapped
			}
		case *desc.MessageDescriptor:
			if ti == internal.Message_optionsTag {
				return !swapped
			}
		case *desc.EnumDescriptor:
			if ti == internal.Enum_optionsTag {
				return !swapped
			}
		case *desc.ServiceDescriptor:
			if ti == internal.Service_optionsTag {
				return !swapped
			}
		}
		return swapped

	} else if si == nil || sj == nil {
		// let stable sort keep unknown elements in same relative order
		return false
	}

	// both have source info: compare spans lexicographically; a shorter span
	// that is a prefix of the other sorts first
	for idx := 0; idx < len(sj.Span); idx++ {
		if idx >= len(si.Span) {
			return true
		}
		if si.Span[idx] < sj.Span[idx] {
			return true
		}
		if si.Span[idx] > sj.Span[idx] {
			return false
		}
	}
	return false
}
+
// optionsByName sorts option element addresses by option name (see
// optionLess) rather than by source location.
type optionsByName struct {
	addrs []elementAddr
	opts  map[int32][]option
}

// Len implements sort.Interface.
func (o optionsByName) Len() int {
	return len(o.addrs)
}

// Less implements sort.Interface, delegating to optionLess on the option
// groups the two addresses refer to (elementIndex holds the option tag).
func (o optionsByName) Less(i, j int) bool {
	oi := o.opts[int32(o.addrs[i].elementIndex)]
	oj := o.opts[int32(o.addrs[j].elementIndex)]
	return optionLess(oi, oj)
}
+
+func optionLess(i, j []option) bool {
+ ni := i[0].name
+ nj := j[0].name
+ if ni[0] != '(' && nj[0] == '(' {
+ return true
+ } else if ni[0] == '(' && nj[0] != '(' {
+ return false
+ }
+ return ni < nj
+}
+
// Swap implements sort.Interface.
func (o optionsByName) Swap(i, j int) {
	o.addrs[i], o.addrs[j] = o.addrs[j], o.addrs[i]
}
+
+func (p *Printer) printElement(isDecriptor bool, si *descriptor.SourceCodeInfo_Location, w *writer, indent int, el func(*writer)) {
+ includeComments := isDecriptor || p.includeCommentType(CommentsTokens)
+
+ if includeComments && si != nil {
+ p.printLeadingComments(si, w, indent)
+ }
+ el(w)
+ if includeComments && si != nil {
+ p.printTrailingComments(si, w, indent)
+ }
+ if indent >= 0 && !w.newline {
+ // if we're not printing inline but element did not have trailing newline, add one now
+ fmt.Fprintln(w)
+ }
+}
+
// printElementString prints str (followed by a space) as an inline element,
// attaching any comments recorded in si.
func (p *Printer) printElementString(si *descriptor.SourceCodeInfo_Location, w *writer, indent int, str string) {
	p.printElement(false, si, w, inline(indent), func(w *writer) {
		fmt.Fprintf(w, "%s ", str)
	})
}
+
// includeCommentType reports whether comments of the given type should be
// printed, i.e. whether that type is NOT set in the OmitComments bit mask.
func (p *Printer) includeCommentType(c CommentType) bool {
	return (p.OmitComments & c) == 0
}
+
// printLeadingComments emits any detached comments and then the leading
// comment recorded for si, ahead of the element being printed. It returns
// whether the last output written ended with a newline.
func (p *Printer) printLeadingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) bool {
	endsInNewLine := false

	if p.includeCommentType(CommentsDetached) {
		for _, c := range si.GetLeadingDetachedComments() {
			if p.printComment(c, w, indent, true) {
				// if comment ended in newline, add another newline to separate
				// this comment from the next
				p.newLine(w)
				endsInNewLine = true
			} else if indent < 0 {
				// comment did not end in newline and we are trying to inline?
				// just add a space to separate this comment from what follows
				fmt.Fprint(w, " ")
				endsInNewLine = false
			} else {
				// comment did not end in newline and we are *not* trying to inline?
				// add newline to end of comment and add another to separate this
				// comment from what follows
				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				p.newLine(w)
				endsInNewLine = true
			}
		}
	}

	if p.includeCommentType(CommentsLeading) && si.GetLeadingComments() != "" {
		endsInNewLine = p.printComment(si.GetLeadingComments(), w, indent, true)
		if !endsInNewLine {
			if indent >= 0 {
				// leading comment didn't end with newline but needs one
				// (because we're *not* inlining)
				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				endsInNewLine = true
			} else {
				// space between comment and following element when inlined
				fmt.Fprint(w, " ")
			}
		}
	}

	return endsInNewLine
}
+
// printTrailingComments emits the trailing comment, if any, recorded for si
// after its element has been printed, keeping inline elements on one line.
func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) {
	if p.includeCommentType(CommentsTrailing) && si.GetTrailingComments() != "" {
		if !p.printComment(si.GetTrailingComments(), w, indent, p.TrailingCommentsOnSeparateLine) && indent >= 0 {
			// trailing comment didn't end with newline but needs one
			// (because we're *not* inlining)
			fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
		} else if indent < 0 {
			// inlined: separate the comment from what follows with a space
			fmt.Fprint(w, " ")
		}
	}
}
+
// printComment emits the given comment text at the given indent, in either
// line ("//") or block ("/* */") style. Block style is used when inlining
// (indent < 0) or when PreferMultiLineStyleComments is set — unless the text
// contains "*/", which cannot appear inside a block comment. forceNextLine
// forces the comment onto its own line instead of tacking it onto the end of
// the previous output. It returns whether the emitted comment ended with a
// newline.
func (p *Printer) printComment(comments string, w *writer, indent int, forceNextLine bool) bool {
	if comments == "" {
		return false
	}

	var multiLine bool
	if indent < 0 {
		// use multi-line style when inlining
		multiLine = true
	} else {
		multiLine = p.PreferMultiLineStyleComments
	}
	if multiLine && strings.Contains(comments, "*/") {
		// can't emit '*/' in a multi-line style comment
		multiLine = false
	}

	lines := strings.Split(comments, "\n")

	// first, remove leading and trailing blank lines
	if lines[0] == "" {
		lines = lines[1:]
	}
	if lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	if len(lines) == 0 {
		return false
	}

	if indent >= 0 && !w.newline {
		// last element did not have trailing newline, so we
		// either need to tack on newline or, if comment is
		// just one line, inline it on the end
		if forceNextLine || len(lines) > 1 {
			fmt.Fprintln(w)
		} else {
			if !w.space {
				fmt.Fprint(w, " ")
			}
			indent = inline(indent)
		}
	}

	// single-line block comment: /* like this */
	if len(lines) == 1 && multiLine {
		p.indent(w, indent)
		line := lines[0]
		if line[0] == ' ' && line[len(line)-1] != ' ' {
			// add trailing space for symmetry
			line += " "
		}
		fmt.Fprintf(w, "/*%s*/", line)
		if indent >= 0 {
			fmt.Fprintln(w)
			return true
		}
		return false
	}

	if multiLine {
		// multi-line style comments that actually span multiple lines
		// get a blank line before and after so that comment renders nicely
		lines = append(lines, "", "")
		copy(lines[1:], lines)
		lines[0] = ""
	}

	for i, l := range lines {
		// continuation lines need real indentation even when inlined
		p.maybeIndent(w, indent, i > 0)
		if multiLine {
			if i == 0 {
				// first line
				fmt.Fprintf(w, "/*%s\n", strings.TrimRight(l, " \t"))
			} else if i == len(lines)-1 {
				// last line
				if l == "" {
					fmt.Fprint(w, " */")
				} else {
					fmt.Fprintf(w, " *%s*/", l)
				}
				if indent >= 0 {
					fmt.Fprintln(w)
				}
			} else {
				fmt.Fprintf(w, " *%s\n", strings.TrimRight(l, " \t"))
			}
		} else {
			fmt.Fprintf(w, "//%s\n", strings.TrimRight(l, " \t"))
		}
	}

	// single-line comments always end in newline; multi-line comments only
	// end in newline for non-negative (e.g. non-inlined) indentation
	return !multiLine || indent >= 0
}
+
// indent writes p.Indent the given number of times. A negative (inline)
// indent writes nothing, which callers rely on.
func (p *Printer) indent(w io.Writer, indent int) {
	for i := 0; i < indent; i++ {
		fmt.Fprint(w, p.Indent)
	}
}

// maybeIndent indents even when indent is negative (inline mode) if
// requireIndent is set — used for continuation lines of multi-line comments,
// which need real indentation despite being "inline".
func (p *Printer) maybeIndent(w io.Writer, indent int, requireIndent bool) {
	if indent < 0 && requireIndent {
		p.indent(w, -indent)
	} else {
		p.indent(w, indent)
	}
}
+
// writer wraps an io.Writer with the bookkeeping the printer needs: the first
// write error encountered (err), whether a trailing space is pending (space),
// and whether output currently ends with a newline (newline). The pending
// space lets Write suppress spaces before ';', ',', and ']'.
type writer struct {
	io.Writer
	err     error
	space   bool
	newline bool
}

// newWriter returns a writer wrapping w. newline starts true because nothing
// has been written yet, i.e. we are at the start of a line.
func newWriter(w io.Writer) *writer {
	return &writer{Writer: w, newline: true}
}
+
// Write implements io.Writer. A trailing space in p is deferred to the next
// call so it can be dropped when the following character is ';', ',' or ']'.
// It also tracks whether output ends with a newline and records the first
// underlying write error in w.err.
func (w *writer) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	w.newline = false

	if w.space {
		// skip any trailing space if the following
		// character is semicolon, comma, or close bracket
		if p[0] != ';' && p[0] != ',' && p[0] != ']' {
			_, err := w.Writer.Write([]byte{' '})
			if err != nil {
				w.err = err
				return 0, err
			}
		}
		w.space = false
	}

	// defer a trailing space until the next write
	if p[len(p)-1] == ' ' {
		w.space = true
		p = p[:len(p)-1]
	}
	if len(p) > 0 && p[len(p)-1] == '\n' {
		w.newline = true
	}

	num, err := w.Writer.Write(p)
	if err != nil {
		w.err = err
	} else if w.space {
		// pretend space was written, so callers see the count they expect
		num++
	}
	return num, err
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..91fd672
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,185 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+ "fmt"
+ "github.com/golang/protobuf/proto"
+ "github.com/jhump/protoreflect/codec"
+ "io"
+)
+
+// defaultDeterminism, if true, will mean that calls to Marshal will produce
+// deterministic output. This is used to make the output of proto.Marshal(...)
+// deterministic (since there is no way to have that convey determinism intent).
+// **This is only used from tests.**
+var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+ var b codec.Buffer
+ // defaultDeterminism is false except in tests; see MarshalDeterministic
+ // for deterministic output
+ b.SetDeterministic(defaultDeterminism)
+ if err := m.marshal(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+ // wrap the caller's slice so encoding appends to it
+ codedBuf := codec.NewBuffer(b)
+ codedBuf.SetDeterministic(defaultDeterminism)
+ if err := m.marshal(codedBuf); err != nil {
+ return nil, err
+ }
+ return codedBuf.Bytes(), nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+ var b codec.Buffer
+ b.SetDeterministic(true)
+ if err := m.marshal(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic,
+// except instead of allocating a new byte slice to marshal into, it uses the
+// provided byte slice. The backing array for the returned byte slice *may* be
+// the same as the one that was passed in, but it's not guaranteed as a new
+// backing array will automatically be allocated if more bytes need to be written
+// than the provided buffer has capacity for.
+// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic,
+// except instead of allocating a new byte slice to marshal into, it uses the
+// provided byte slice. The backing array for the returned byte slice *may* be
+// the same as the one that was passed in, but it's not guaranteed as a new
+// backing array will automatically be allocated if more bytes need to be written
+// than the provided buffer has capacity for.
+func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) {
+ // wrap the caller's slice so encoding appends to it
+ codedBuf := codec.NewBuffer(b)
+ codedBuf.SetDeterministic(true)
+ if err := m.marshal(codedBuf); err != nil {
+ return nil, err
+ }
+ return codedBuf.Bytes(), nil
+}
+
+// marshal encodes all known fields to b, then all unknown fields, so that
+// unrecognized data parsed earlier is preserved on re-serialization.
+func (m *Message) marshal(b *codec.Buffer) error {
+ if err := m.marshalKnownFields(b); err != nil {
+ return err
+ }
+ return m.marshalUnknownFields(b)
+}
+
+// marshalKnownFields encodes every recognized field value to b, iterating tags
+// via knownFieldTags (which determines the output ordering). It panics if a
+// stored tag has no matching descriptor, since that would mean an internal
+// invariant of the message was broken.
+func (m *Message) marshalKnownFields(b *codec.Buffer) error {
+ for _, tag := range m.knownFieldTags() {
+ itag := int32(tag)
+ val := m.values[itag]
+ fd := m.FindFieldDescriptor(itag)
+ if fd == nil {
+ panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
+ }
+ if err := b.EncodeFieldValue(fd, val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalUnknownFields re-serializes all unrecognized fields to b, preserving
+// each field's original wire type as recorded in UnknownField.Encoding.
+func (m *Message) marshalUnknownFields(b *codec.Buffer) error {
+ for _, tag := range m.unknownFieldTags() {
+ itag := int32(tag)
+ sl := m.unknownFields[itag]
+ for _, u := range sl {
+ // re-emit the original tag and wire type, then the payload
+ if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil {
+ return err
+ }
+ switch u.Encoding {
+ case proto.WireBytes:
+ if err := b.EncodeRawBytes(u.Contents); err != nil {
+ return err
+ }
+ case proto.WireStartGroup:
+ // Contents holds the group's already-encoded body; close the
+ // group with an end-group marker. NOTE(review): the Write error
+ // is deliberately discarded here — presumably buffer writes
+ // cannot fail; verify against codec.Buffer's contract.
+ _, _ = b.Write(u.Contents)
+ if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
+ return err
+ }
+ case proto.WireFixed32:
+ if err := b.EncodeFixed32(u.Value); err != nil {
+ return err
+ }
+ case proto.WireFixed64:
+ if err := b.EncodeFixed64(u.Value); err != nil {
+ return err
+ }
+ case proto.WireVarint:
+ if err := b.EncodeVarint(u.Value); err != nil {
+ return err
+ }
+ default:
+ return codec.ErrBadWireType
+ }
+ }
+ }
+ return nil
+}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMerge(b); err != nil {
+ return err
+ }
+ // unlike UnmarshalMerge, also verify required fields are present
+ return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+ return m.unmarshal(codec.NewBuffer(b), false)
+}
+
+// unmarshal decodes fields from buf into m until the buffer is exhausted.
+// When isGroup is true, decoding is terminated (successfully) by an end-group
+// marker instead, and running out of bytes first is an error. Fields whose
+// tags are not recognized are accumulated into m.unknownFields.
+func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+ for !buf.EOF() {
+ fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
+ if err != nil {
+ if err == codec.ErrWireTypeEndGroup {
+ if isGroup {
+ // finished parsing group
+ return nil
+ }
+ // end-group marker outside of a group is malformed input
+ return codec.ErrBadWireType
+ }
+ return err
+ }
+
+ if fd == nil {
+ // unrecognized tag: preserve the raw data so it survives
+ // re-serialization
+ if m.unknownFields == nil {
+ m.unknownFields = map[int32][]UnknownField{}
+ }
+ uv := val.(codec.UnknownField)
+ u := UnknownField{
+ Encoding: uv.Encoding,
+ Value: uv.Value,
+ Contents: uv.Contents,
+ }
+ m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u)
+ } else if err := mergeField(m, fd, val); err != nil {
+ return err
+ }
+ }
+ if isGroup {
+ // group was not terminated by an end-group marker
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..c329fcd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,163 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
+//
+//
+// Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. Similar goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enum fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+//
+// Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not setup with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+// 1. A dynamic message is created with a descriptor, A, and then
+// de-serialized from a stream of bytes. The stream includes an
+// unrecognized tag T. The message will include tag T in its unrecognized
+// field set.
+// 2. Another call site retrieves a newer descriptor, A', which includes a
+// newly added field with tag T.
+// 3. That other call site then uses a FieldDescriptor to access the value of
+// the new field. This will cause the dynamic message to parse the bytes
+// for the unknown tag T and store them as a known field.
+// 4. Subsequent operations for tag T, including setting the field using only
+// tag number or de-serializing a stream that includes tag T, will operate
+// as if that tag were part of the original descriptor, A.
+//
+//
+// Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+//
+// Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extension fields
+// (for proto2 messages).
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..3f19d6b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2688 @@
+package dynamic
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+// reflection types used when converting values to and from generated messages
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+ md *desc.MessageDescriptor // describes this message's type
+ er *ExtensionRegistry // used to recognize extension fields
+ mf *MessageFactory // used to instantiate nested messages
+ extraFields map[int32]*desc.FieldDescriptor // fields discovered dynamically (e.g. via Get/SetField)
+ values map[int32]interface{} // known field values, keyed by tag
+ unknownFields map[int32][]UnknownField // unrecognized fields, keyed by tag
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+ // Encoding indicates how the unknown field was encoded on the wire. If it
+ // is proto.WireBytes or proto.WireGroupStart then Contents will be set to
+ // the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least
+ // significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+ // Value.
+ Encoding int8
+ Contents []byte
+ Value uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+ return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+ mf := NewMessageFactoryWithExtensionRegistry(er)
+ return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+ var er *ExtensionRegistry
+ if mf != nil {
+ // inherit the factory's extension registry so extension fields can be
+ // recognized during de-serialization
+ er = mf.er
+ }
+ return &Message{
+ md: md,
+ mf: mf,
+ er: er,
+ }
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+ return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+ mf := NewMessageFactoryWithExtensionRegistry(er)
+ return AsDynamicMessageWithMessageFactory(msg, mf)
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+ if dm, ok := msg.(*Message); ok {
+ // already dynamic: return as-is
+ return dm, nil
+ }
+ md, err := desc.LoadMessageDescriptorForMessage(msg)
+ if err != nil {
+ return nil, err
+ }
+ // build an empty dynamic message and copy the field values into it
+ dm := NewMessageWithMessageFactory(md, mf)
+ err = dm.mergeFrom(msg)
+ if err != nil {
+ return nil, err
+ }
+ return dm, nil
+}
+
+// GetMessageDescriptor returns a descriptor for this message's type.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+ return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+ if len(m.extraFields) == 0 {
+ // common case: no dynamically-discovered fields, so the descriptor's
+ // own field list suffices
+ return m.md.GetFields()
+ }
+ // combine declared fields with non-extension fields discovered dynamically
+ flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields))
+ copy(flds, m.md.GetFields())
+ for _, fld := range m.extraFields {
+ if !fld.IsExtension() {
+ flds = append(flds, fld)
+ }
+ }
+ return flds
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+ if !m.md.IsExtendable() {
+ // type declares no extension ranges, so it can have no extensions
+ return nil
+ }
+ exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+ // also include extensions discovered dynamically (e.g. via Get/SetField)
+ for _, fld := range m.extraFields {
+ if fld.IsExtension() {
+ exts = append(exts, fld)
+ }
+ }
+ return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+ flds := make([]int32, 0, len(m.unknownFields))
+ for tag := range m.unknownFields {
+ flds = append(flds, tag)
+ }
+ return flds
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc.
+func (m *Message) Descriptor() ([]byte, []int) {
+ // get encoded file descriptor
+ b, err := proto.Marshal(m.md.GetFile().AsProto())
+ if err != nil {
+ panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+ // gzip it, to match the format produced by protoc-generated code
+ var zippedBytes bytes.Buffer
+ w := gzip.NewWriter(&zippedBytes)
+ if _, err := w.Write(b); err != nil {
+ panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+ if err := w.Close(); err != nil {
+ panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+ }
+
+ // and path to message: walk up the chain of parents, recording at each
+ // level the index of the (possibly nested) message type within its parent
+ path := []int{}
+ var d desc.Descriptor
+ name := m.md.GetFullyQualifiedName()
+ for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+ found := false
+ switch d := d.(type) {
+ case (*desc.FileDescriptor):
+ for i, md := range d.GetMessageTypes() {
+ if md.GetFullyQualifiedName() == name {
+ found = true
+ path = append(path, i)
+ }
+ }
+ case (*desc.MessageDescriptor):
+ for i, md := range d.GetNestedMessageTypes() {
+ if md.GetFullyQualifiedName() == name {
+ found = true
+ path = append(path, i)
+ }
+ }
+ }
+ if !found {
+ panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+ }
+ }
+ // reverse the path: it was collected innermost-first but callers expect
+ // outermost-first
+ i := 0
+ j := len(path) - 1
+ for i < j {
+ path[i], path[j] = path[j], path[i]
+ i++
+ j--
+ }
+
+ return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+ return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+ // search order: declared fields, then registered extensions, then
+ // dynamically-discovered fields
+ fd := m.md.FindFieldByNumber(tagNumber)
+ if fd != nil {
+ return fd
+ }
+ fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber)
+ if fd != nil {
+ return fd
+ }
+ return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+ if name == "" {
+ return nil
+ }
+ fd := m.md.FindFieldByName(name)
+ if fd != nil {
+ return fd
+ }
+ // strip enclosing parentheses/brackets; their presence means the name can
+ // only match an extension field
+ mustBeExt := false
+ if name[0] == '(' {
+ if name[len(name)-1] != ')' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ } else if name[0] == '[' {
+ if name[len(name)-1] != ']' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ }
+ fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+ if fd != nil {
+ return fd
+ }
+ // finally, consult fields discovered dynamically
+ for _, fd := range m.extraFields {
+ if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+ return fd
+ } else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+ return fd
+ }
+ }
+
+ return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+ if name == "" {
+ return nil
+ }
+ fd := m.md.FindFieldByJSONName(name)
+ if fd != nil {
+ return fd
+ }
+ // strip enclosing parentheses/brackets; their presence means the name can
+ // only match an extension field
+ mustBeExt := false
+ if name[0] == '(' {
+ if name[len(name)-1] != ')' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ } else if name[0] == '[' {
+ if name[len(name)-1] != ']' {
+ // malformed name
+ return nil
+ }
+ mustBeExt = true
+ name = name[1 : len(name)-1]
+ }
+ fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+ if fd != nil {
+ return fd
+ }
+ for _, fd := range m.extraFields {
+ if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+ return fd
+ } else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+ return fd
+ }
+ }
+
+ // try non-JSON names
+ return m.FindFieldDescriptorByName(name)
+}
+
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+ return checkField(fd, m.md)
+}
+
+// checkField verifies that fd is usable with the message type described by
+// md: it must be declared on that same message type and, if it is an
+// extension, its tag must fall within one of md's extension ranges.
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+ if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+ return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+ }
+ if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+ return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+ }
+ return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+ if v, err := m.TryGetField(fd); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+// +-------------------------+-----------+
+// | Declared Type | Go Type |
+// +-------------------------+-----------+
+// | int32, sint32, sfixed32 | int32 |
+// | int64, sint64, sfixed64 | int64 |
+// | uint32, fixed32 | uint32 |
+// | uint64, fixed64 | uint64 |
+// | float | float32 |
+// | double | float64 |
+// | bool | bool |
+// | string | string |
+// | bytes | []byte |
+// +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to lookup value names with those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values is
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For field's whose types
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that the in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on if *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+ if v, err := m.TryGetFieldByName(name); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parenthesis or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+ if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getField(fd)
+}
+
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+ return m.doGetField(fd, false)
+}
+
// doGetField returns the value stored for fd, first attempting to parse a
// matching unknown field into a known value. When the field is absent:
//   - if nilIfAbsent is true, it returns (nil, nil) so callers can tell
//     "unset" apart from a zero value (used by TryGetOneOfField);
//   - otherwise it returns the field's default value. For message-typed
//     fields (where GetDefaultValue returns nil) it synthesizes either a
//     typed nil pointer (when the field's message type uses proto3 syntax)
//     or a fresh empty message (proto2).
// Map and slice values are returned as defensive copies so callers cannot
// inject illegal keys, values, or elements into internal storage.
func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
	res := m.values[fd.GetNumber()]
	if res == nil {
		var err error
		if res, err = m.parseUnknownField(fd); err != nil {
			return nil, err
		}
		if res == nil {
			if nilIfAbsent {
				return nil, nil
			} else {
				def := fd.GetDefaultValue()
				if def != nil {
					return def, nil
				}
				// GetDefaultValue only returns nil for message types
				md := fd.GetMessageType()
				if md.IsProto3() {
					// try to return a proper nil pointer
					msgType := proto.MessageType(md.GetFullyQualifiedName())
					if msgType != nil && msgType.Implements(typeOfProtoMessage) {
						return reflect.Zero(msgType).Interface().(proto.Message), nil
					}
					// fallback to nil dynamic message pointer
					return (*Message)(nil), nil
				} else {
					// for proto2, return default instance of message
					return m.mf.NewMessage(md), nil
				}
			}
		}
	}
	rt := reflect.TypeOf(res)
	if rt.Kind() == reflect.Map {
		// make defensive copies to prevent caller from storing illegal keys and values
		// (note: m and res are intentionally shadowed for the copy below)
		m := res.(map[interface{}]interface{})
		res := map[interface{}]interface{}{}
		for k, v := range m {
			res[k] = v
		}
		return res, nil
	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
		// make defensive copies to prevent caller from storing illegal elements
		sl := res.([]interface{})
		res := make([]interface{}, len(sl))
		copy(res, sl)
		return res, nil
	}
	return res, nil
}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+ if err := m.checkField(fd); err != nil {
+ return false
+ }
+ return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return false
+ }
+ return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+ if _, ok := m.values[int32(tagNumber)]; ok {
+ return true
+ }
+ _, ok := m.unknownFields[int32(tagNumber)]
+ return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+ if err := m.TrySetField(fd, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+// 1. If a numeric type is provided that can be converted *without loss or
+// overflow*, it is accepted. This allows for setting int64 fields using int
+// or int32 values. Similarly for uint64 with uint and uint32 values and for
+// float64 fields with float32 values.
+// 2. The value can be a named type, as long as its underlying type is correct.
+// 3. Map and repeated fields can be set using any kind of concrete map or
+// slice type, as long as the values within are all of the correct type. So
+// a field defined as a 'map<string, int32>` can be set using a
+// map[string]int32, a map[string]interface{}, or even a
+// map[interface{}]interface{}.
+// 4. Finally, dynamic code that chooses to not treat maps as a special-case
+// find that they can set map fields using a slice where each element is a
+// message that matches the implicit map-entry field message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+ if err := m.TrySetFieldByName(name, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parenthesis or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+ if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+ var err error
+ if val, err = validFieldValue(fd, val); err != nil {
+ return err
+ }
+ m.internalSetField(fd, val)
+ return nil
+}
+
// internalSetField stores val (which must already have been validated and
// converted via validFieldValue) for fd, applying proto presence semantics:
// zero-length repeated/map values and proto3 scalar zero values (outside a
// oneof) are recorded as "absent". It also clears any other set member of
// fd's oneof, discards any unknown value stored under the same tag, and
// registers fd with this message if it was not previously known.
func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
	if fd.IsRepeated() {
		// Unset fields and zero-length fields are indistinguishable, in both
		// proto2 and proto3 syntax
		if reflect.ValueOf(val).Len() == 0 {
			if m.values != nil {
				delete(m.values, fd.GetNumber())
			}
			return
		}
	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
		// proto3 considers fields that are set to their zero value as unset
		// (we already handled repeated fields above)
		var equal bool
		if b, ok := val.([]byte); ok {
			// can't compare slices, so we have to special-case []byte values
			// (note: ok is necessarily true in this branch, so the "ok &&"
			// below is redundant but harmless)
			equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
		} else {
			defVal := fd.GetDefaultValue()
			equal = defVal == val
			if !equal && defVal == nil {
				// above just checks if value is the nil interface,
				// but we should also test if the given value is a
				// nil pointer
				rv := reflect.ValueOf(val)
				if rv.Kind() == reflect.Ptr && rv.IsNil() {
					equal = true
				}
			}
		}
		if equal {
			if m.values != nil {
				delete(m.values, fd.GetNumber())
			}
			return
		}
	}
	if m.values == nil {
		m.values = map[int32]interface{}{}
	}
	m.values[fd.GetNumber()] = val
	// if this field is part of a one-of, make sure all other one-of choices are cleared
	od := fd.GetOneOf()
	if od != nil {
		for _, other := range od.GetChoices() {
			if other.GetNumber() != fd.GetNumber() {
				delete(m.values, other.GetNumber())
			}
		}
	}
	// also clear any unknown fields
	if m.unknownFields != nil {
		delete(m.unknownFields, fd.GetNumber())
	}
	// and add this field if it was previously unknown
	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
		m.addField(fd)
	}
}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+ if m.extraFields == nil {
+ m.extraFields = map[int32]*desc.FieldDescriptor{}
+ }
+ m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+ if err := m.TryClearField(fd); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ m.clearField(fd)
+ return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+ if err := m.TryClearFieldByName(name); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parenthesis or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ m.clearField(fd)
+ return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+ if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ m.clearField(fd)
+ return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+ // clear value
+ if m.values != nil {
+ delete(m.values, fd.GetNumber())
+ }
+ // also clear any unknown fields
+ if m.unknownFields != nil {
+ delete(m.unknownFields, fd.GetNumber())
+ }
+ // and add this field if it was previously unknown
+ if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+ m.addField(fd)
+ }
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+ if fd, val, err := m.TryGetOneOfField(od); err != nil {
+ panic(err.Error())
+ } else {
+ return fd, val
+ }
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+ if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ for _, fd := range od.GetChoices() {
+ val, err := m.doGetField(fd, true)
+ if err != nil {
+ return nil, nil, err
+ }
+ if val != nil {
+ return fd, val, nil
+ }
+ }
+ return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+ if err := m.TryClearOneOfField(od); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+ if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ for _, fd := range od.GetChoices() {
+ m.clearField(fd)
+ }
+ return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+ if v, err := m.TryGetMapField(fd, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+ if !fd.IsMap() {
+ return nil, FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key)
+ if err != nil {
+ return nil, err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ } else if mp == nil {
+ return nil, nil
+ }
+ }
+ return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err := m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ return nil
+ }
+ }
+ for k, v := range mp.(map[interface{}]interface{}) {
+ if !fn(k, v) {
+ break
+ }
+ }
+ return nil
+}
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+ if err := m.TryPutMapField(fd, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key)
+ if err != nil {
+ return err
+ }
+ vfd := fd.GetMessageType().GetFields()[1]
+ vi, err := validElementFieldValue(vfd, val)
+ if err != nil {
+ return err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+ return nil
+ }
+ }
+ mp.(map[interface{}]interface{})[ki] = vi
+ return nil
+}
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+ if err := m.TryRemoveMapField(fd, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+ if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+ if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.removeMapField(fd, key)
+}
+
+// removeMapField deletes the entry with the given key from the map field
+// described by fd, first parsing any pending unknown-field data for fd.
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ // Field 1 of the map's synthetic entry message is the key; validate and
+ // coerce the given key against its type.
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key)
+ if err != nil {
+ return err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ // No known value: the data may still be encoded in unknown fields,
+ // so try to parse it. A nil result means the field is simply absent.
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ return nil
+ }
+ }
+ res := mp.(map[interface{}]interface{})
+ delete(res, ki)
+ if len(res) == 0 {
+ // An empty map is equivalent to an absent field; clear the value.
+ delete(m.values, fd.GetNumber())
+ }
+ return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+ l, err := m.TryFieldLength(fd)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+ if err := m.checkField(fd); err != nil {
+ return 0, err
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+ l, err := m.TryFieldLengthByName(name)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return 0, UnknownFieldNameError
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+ l, err := m.TryFieldLengthByNumber(tagNumber)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return 0, UnknownTagNumberError
+ }
+ return m.fieldLength(fd)
+}
+
+// fieldLength returns the element count of the repeated or map field described
+// by fd, parsing any pending unknown-field data for fd first. An absent field
+// has length zero.
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+ if !fd.IsRepeated() {
+ return 0, FieldIsNotRepeatedError
+ }
+ val := m.values[fd.GetNumber()]
+ if val == nil {
+ var err error
+ if val, err = m.parseUnknownField(fd); err != nil {
+ return 0, err
+ } else if val == nil {
+ return 0, nil
+ }
+ }
+ if sl, ok := val.([]interface{}); ok {
+ return len(sl), nil
+ } else if mp, ok := val.(map[interface{}]interface{}); ok {
+ return len(mp), nil
+ }
+ // Defensive: repeated/map values are stored only as the two shapes above.
+ return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+ if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// getRepeatedField returns element index of the repeated field described by
+// fd, parsing any pending unknown-field data for fd first. Callers have
+// already rejected negative indexes.
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if fd.IsMap() || !fd.IsRepeated() {
+ return nil, FieldIsNotRepeatedError
+ }
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ var err error
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ } else if sl == nil {
+ // Absent field has length zero, so any index is out of range.
+ return nil, IndexOutOfRangeError
+ }
+ }
+ res := sl.([]interface{})
+ if index >= len(res) {
+ return nil, IndexOutOfRangeError
+ }
+ return res[index], nil
+}
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+ if err := m.TryAddRepeatedField(fd, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+ if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+ if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// addRepeatedField validates val and appends it to the repeated field
+// described by fd. For map fields, val must be an entry message; its key and
+// value (fields 1 and 2) are stored via putMapField instead of appending.
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+ if !fd.IsRepeated() {
+ return FieldIsNotRepeatedError
+ }
+ val, err := validElementFieldValue(fd, val)
+ if err != nil {
+ return err
+ }
+
+ if fd.IsMap() {
+ // We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
+ // adding entries one at a time (as if the field were a normal repeated field).
+ msg := val.(proto.Message)
+ dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+ if err != nil {
+ return err
+ }
+ k, err := dm.TryGetFieldByNumber(1)
+ if err != nil {
+ return err
+ }
+ v, err := dm.TryGetFieldByNumber(2)
+ if err != nil {
+ return err
+ }
+ return m.putMapField(fd, k, v)
+ }
+
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ // Parse any pending unknown-field data; absent field starts empty.
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if sl == nil {
+ sl = []interface{}{}
+ }
+ }
+ res := sl.([]interface{})
+ res = append(res, val)
+ // Store the (possibly reallocated) slice back via internalSetField.
+ m.internalSetField(fd, res)
+ return nil
+}
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// TrySetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+ if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// setRepeatedField validates val and replaces element index of the repeated
+// field described by fd, parsing any pending unknown-field data for fd first.
+// Callers have already rejected negative indexes.
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+ if fd.IsMap() || !fd.IsRepeated() {
+ return FieldIsNotRepeatedError
+ }
+ val, err := validElementFieldValue(fd, val)
+ if err != nil {
+ return err
+ }
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if sl == nil {
+ // Absent field has length zero, so any index is out of range.
+ return IndexOutOfRangeError
+ }
+ }
+ res := sl.([]interface{})
+ if index >= len(res) {
+ return IndexOutOfRangeError
+ }
+ res[index] = val
+ return nil
+}
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+ if u, ok := m.unknownFields[tagNumber]; ok {
+ return u
+ } else {
+ return nil
+ }
+}
+
+// parseUnknownField decodes any accumulated unknown-field data for fd's tag
+// according to fd's type information and promotes the result to a known field
+// value (via internalSetField). It returns (nil, nil) when this message has no
+// unknown data for that tag. Map fields decode to a
+// map[interface{}]interface{}, repeated fields to a []interface{}, and scalar
+// fields to the last decoded value.
+func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
+ unks, ok := m.unknownFields[fd.GetNumber()]
+ if !ok {
+ return nil, nil
+ }
+ var v interface{}
+ var sl []interface{}
+ var mp map[interface{}]interface{}
+ if fd.IsMap() {
+ mp = map[interface{}]interface{}{}
+ }
+ var err error
+ for _, unk := range unks {
+ var val interface{}
+ // Length-delimited and group encodings carry raw bytes; everything
+ // else was captured as a scalar varint/fixed value.
+ if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
+ val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf)
+ } else {
+ val, err = codec.DecodeScalarField(fd, unk.Value)
+ }
+ if err != nil {
+ return nil, err
+ }
+ if fd.IsMap() {
+ // Each decoded value is an entry message; field 1 is the key and
+ // field 2 is the value.
+ newEntry := val.(*Message)
+ kk, err := newEntry.TryGetFieldByNumber(1)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := newEntry.TryGetFieldByNumber(2)
+ if err != nil {
+ return nil, err
+ }
+ mp[kk] = vv
+ v = mp
+ } else if fd.IsRepeated() {
+ t := reflect.TypeOf(val)
+ if t.Kind() == reflect.Slice && t != typeOfBytes {
+ // append slices if we unmarshalled a packed repeated field
+ newVals := val.([]interface{})
+ sl = append(sl, newVals...)
+ } else {
+ sl = append(sl, val)
+ }
+ v = sl
+ } else {
+ v = val
+ }
+ }
+ m.internalSetField(fd, v)
+ return v, nil
+}
+
+// validFieldValue checks that val is a valid value for the field described by
+// fd and returns a normalized form of it. See validFieldValueForRv.
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+ return validFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+// validFieldValueForRv is like validFieldValue but operates on a
+// reflect.Value. Map fields are normalized to map[interface{}]interface{}
+// (accepting either a map or a slice of entry messages); repeated fields are
+// copied into a new []interface{}; other fields are normalized per
+// validElementFieldValueForRv.
+func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ if fd.IsMap() && val.Kind() == reflect.Map {
+ return validFieldValueForMapField(fd, val)
+ }
+
+ if fd.IsRepeated() { // this will also catch map fields where given value was not a map
+ if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
+ if fd.IsMap() {
+ return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
+ } else {
+ return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
+ }
+ }
+
+ if fd.IsMap() {
+ // value should be a slice of entry messages that we need convert into a map[interface{}]interface{}
+ m := map[interface{}]interface{}{}
+ for i := 0; i < val.Len(); i++ {
+ e, err := validElementFieldValue(fd, val.Index(i).Interface())
+ if err != nil {
+ return nil, err
+ }
+ msg := e.(proto.Message)
+ dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
+ if err != nil {
+ return nil, err
+ }
+ k, err := dm.TryGetFieldByNumber(1)
+ if err != nil {
+ return nil, err
+ }
+ v, err := dm.TryGetFieldByNumber(2)
+ if err != nil {
+ return nil, err
+ }
+ m[k] = v
+ }
+ return m, nil
+ }
+
+ // make a defensive copy while checking contents (also converts to []interface{})
+ s := make([]interface{}, val.Len())
+ for i := 0; i < val.Len(); i++ {
+ ev := val.Index(i)
+ if ev.Kind() == reflect.Interface {
+ // unwrap it
+ ev = reflect.ValueOf(ev.Interface())
+ }
+ e, err := validElementFieldValueForRv(fd, ev)
+ if err != nil {
+ return nil, err
+ }
+ s[i] = e
+ }
+
+ return s, nil
+ }
+
+ return validElementFieldValueForRv(fd, val)
+}
+
+// asDynamicMessage returns m as a *Message, converting via a merge into a new
+// dynamic message (built from md and mf) when m is a generated message.
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+ if dm, ok := m.(*Message); ok {
+ return dm, nil
+ }
+ dm := NewMessageWithMessageFactory(md, mf)
+ if err := dm.mergeFrom(m); err != nil {
+ return nil, err
+ }
+ return dm, nil
+}
+
+// validElementFieldValue checks that val is a valid element value for the
+// field described by fd and returns a normalized form of it. See
+// validElementFieldValueForRv.
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+ return validElementFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+// validElementFieldValueForRv validates a single (non-aggregate) value against
+// fd's declared type and returns it coerced to the canonical Go type for that
+// proto type (int32, int64, uint32, uint64, float32, float64, bool, []byte,
+// string, or proto.Message). Pointers are dereferenced before conversion.
+func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ t := fd.GetType()
+ if !val.IsValid() {
+ // An untyped nil is never a valid element value.
+ return nil, typeError(fd, nil)
+ }
+
+ switch t {
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM:
+ return toInt32(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return toInt64(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_UINT32:
+ return toUint32(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64:
+ return toUint64(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ return toFloat32(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ return toFloat64(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return toBool(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return toBytes(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return toString(reflect.Indirect(val), fd)
+
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptor.FieldDescriptorProto_TYPE_GROUP:
+ m, err := asMessage(val, fd.GetFullyQualifiedName())
+ // check that message is correct type
+ if err != nil {
+ return nil, err
+ }
+ var msgType string
+ if dm, ok := m.(*Message); ok {
+ msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
+ } else {
+ msgType = proto.MessageName(m)
+ }
+ if msgType != fd.GetMessageType().GetFullyQualifiedName() {
+ return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
+ }
+ return m, nil
+
+ default:
+ return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
+ }
+}
+
+// toInt32 coerces v to int32. Only values of kind int32 (including named
+// types such as enums) are accepted; wider int kinds are rejected.
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+ if v.Kind() == reflect.Int32 {
+ return int32(v.Int()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toUint32 coerces v to uint32. Only values of kind uint32 are accepted.
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+ if v.Kind() == reflect.Uint32 {
+ return uint32(v.Uint()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toFloat32 coerces v to float32. Only values of kind float32 are accepted.
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+ if v.Kind() == reflect.Float32 {
+ return float32(v.Float()), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toInt64 coerces v to int64, additionally accepting int and int32 values
+// (widening conversions are always safe).
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+ if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 {
+ return v.Int(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toUint64 coerces v to uint64, additionally accepting uint and uint32 values.
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+ if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 {
+ return v.Uint(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toFloat64 coerces v to float64, additionally accepting float32 values.
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+ if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 {
+ return v.Float(), nil
+ }
+ return 0, typeError(fd, v.Type())
+}
+
+// toBool coerces v to bool.
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+ if v.Kind() == reflect.Bool {
+ return v.Bool(), nil
+ }
+ return false, typeError(fd, v.Type())
+}
+
+// toBytes coerces v to []byte; any slice whose element kind is uint8 works.
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+ return v.Bytes(), nil
+ }
+ return nil, typeError(fd, v.Type())
+}
+
+// toString coerces v to string.
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+ if v.Kind() == reflect.String {
+ return v.String(), nil
+ }
+ return "", typeError(fd, v.Type())
+}
+
+// typeError builds the standard "incompatible value" error for fd; t may be
+// nil (e.g. when the offending value was an untyped nil).
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+ return fmt.Errorf(
+ "%s field %s is not compatible with value of type %v",
+ getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+// getTypeString returns fd's proto type name in lowercase, for error messages.
+func getTypeString(fd *desc.FieldDescriptor) string {
+ return strings.ToLower(fd.GetType().String())
+}
+
+// asMessage asserts that v holds a pointer to a struct implementing
+// proto.Message and returns it as such. fieldName is used only to construct
+// the error message when the value is incompatible.
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+ t := v.Type()
+ // we need a pointer to a struct that implements proto.Message
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+ // (fixed garbled wording: was "requires is not compatible with")
+ return nil, fmt.Errorf("message field %s is not compatible with value of type %v", fieldName, v.Type())
+ }
+ return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+ // Clear in place (rather than allocating fresh maps) so that any existing
+ // references to m keep observing the same map instances.
+ for k := range m.values {
+ delete(m.values, k)
+ }
+ for k := range m.unknownFields {
+ delete(m.unknownFields, k)
+ }
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+ b, err := m.MarshalText()
+ if err != nil {
+ panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+ }
+ return string(b)
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+// target.Reset()
+// m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+ // Verify the target's type before mutating it.
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+
+ target.Reset()
+ return m.mergeInto(target)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+// m.Reset()
+// m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+ // Verify the source's type before clearing this message.
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+
+ m.Reset()
+ return m.mergeFrom(target)
+}
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+ if err := m.checkType(target); err != nil {
+ return err
+ }
+ return m.mergeInto(target)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+ if err := m.checkType(source); err != nil {
+ return err
+ }
+ return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic messages are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and then calls Merge.
+func (m *Message) Merge(source proto.Message) {
+ if m.md == nil {
+ // To support proto.Clone, initialize the descriptor from the source.
+ if dm, ok := source.(*Message); ok {
+ m.md = dm.md
+ // also make sure the clone uses the same message factory and
+ // extensions and also knows about the same extra fields (if any)
+ m.mf = dm.mf
+ m.er = dm.er
+ m.extraFields = dm.extraFields
+ } else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+ panic(err.Error())
+ } else {
+ m.md = md
+ }
+ }
+
+ if err := m.MergeFrom(source); err != nil {
+ panic(err.Error())
+ }
+}
+
+// checkType verifies that target has the same fully-qualified message type as
+// this dynamic message, whether target is another dynamic message or a
+// generated one.
+func (m *Message) checkType(target proto.Message) error {
+ if dm, ok := target.(*Message); ok {
+ if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ return nil
+ }
+
+ msgName := proto.MessageName(target)
+ if msgName != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+ }
+ return nil
+}
+
+// mergeInto merges this dynamic message's data into the given generated
+// message using reflection over its struct fields, one-ofs, and extensions.
+// It first validates that every value is convertible (so a failure cannot
+// leave pm partially mutated by this pass), then performs the merge. Tags
+// pm does not recognize are serialized into its XXX_unrecognized bytes,
+// when that field exists.
+func (m *Message) mergeInto(pm proto.Message) error {
+ if dm, ok := pm.(*Message); ok {
+ // Dynamic-to-dynamic is simpler: reuse mergeFrom in reverse.
+ return dm.mergeFrom(m)
+ }
+
+ target := reflect.ValueOf(pm)
+ if target.Kind() == reflect.Ptr {
+ target = target.Elem()
+ }
+
+ // track tags for which the dynamic message has data but the given
+ // message doesn't know about it
+ u := target.FieldByName("XXX_unrecognized")
+ var unknownTags map[int32]struct{}
+ if u.IsValid() && u.Type() == typeOfBytes {
+ unknownTags = map[int32]struct{}{}
+ for tag := range m.values {
+ unknownTags[tag] = struct{}{}
+ }
+ }
+
+ // check that we can successfully do the merge
+ structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+ for _, prop := range structProps.Prop {
+ if prop.Tag == 0 {
+ continue // one-of or special field (such as XXX_unrecognized, etc.)
+ }
+ tag := int32(prop.Tag)
+ v, ok := m.values[tag]
+ if !ok {
+ continue
+ }
+ if unknownTags != nil {
+ // Target recognizes this tag, so it won't go to XXX_unrecognized.
+ delete(unknownTags, tag)
+ }
+ f := target.FieldByName(prop.Name)
+ ft := f.Type()
+ val := reflect.ValueOf(v)
+ if !canConvert(val, ft) {
+ return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+ }
+ }
+ // check one-of fields
+ for _, oop := range structProps.OneofTypes {
+ prop := oop.Prop
+ tag := int32(prop.Tag)
+ v, ok := m.values[tag]
+ if !ok {
+ continue
+ }
+ if unknownTags != nil {
+ delete(unknownTags, tag)
+ }
+ stf, ok := oop.Type.Elem().FieldByName(prop.Name)
+ if !ok {
+ return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
+ }
+ ft := stf.Type
+ val := reflect.ValueOf(v)
+ if !canConvert(val, ft) {
+ return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+ }
+ }
+ // and check extensions, too
+ for tag, ext := range proto.RegisteredExtensions(pm) {
+ v, ok := m.values[tag]
+ if !ok {
+ continue
+ }
+ if unknownTags != nil {
+ delete(unknownTags, tag)
+ }
+ ft := reflect.TypeOf(ext.ExtensionType)
+ val := reflect.ValueOf(v)
+ if !canConvert(val, ft) {
+ return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+ }
+ }
+
+ // now actually perform the merge
+ for _, prop := range structProps.Prop {
+ v, ok := m.values[int32(prop.Tag)]
+ if !ok {
+ continue
+ }
+ f := target.FieldByName(prop.Name)
+ if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+ return err
+ }
+ }
+ // merge one-ofs
+ for _, oop := range structProps.OneofTypes {
+ prop := oop.Prop
+ tag := int32(prop.Tag)
+ v, ok := m.values[tag]
+ if !ok {
+ continue
+ }
+ // Allocate the one-of wrapper struct and set it on the parent field.
+ oov := reflect.New(oop.Type.Elem())
+ f := oov.Elem().FieldByName(prop.Name)
+ if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+ return err
+ }
+ target.Field(oop.Field).Set(oov)
+ }
+ // merge extensions, too
+ for tag, ext := range proto.RegisteredExtensions(pm) {
+ v, ok := m.values[tag]
+ if !ok {
+ continue
+ }
+ e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
+ if err := mergeVal(reflect.ValueOf(v), e); err != nil {
+ return err
+ }
+ if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
+ // shouldn't happen since we already checked that the extension type was compatible above
+ return err
+ }
+ }
+
+ // if we have fields that the given message doesn't know about, add to its unknown fields
+ if len(unknownTags) > 0 {
+ ub := u.Interface().([]byte)
+ var b codec.Buffer
+ b.SetDeterministic(defaultDeterminism)
+ for tag := range unknownTags {
+ fd := m.FindFieldDescriptor(tag)
+ if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+ return err
+ }
+ }
+ ub = append(ub, b.Bytes()...)
+ u.Set(reflect.ValueOf(ub))
+ }
+
+ // finally, convey unknown fields into the given message by letting it unmarshal them
+ // (this will append to its unknown fields if not known; if somehow the given message recognizes
+ // a field even though the dynamic message did not, it will get correctly unmarshalled)
+ if unknownTags != nil && len(m.unknownFields) > 0 {
+ var b codec.Buffer
+ _ = m.marshalUnknownFields(&b)
+ // NOTE(review): errors here are deliberately ignored (best-effort
+ // conveyance of unknown fields); presumably upstream accepts the loss.
+ _ = proto.UnmarshalMerge(b.Bytes(), pm)
+ }
+
+ return nil
+}
+
+func canConvert(src reflect.Value, target reflect.Type) bool {
+ if src.Kind() == reflect.Interface {
+ src = reflect.ValueOf(src.Interface())
+ }
+ srcType := src.Type()
+ // we allow convertible types instead of requiring exact types so that calling
+ // code can, for example, assign an enum constant to an enum field. In that case,
+ // one type is the enum type (a sub-type of int32) and the other may be the int32
+ // type. So we automatically do the conversion in that case.
+ if srcType.ConvertibleTo(target) {
+ return true
+ } else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
+ return true
+ } else if target.Kind() == reflect.Slice {
+ if srcType.Kind() != reflect.Slice {
+ return false
+ }
+ et := target.Elem()
+ for i := 0; i < src.Len(); i++ {
+ if !canConvert(src.Index(i), et) {
+ return false
+ }
+ }
+ return true
+ } else if target.Kind() == reflect.Map {
+ if srcType.Kind() != reflect.Map {
+ return false
+ }
+ return canConvertMap(src, target)
+ } else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
+ z := reflect.Zero(target).Interface()
+ msgType := proto.MessageName(z.(proto.Message))
+ return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
+ } else {
+ return false
+ }
+}
+
// mergeVal merges the value in src into the addressable target value,
// converting between compatible types as needed. Scalars are overwritten,
// repeated fields are appended, maps are merged, and message values are
// merged field-by-field. An error is returned if the types are incompatible.
func mergeVal(src, target reflect.Value) error {
	// Unwrap a non-nil interface to get at the concrete value.
	if src.Kind() == reflect.Interface && !src.IsNil() {
		src = src.Elem()
	}
	srcType := src.Type()
	targetType := target.Type()
	if srcType.ConvertibleTo(targetType) {
		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
			// Target already holds a message: merge into it rather than replace it.
			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
		} else {
			target.Set(src.Convert(targetType))
		}
	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
		// Source converts to the pointee type: box it into a pointer.
		if !src.CanAddr() {
			target.Set(reflect.New(targetType.Elem()))
			target.Elem().Set(src.Convert(targetType.Elem()))
		} else {
			target.Set(src.Addr().Convert(targetType))
		}
	} else if targetType.Kind() == reflect.Slice {
		// Repeated field: append src's elements after target's existing ones.
		l := target.Len()
		newL := l + src.Len()
		if target.Cap() < newL {
			// expand capacity of the slice and copy
			newSl := reflect.MakeSlice(targetType, newL, newL)
			for i := 0; i < target.Len(); i++ {
				newSl.Index(i).Set(target.Index(i))
			}
			target.Set(newSl)
		} else {
			target.SetLen(newL)
		}
		for i := 0; i < src.Len(); i++ {
			dest := target.Index(l + i)
			if dest.Kind() == reflect.Ptr {
				// Allocate a fresh element so mergeVal has something to merge into.
				dest.Set(reflect.New(dest.Type().Elem()))
			}
			if err := mergeVal(src.Index(i), dest); err != nil {
				return err
			}
		}
	} else if targetType.Kind() == reflect.Map {
		return mergeMapVal(src, target, targetType)
	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
		// Dynamic message into a generated message: allocate if needed, then merge.
		dm := src.Interface().(*Message)
		if target.IsNil() {
			target.Set(reflect.New(targetType.Elem()))
		}
		m := target.Interface().(proto.Message)
		if err := dm.mergeInto(m); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
	}
	return nil
}
+
// mergeFrom merges all fields of pm — regular fields, one-ofs, extensions,
// and unknown fields — into the dynamic message m. All values are validated
// against m's descriptor before any mutation happens, so a returned error
// means m was left unchanged by the validation phase.
func (m *Message) mergeFrom(pm proto.Message) error {
	if dm, ok := pm.(*Message); ok {
		// if given message is also a dynamic message, we merge differently
		for tag, v := range dm.values {
			fd := m.FindFieldDescriptor(tag)
			if fd == nil {
				// fall back to the source message's descriptor for this tag
				fd = dm.FindFieldDescriptor(tag)
			}
			if err := mergeField(m, fd, v); err != nil {
				return err
			}
		}
		return nil
	}

	pmrv := reflect.ValueOf(pm)
	if pmrv.IsNil() {
		// nil is an empty message, so nothing to do
		return nil
	}

	// check that we can successfully do the merge
	src := pmrv.Elem()
	values := map[*desc.FieldDescriptor]interface{}{}
	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
	if props == nil {
		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
	}

	// regular fields
	for _, prop := range props.Prop {
		if prop.Tag == 0 {
			continue // one-of or special field (such as XXX_unrecognized, etc.)
		}
		fd := m.FindFieldDescriptor(int32(prop.Tag))
		if fd == nil {
			// Our descriptor has different fields than this message object. So
			// try to reflect on the message object's fields.
			md, err := desc.LoadMessageDescriptorForMessage(pm)
			if err != nil {
				return err
			}
			fd = md.FindFieldByNumber(int32(prop.Tag))
			if fd == nil {
				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
			}
		}
		rv := src.FieldByName(prop.Name)
		// skip unset fields (nil pointer or nil slice)
		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
			continue
		}
		if v, err := validFieldValueForRv(fd, rv); err != nil {
			return err
		} else {
			values[fd] = v
		}
	}

	// one-of fields
	for _, oop := range props.OneofTypes {
		oov := src.Field(oop.Field).Elem()
		if !oov.IsValid() || oov.Type() != oop.Type {
			// this field is unset (in other words, one-of message field is not currently set to this option)
			continue
		}
		prop := oop.Prop
		rv := oov.Elem().FieldByName(prop.Name)
		fd := m.FindFieldDescriptor(int32(prop.Tag))
		if fd == nil {
			// Our descriptor has different fields than this message object. So
			// try to reflect on the message object's fields.
			md, err := desc.LoadMessageDescriptorForMessage(pm)
			if err != nil {
				return err
			}
			fd = md.FindFieldByNumber(int32(prop.Tag))
			if fd == nil {
				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
			}
		}
		if v, err := validFieldValueForRv(fd, rv); err != nil {
			return err
		} else {
			values[fd] = v
		}
	}

	// extension fields
	rexts, _ := proto.ExtensionDescs(pm)
	var unknownExtensions []byte
	for _, ed := range rexts {
		v, _ := proto.GetExtension(pm, ed)
		if v == nil {
			continue
		}
		if ed.ExtensionType == nil {
			// unrecognized extension: raw bytes, collected for best-effort unmarshal below
			extBytes, _ := v.([]byte)
			if len(extBytes) > 0 {
				unknownExtensions = append(unknownExtensions, extBytes...)
			}
			continue
		}
		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
		if fd == nil {
			var err error
			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
				return err
			}
		}
		if v, err := validFieldValue(fd, v); err != nil {
			return err
		} else {
			values[fd] = v
		}
	}

	// now actually perform the merge
	for fd, v := range values {
		// error intentionally ignored: every value above already passed validation,
		// so the merge should not fail at this point
		mergeField(m, fd, v)
	}

	u := src.FieldByName("XXX_unrecognized")
	if u.IsValid() && u.Type() == typeOfBytes {
		// ignore any error returned: pulling in unknown fields is best-effort
		_ = m.UnmarshalMerge(u.Interface().([]byte))
	}

	// lastly, also extract any unknown extensions the message may have (unknown extensions
	// are stored with other extensions, not in the XXX_unrecognized field, so we have to do
	// more than just the step above...)
	if len(unknownExtensions) > 0 {
		// pulling in unknown fields is best-effort, so we just ignore errors
		_ = m.UnmarshalMerge(unknownExtensions)
	}
	return nil
}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+ missingFields := m.findMissingFields()
+ if len(missingFields) == 0 {
+ return nil
+ }
+ return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+}
+
+func (m *Message) findMissingFields() []string {
+ if m.md.IsProto3() {
+ // proto3 does not allow required fields
+ return nil
+ }
+ var missingFields []string
+ for _, fd := range m.md.GetFields() {
+ if fd.IsRequired() {
+ if _, ok := m.values[fd.GetNumber()]; !ok {
+ missingFields = append(missingFields, fd.GetName())
+ }
+ }
+ }
+ return missingFields
+}
+
// ValidateRecursive checks that all required fields are present and also
// recursively validates all fields who are also messages. It returns an error
// if any required fields, in this message or nested within, are absent.
func (m *Message) ValidateRecursive() error {
	// Start with an empty prefix; nested calls extend it so error messages
	// name the full path to any missing field.
	return m.validateRecursive("")
}
+
// validateRecursive implements ValidateRecursive. The prefix is prepended to
// field names in error messages so that a missing field deep in the tree is
// reported with its full path (e.g. "outer.inner[2].field").
func (m *Message) validateRecursive(prefix string) error {
	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
		for i := range missingFields {
			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
		}
		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
	}

	for tag, fld := range m.values {
		fd := m.FindFieldDescriptor(tag)
		var chprefix string
		var md *desc.MessageDescriptor
		// checkMsg validates one nested message value; chprefix and md are set
		// by the caller before each invocation.
		checkMsg := func(pm proto.Message) error {
			var dm *Message
			if d, ok := pm.(*Message); ok {
				dm = d
			} else {
				// convert generated message to dynamic form so we can recurse
				dm = m.mf.NewDynamicMessage(md)
				if err := dm.ConvertFrom(pm); err != nil {
					// NOTE(review): a conversion failure is silently skipped here
					// (returns nil, not err) — looks like best-effort, but confirm
					// against upstream whether the error should propagate.
					return nil
				}
			}
			if err := dm.validateRecursive(chprefix); err != nil {
				return err
			}
			return nil
		}
		isMap := fd.IsMap()
		if isMap && fd.GetMapValueType().GetMessageType() != nil {
			// map whose values are messages: validate each value
			md = fd.GetMapValueType().GetMessageType()
			mp := fld.(map[interface{}]interface{})
			for k, v := range mp {
				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
				if err := checkMsg(v.(proto.Message)); err != nil {
					return err
				}
			}
		} else if !isMap && fd.GetMessageType() != nil {
			md = fd.GetMessageType()
			if fd.IsRepeated() {
				// repeated message field: validate each element
				sl := fld.([]interface{})
				for i, v := range sl {
					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
					if err := checkMsg(v.(proto.Message)); err != nil {
						return err
					}
				}
			} else {
				// singular message field
				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
				if err := checkMsg(fld.(proto.Message)); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
+
+func getName(fd *desc.FieldDescriptor) string {
+ if fd.IsExtension() {
+ return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+ } else {
+ return fd.GetName()
+ }
+}
+
+// knownFieldTags return tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+ if len(m.values) == 0 {
+ return []int(nil)
+ }
+
+ keys := make([]int, len(m.values))
+ i := 0
+ for k := range m.values {
+ keys[i] = int(k)
+ i++
+ }
+
+ sort.Ints(keys)
+ return keys
+}
+
+// allKnownFieldTags return tags of present and recognized fields, including
+// those that are unset, in sorted order. This only includes extensions that are
+// present. Known but not-present extensions are not included in the returned
+// set of tags.
+func (m *Message) allKnownFieldTags() []int {
+ fds := m.md.GetFields()
+ keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+ for k := range m.values {
+ keys = append(keys, int(k))
+ }
+
+ // also include known fields that are not present
+ for _, fd := range fds {
+ if _, ok := m.values[fd.GetNumber()]; !ok {
+ keys = append(keys, int(fd.GetNumber()))
+ }
+ }
+ for _, fd := range m.extraFields {
+ if !fd.IsExtension() { // skip extensions that are not present
+ if _, ok := m.values[fd.GetNumber()]; !ok {
+ keys = append(keys, int(fd.GetNumber()))
+ }
+ }
+ }
+
+ sort.Ints(keys)
+ return keys
+}
+
+// unknownFieldTags return tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+ if len(m.unknownFields) == 0 {
+ return []int(nil)
+ }
+ keys := make([]int, len(m.unknownFields))
+ i := 0
+ for k := range m.unknownFields {
+ keys[i] = int(k)
+ i++
+ }
+ sort.Ints(keys)
+ return keys
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..5fbcc24
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,152 @@
+package dynamic
+
+import (
+ "bytes"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
+// have the same message type and same fields set to equal values. For proto3 messages, fields set
+// to their zero value are considered unset.
+func Equal(a, b *Message) bool {
+ if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
+ return false
+ }
+ if len(a.values) != len(b.values) {
+ return false
+ }
+ if len(a.unknownFields) != len(b.unknownFields) {
+ return false
+ }
+ for tag, aval := range a.values {
+ bval, ok := b.values[tag]
+ if !ok {
+ return false
+ }
+ if !fieldsEqual(aval, bval) {
+ return false
+ }
+ }
+ for tag, au := range a.unknownFields {
+ bu, ok := b.unknownFields[tag]
+ if !ok {
+ return false
+ }
+ if len(au) != len(bu) {
+ return false
+ }
+ for i, aval := range au {
+ bval := bu[i]
+ if aval.Encoding != bval.Encoding {
+ return false
+ }
+ if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
+ if !bytes.Equal(aval.Contents, bval.Contents) {
+ return false
+ }
+ } else if aval.Value != bval.Value {
+ return false
+ }
+ }
+ }
+ // all checks pass!
+ return true
+}
+
+func fieldsEqual(aval, bval interface{}) bool {
+ arv := reflect.ValueOf(aval)
+ brv := reflect.ValueOf(bval)
+ if arv.Type() != brv.Type() {
+ // it is possible that one is a dynamic message and one is not
+ apm, ok := aval.(proto.Message)
+ if !ok {
+ return false
+ }
+ bpm, ok := bval.(proto.Message)
+ if !ok {
+ return false
+ }
+ return MessagesEqual(apm, bpm)
+
+ } else {
+ switch arv.Kind() {
+ case reflect.Ptr:
+ apm, ok := aval.(proto.Message)
+ if !ok {
+ // Don't know how to compare pointer values that aren't messages!
+ // Maybe this should panic?
+ return false
+ }
+ bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
+ return MessagesEqual(apm, bpm)
+
+ case reflect.Map:
+ return mapsEqual(arv, brv)
+
+ case reflect.Slice:
+ if arv.Type() == typeOfBytes {
+ return bytes.Equal(aval.([]byte), bval.([]byte))
+ } else {
+ return slicesEqual(arv, brv)
+ }
+
+ default:
+ return aval == bval
+ }
+ }
+}
+
+func slicesEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ for i := 0; i < a.Len(); i++ {
+ ai := a.Index(i)
+ bi := b.Index(i)
+ if !fieldsEqual(ai.Interface(), bi.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
+// when one or both of the messages might be a dynamic message.
+func MessagesEqual(a, b proto.Message) bool {
+ da, aok := a.(*Message)
+ db, bok := b.(*Message)
+ // Both dynamic messages
+ if aok && bok {
+ return Equal(da, db)
+ }
+ // Neither dynamic messages
+ if !aok && !bok {
+ return proto.Equal(a, b)
+ }
+ // Mixed
+ if aok {
+ md, err := desc.LoadMessageDescriptorForMessage(b)
+ if err != nil {
+ return false
+ }
+ db = NewMessageWithMessageFactory(md, da.mf)
+ if db.ConvertFrom(b) != nil {
+ return false
+ }
+ return Equal(da, db)
+ } else {
+ md, err := desc.LoadMessageDescriptorForMessage(a)
+ if err != nil {
+ return false
+ }
+ da = NewMessageWithMessageFactory(md, db.mf)
+ if da.ConvertFrom(a) != nil {
+ return false
+ }
+ return Equal(da, db)
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..1d38161
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,46 @@
+package dynamic
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked in version of the extension. So in that case,
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+ if !extd.IsExtension() {
+ return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+ }
+
+ if dm, ok := msg.(*Message); ok {
+ return dm.TrySetField(extd, val)
+ }
+
+ md, err := desc.LoadMessageDescriptorForMessage(msg)
+ if err != nil {
+ return err
+ }
+ if err := checkField(extd, md); err != nil {
+ return err
+ }
+
+ val, err = validFieldValue(extd, val)
+ if err != nil {
+ return err
+ }
+
+ var b codec.Buffer
+ b.SetDeterministic(defaultDeterminism)
+ if err := b.EncodeFieldValue(extd, val); err != nil {
+ return err
+ }
+ proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes())
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// ExtensionRegistry is a registry of known extension fields. This is used to parse
// extension fields encountered when de-serializing a dynamic message.
type ExtensionRegistry struct {
	includeDefault bool // when true, also consult extensions linked into the program
	mu             sync.RWMutex // guards exts
	exts           map[string]map[int32]*desc.FieldDescriptor // extendee name -> tag number -> extension
}
+
// NewExtensionRegistryWithDefaults is a registry that includes all "default" extensions,
// which are those that are statically linked into the current program (e.g. registered by
// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
// registry will override any default extensions that are for the same extendee and have the
// same tag number and/or name.
func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
	// exts is lazily allocated on first write; only the flag needs setting here.
	return &ExtensionRegistry{includeDefault: true}
}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+ flds := make([]*desc.FieldDescriptor, len(exts))
+ for i, ext := range exts {
+ fd, err := desc.LoadFieldDescriptorForExtension(ext)
+ if err != nil {
+ return err
+ }
+ flds[i] = fd
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, fd := range flds {
+ r.putExtensionLocked(fd)
+ }
+ return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+ for _, ext := range exts {
+ if !ext.IsExtension() {
+ return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+ }
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, ext := range exts {
+ r.putExtensionLocked(ext)
+ }
+ return nil
+}
+
// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// non-recursive: nil alreadySeen map is never consulted when recursive is false
	r.addExtensionsFromFileLocked(fd, false, nil)
}
+
// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the give file
// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
// extensions from the entire transitive closure for the given file.
func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// tracks visited files so shared dependencies are processed only once
	already := map[*desc.FileDescriptor]struct{}{}
	r.addExtensionsFromFileLocked(fd, true, already)
}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+ if _, ok := alreadySeen[fd]; ok {
+ return
+ }
+
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, ext := range fd.GetExtensions() {
+ r.putExtensionLocked(ext)
+ }
+ for _, msg := range fd.GetMessageTypes() {
+ r.addExtensionsFromMessageLocked(msg)
+ }
+
+ if recursive {
+ alreadySeen[fd] = struct{}{}
+ for _, dep := range fd.GetDependencies() {
+ r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+ }
+ }
+}
+
// addExtensionsFromMessageLocked registers all extensions declared inside the
// given message, recursing into nested message types. Caller must hold r.mu.
func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
	for _, ext := range md.GetNestedExtensions() {
		r.putExtensionLocked(ext)
	}
	for _, msg := range md.GetNestedMessageTypes() {
		r.addExtensionsFromMessageLocked(msg)
	}
}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+ msgName := fd.GetOwner().GetFullyQualifiedName()
+ m := r.exts[msgName]
+ if m == nil {
+ m = map[int32]*desc.FieldDescriptor{}
+ r.exts[msgName] = m
+ }
+ m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ fd := r.exts[messageName][tagNumber]
+ if fd == nil && r.includeDefault {
+ ext := getDefaultExtensions(messageName)[tagNumber]
+ if ext != nil {
+ fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+ }
+ }
+ return fd
+}
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ for _, fd := range r.exts[messageName] {
+ if fd.GetFullyQualifiedName() == fieldName {
+ return fd
+ }
+ }
+ if r.includeDefault {
+ for _, ext := range getDefaultExtensions(messageName) {
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd.GetFullyQualifiedName() == fieldName {
+ return fd
+ }
+ }
+ }
+ return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ for _, fd := range r.exts[messageName] {
+ if fd.GetFullyQualifiedJSONName() == fieldName {
+ return fd
+ }
+ }
+ if r.includeDefault {
+ for _, ext := range getDefaultExtensions(messageName) {
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd.GetFullyQualifiedJSONName() == fieldName {
+ return fd
+ }
+ }
+ }
+ return nil
+}
+
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+ t := proto.MessageType(messageName)
+ if t != nil {
+ msg := reflect.Zero(t).Interface().(proto.Message)
+ return proto.RegisteredExtensions(msg)
+ }
+ return nil
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+ if r == nil {
+ return []*desc.FieldDescriptor(nil)
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ flds := r.exts[messageName]
+ var ret []*desc.FieldDescriptor
+ if r.includeDefault {
+ exts := getDefaultExtensions(messageName)
+ if len(exts) > 0 || len(flds) > 0 {
+ ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+ }
+ for tag, ext := range exts {
+ if _, ok := flds[tag]; ok {
+ // skip default extension and use the one explicitly registered instead
+ continue
+ }
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd != nil {
+ ret = append(ret, fd)
+ }
+ }
+ } else if len(flds) > 0 {
+ ret = make([]*desc.FieldDescriptor, 0, len(flds))
+ }
+
+ for _, ext := range flds {
+ ret = append(ret, ext)
+ }
+ return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 0000000..1eaedfa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,303 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// method where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
type Stub struct {
	channel Channel // transport used to issue RPCs (e.g. a *grpc.ClientConn)
	mf      *dynamic.MessageFactory // used to construct response messages; nil means default behavior
}
+
// Channel represents the operations necessary to issue RPCs via gRPC. The
// *grpc.ClientConn type provides this interface and will typically the concrete
// type used to construct Stubs. But the use of this interface allows
// construction of stubs that use alternate concrete types as the transport for
// RPC operations.
type Channel interface {
	// Invoke performs a unary RPC, filling in reply with the server's response.
	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
	// NewStream begins a streaming RPC described by desc.
	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
}
+
+var _ Channel = (*grpc.ClientConn)(nil)
+
// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
func NewStub(channel Channel) Stub {
	// nil message factory means responses are built with default factory behavior
	return NewStubWithMessageFactory(channel, nil)
}
+
// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
// dispatching RPCs and the given MessageFactory for creating response messages.
func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
	return Stub{channel: channel, mf: mf}
}
+
+func requestMethod(md *desc.MethodDescriptor) string {
+ return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName())
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+ if method.IsClientStreaming() || method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ if err := checkMessageType(method.GetInputType(), request); err != nil {
+ return nil, err
+ }
+ resp := s.mf.NewMessage(method.GetOutputType())
+ if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// InvokeRpcServerStream sends a unary RPC and returns the response stream. Use this for server-streaming methods.
+func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) {
+ if method.IsClientStreaming() || !method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ if err := checkMessageType(method.GetInputType(), request); err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ return nil, err
+ } else {
+ err = cs.SendMsg(request)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ err = cs.CloseSend()
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
+ }
+}
+
+// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end,
+// receive the response message. Use this for client-streaming methods.
+func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) {
+ if !method.IsClientStreaming() || method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ return nil, err
+ } else {
+ return &ClientStream{cs, method, s.mf, cancel}, nil
+ }
+}
+
+// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response
+// messages. Use this for bidi-streaming methods.
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+ if !method.IsClientStreaming() || !method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ return nil, err
+ } else {
+ return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+ }
+}
+
+func methodType(md *desc.MethodDescriptor) string {
+ if md.IsClientStreaming() && md.IsServerStreaming() {
+ return "bidi-streaming"
+ } else if md.IsClientStreaming() {
+ return "client-streaming"
+ } else if md.IsServerStreaming() {
+ return "server-streaming"
+ } else {
+ return "unary"
+ }
+}
+
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+ var typeName string
+ if dm, ok := msg.(*dynamic.Message); ok {
+ typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+ } else {
+ typeName = proto.MessageName(msg)
+ }
+ if typeName != md.GetFullyQualifiedName() {
+ return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+ }
+ return nil
+}
+
// ServerStream represents a response stream from a server. Messages in the stream can be queried
// as can header and trailer metadata sent by the server.
type ServerStream struct {
	stream   grpc.ClientStream // underlying gRPC stream
	respType *desc.MessageDescriptor // type of each response message
	mf       *dynamic.MessageFactory // used to allocate response messages
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). Delegates to the underlying grpc.ClientStream.
func (s *ServerStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+func (s *ServerStream) Trailer() metadata.MD {
+ return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ServerStream) Context() context.Context {
+ return s.stream.Context()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+ resp := s.mf.NewMessage(s.respType)
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
+
// ClientStream represents a response stream from a client. Messages in the stream can be sent
// and, when done, the unary server message and header and trailer metadata can be queried.
type ClientStream struct {
	stream grpc.ClientStream       // underlying gRPC stream
	method *desc.MethodDescriptor  // invoked method; supplies request/response types
	mf     *dynamic.MessageFactory // used to instantiate the response message
	cancel context.CancelFunc      // aborts the RPC if the server sends extra responses
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). It delegates to the underlying grpc.ClientStream.
func (s *ClientStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}

// Trailer returns the trailer metadata sent by the server. It must only be called after
// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
func (s *ClientStream) Trailer() metadata.MD {
	return s.stream.Trailer()
}

// Context returns the context associated with this streaming operation.
func (s *ClientStream) Context() context.Context {
	return s.stream.Context()
}
+
+// SendMsg sends a request message to the server.
+func (s *ClientStream) SendMsg(m proto.Message) error {
+ if err := checkMessageType(s.method.GetInputType(), m); err != nil {
+ return err
+ }
+ return s.stream.SendMsg(m)
+}
+
+// CloseAndReceive closes the outgoing request stream and then blocks for the server's response.
+func (s *ClientStream) CloseAndReceive() (proto.Message, error) {
+ if err := s.stream.CloseSend(); err != nil {
+ return nil, err
+ }
+ resp := s.mf.NewMessage(s.method.GetOutputType())
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ }
+ // make sure we get EOF for a second message
+ if err := s.stream.RecvMsg(resp); err != io.EOF {
+ if err == nil {
+ s.cancel()
+ return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName())
+ } else {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
// BidiStream represents a bi-directional stream for sending messages to and receiving
// messages from a server. The header and trailer metadata sent by the server can also be
// queried.
type BidiStream struct {
	stream   grpc.ClientStream       // underlying gRPC stream
	reqType  *desc.MessageDescriptor // expected request message type
	respType *desc.MessageDescriptor // response message type to instantiate
	mf       *dynamic.MessageFactory // used to instantiate response messages
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). It delegates to the underlying grpc.ClientStream.
func (s *BidiStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}

// Trailer returns the trailer metadata sent by the server. It must only be called after
// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
func (s *BidiStream) Trailer() metadata.MD {
	return s.stream.Trailer()
}

// Context returns the context associated with this streaming operation.
func (s *BidiStream) Context() context.Context {
	return s.stream.Context()
}
+
+// SendMsg sends a request message to the server.
+func (s *BidiStream) SendMsg(m proto.Message) error {
+ if err := checkMessageType(s.reqType, m); err != nil {
+ return err
+ }
+ return s.stream.SendMsg(m)
+}
+
// CloseSend indicates the request stream has ended. Invoke this after all request messages
// are sent (even if there are zero such messages). It delegates to the
// underlying grpc.ClientStream.
func (s *BidiStream) CloseSend() error {
	return s.stream.CloseSend()
}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *BidiStream) RecvMsg() (proto.Message, error) {
+ resp := s.mf.NewMessage(s.respType)
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
// indentBuffer accumulates serialized output, optionally pretty-printed.
// When indentCount is negative, output is compact (single line); otherwise
// start/end/next emit a newline followed by indentCount copies of the
// indent string. The comma flag selects the separator written between
// sibling elements in compact mode (and whether newLine prefixes a comma).
type indentBuffer struct {
	bytes.Buffer
	indent      string
	indentCount int
	comma       bool
}

// start opens a nested level: in pretty mode it increases the depth and
// begins a fresh line; in compact mode it is a no-op.
func (b *indentBuffer) start() error {
	if b.indentCount < 0 {
		return nil
	}
	b.indentCount++
	return b.newLine(false)
}

// sep writes the key/value separator: ": " when pretty, ":" when compact.
func (b *indentBuffer) sep() error {
	if b.indentCount < 0 {
		return b.WriteByte(':')
	}
	_, err := b.WriteString(": ")
	return err
}

// end closes a nested level previously opened by start.
func (b *indentBuffer) end() error {
	if b.indentCount < 0 {
		return nil
	}
	b.indentCount--
	return b.newLine(false)
}

// maybeNext writes an element separator unless this is the first element;
// it clears *first as a side effect.
func (b *indentBuffer) maybeNext(first *bool) error {
	if !*first {
		return b.next()
	}
	*first = false
	return nil
}

// next separates two sibling elements: a (possibly comma-prefixed) newline
// in pretty mode, or a bare comma/space in compact mode.
func (b *indentBuffer) next() error {
	switch {
	case b.indentCount >= 0:
		return b.newLine(b.comma)
	case b.comma:
		return b.WriteByte(',')
	default:
		return b.WriteByte(' ')
	}
}

// newLine emits an optional comma, a newline, and the current indent prefix.
func (b *indentBuffer) newLine(comma bool) error {
	if comma {
		if err := b.WriteByte(','); err != nil {
			return err
		}
	}
	if err := b.WriteByte('\n'); err != nil {
		return err
	}
	for i := 0; i < b.indentCount; i++ {
		if _, err := b.WriteString(b.indent); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..7dfae09
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1238 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ // link in the well-known-types that have a special JSON format
+ _ "github.com/golang/protobuf/ptypes/any"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/empty"
+ _ "github.com/golang/protobuf/ptypes/struct"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ _ "github.com/golang/protobuf/ptypes/wrappers"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// wellKnownTypeNames is the set of message types with a special JSON
// representation. Messages of these types are not emitted as plain JSON
// objects; they are converted to their generated counterparts and handled
// by jsonpb (see marshalWellKnownType / unmarshalWellKnownType).
var wellKnownTypeNames = map[string]struct{}{
	"google.protobuf.Any":       {},
	"google.protobuf.Empty":     {},
	"google.protobuf.Duration":  {},
	"google.protobuf.Timestamp": {},
	// struct.proto
	"google.protobuf.Struct":    {},
	"google.protobuf.Value":     {},
	"google.protobuf.ListValue": {},
	// wrappers.proto
	"google.protobuf.DoubleValue": {},
	"google.protobuf.FloatValue":  {},
	"google.protobuf.Int64Value":  {},
	"google.protobuf.UInt64Value": {},
	"google.protobuf.Int32Value":  {},
	"google.protobuf.UInt32Value": {},
	"google.protobuf.BoolValue":   {},
	"google.protobuf.StringValue": {},
	"google.protobuf.BytesValue":  {},
}
+
// MarshalJSON serializes this message to bytes in JSON format, returning an
// error if the operation fails. The resulting bytes will be a valid UTF8
// string.
//
// This method uses a compact form: no newlines, and spaces between fields and
// between field identifiers and values are elided.
//
// This method is convenient shorthand for invoking MarshalJSONPB with a default
// (zero value) marshaler:
//
//	m.MarshalJSONPB(&jsonpb.Marshaler{})
//
// So enums are serialized using enum value name strings, and values that are
// not present (including those with default/zero value for messages defined in
// "proto3" syntax) are omitted.
func (m *Message) MarshalJSON() ([]byte, error) {
	return m.MarshalJSONPB(&jsonpb.Marshaler{})
}
+
// MarshalJSONIndent serializes this message to bytes in JSON format, returning
// an error if the operation fails. The resulting bytes will be a valid UTF8
// string.
//
// This method uses a "pretty-printed" form, with each field on its own line and
// spaces between field identifiers and values. Indentation of two spaces is
// used.
//
// This method is convenient shorthand for invoking MarshalJSONPB with a default
// (zero value) marshaler:
//
//	m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
//
// So enums are serialized using enum value name strings, and values that are
// not present (including those with default/zero value for messages defined in
// "proto3" syntax) are omitted.
func (m *Message) MarshalJSONIndent() ([]byte, error) {
	return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal it.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+ var b indentBuffer
+ b.indent = opts.Indent
+ if len(opts.Indent) == 0 {
+ b.indentCount = -1
+ }
+ b.comma = true
+ if err := m.marshalJSON(&b, opts); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
// marshalJSON writes the JSON encoding of m into b. Well-known types are
// delegated to jsonpb; all other messages are emitted as a JSON object
// with one entry per set field (plus defaults when opts.EmitDefaults).
func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
	// Augment any configured AnyResolver with the message types known to
	// this message's file (and transitive dependencies).
	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
		newOpts := *opts
		newOpts.AnyResolver = r
		opts = &newOpts
	}

	if ok, err := marshalWellKnownType(m, b, opts); ok {
		return err
	}

	err := b.WriteByte('{')
	if err != nil {
		return err
	}
	err = b.start()
	if err != nil {
		return err
	}

	// Tags are returned sorted, so field output order is deterministic.
	var tags []int
	if opts.EmitDefaults {
		tags = m.allKnownFieldTags()
	} else {
		tags = m.knownFieldTags()
	}

	first := true

	for _, tag := range tags {
		itag := int32(tag)
		fd := m.FindFieldDescriptor(itag)

		v, ok := m.values[itag]
		if !ok {
			if fd.GetOneOf() != nil {
				// don't print defaults for fields in a oneof
				continue
			}
			v = fd.GetDefaultValue()
		}

		err := b.maybeNext(&first)
		if err != nil {
			return err
		}
		err = marshalKnownFieldJSON(b, fd, v, opts)
		if err != nil {
			return err
		}
	}

	err = b.end()
	if err != nil {
		return err
	}
	err = b.WriteByte('}')
	if err != nil {
		return err
	}

	return nil
}
+
+func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
+ fqn := m.md.GetFullyQualifiedName()
+ if _, ok := wellKnownTypeNames[fqn]; !ok {
+ return false, nil
+ }
+
+ msgType := proto.MessageType(fqn)
+ if msgType == nil {
+ // wtf?
+ panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+ }
+
+ // convert dynamic message to well-known type and let jsonpb marshal it
+ msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+ if err := m.MergeInto(msg); err != nil {
+ return true, err
+ }
+ return true, opts.Marshal(b, msg)
+}
+
// marshalKnownFieldJSON emits one field: its JSON key followed by its
// value. Map fields become JSON objects (keys sorted for determinism),
// repeated fields become arrays, and extensions are keyed with their
// bracketed fully-qualified name, per jsonpb convention.
func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
	// Choose the key: the original proto name when OrigName is set,
	// otherwise the lowerCamelCase JSON name when one is present.
	var jsonName string
	if opts.OrigName {
		jsonName = fd.GetName()
	} else {
		jsonName = fd.AsFieldDescriptorProto().GetJsonName()
		if jsonName == "" {
			jsonName = fd.GetName()
		}
	}
	if fd.IsExtension() {
		// Extensions are scoped by their declaring file's package or
		// enclosing message.
		var scope string
		switch parent := fd.GetParent().(type) {
		case *desc.FileDescriptor:
			scope = parent.GetPackage()
		default:
			scope = parent.GetFullyQualifiedName()
		}
		if scope == "" {
			jsonName = fmt.Sprintf("[%s]", jsonName)
		} else {
			jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
		}
	}
	err := writeJsonString(b, jsonName)
	if err != nil {
		return err
	}
	err = b.sep()
	if err != nil {
		return err
	}

	if isNil(v) {
		_, err := b.WriteString("null")
		return err
	}

	if fd.IsMap() {
		err = b.WriteByte('{')
		if err != nil {
			return err
		}
		err = b.start()
		if err != nil {
			return err
		}

		// Field number 2 of the synthetic map-entry message is the value.
		md := fd.GetMessageType()
		vfd := md.FindFieldByNumber(2)

		// Emit entries in sorted key order so output is deterministic.
		mp := v.(map[interface{}]interface{})
		keys := make([]interface{}, 0, len(mp))
		for k := range mp {
			keys = append(keys, k)
		}
		sort.Sort(sortable(keys))
		first := true
		for _, mk := range keys {
			mv := mp[mk]
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}

			err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
			if err != nil {
				return err
			}
		}

		err = b.end()
		if err != nil {
			return err
		}
		return b.WriteByte('}')

	} else if fd.IsRepeated() {
		err = b.WriteByte('[')
		if err != nil {
			return err
		}
		err = b.start()
		if err != nil {
			return err
		}

		sl := v.([]interface{})
		first := true
		for _, slv := range sl {
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}
			err = marshalKnownFieldValueJSON(b, fd, slv, opts)
			if err != nil {
				return err
			}
		}

		err = b.end()
		if err != nil {
			return err
		}
		return b.WriteByte(']')

	} else {
		return marshalKnownFieldValueJSON(b, fd, v, opts)
	}
}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+ return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+ vi := s[i]
+ vj := s[j]
+ switch reflect.TypeOf(vi).Kind() {
+ case reflect.Int32:
+ return vi.(int32) < vj.(int32)
+ case reflect.Int64:
+ return vi.(int64) < vj.(int64)
+ case reflect.Uint32:
+ return vi.(uint32) < vj.(uint32)
+ case reflect.Uint64:
+ return vi.(uint64) < vj.(uint64)
+ case reflect.String:
+ return vi.(string) < vj.(string)
+ case reflect.Bool:
+ return !vi.(bool) && vj.(bool)
+ default:
+ panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+ }
+}
+
+func (s sortable) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
// isNil reports whether v is nil, either as a bare nil interface or as an
// interface holding a typed nil pointer (which compares unequal to nil).
func isNil(v interface{}) bool {
	if v == nil {
		return true
	}
	val := reflect.ValueOf(v)
	if val.Kind() != reflect.Ptr {
		return false
	}
	return val.IsNil()
}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+ rk := reflect.ValueOf(mk)
+ var strkey string
+ switch rk.Kind() {
+ case reflect.Bool:
+ strkey = strconv.FormatBool(rk.Bool())
+ case reflect.Int32, reflect.Int64:
+ strkey = strconv.FormatInt(rk.Int(), 10)
+ case reflect.Uint32, reflect.Uint64:
+ strkey = strconv.FormatUint(rk.Uint(), 10)
+ case reflect.String:
+ strkey = rk.String()
+ default:
+ return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+ }
+ err := writeString(b, strkey)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
// marshalKnownFieldValueJSON writes a single scalar, enum, or message value
// in proto3 JSON form: 64-bit integers as quoted strings, 32-bit integers
// and floats as numbers, NaN/Infinity as quoted sentinel strings, bytes as
// base64, enums as value names (unless EnumsAsInts), and messages
// recursively.
func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Int64:
		// int64/sint64/sfixed64 are emitted as JSON strings to avoid
		// precision loss in consumers that use IEEE doubles.
		return writeJsonString(b, strconv.FormatInt(rv.Int(), 10))
	case reflect.Int32:
		ed := fd.GetEnumType()
		if !opts.EnumsAsInts && ed != nil {
			n := int32(rv.Int())
			vd := ed.FindValueByNumber(n)
			if vd == nil {
				// Unknown enum number: fall back to numeric form.
				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
				return err
			} else {
				return writeJsonString(b, vd.GetName())
			}
		} else {
			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
			return err
		}
	case reflect.Uint64:
		// uint64/fixed64 are also emitted as JSON strings.
		return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10))
	case reflect.Uint32:
		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
		return err
	case reflect.Float32, reflect.Float64:
		f := rv.Float()
		var str string
		if math.IsNaN(f) {
			str = `"NaN"`
		} else if math.IsInf(f, 1) {
			str = `"Infinity"`
		} else if math.IsInf(f, -1) {
			str = `"-Infinity"`
		} else {
			var bits int
			if rv.Kind() == reflect.Float32 {
				bits = 32
			} else {
				bits = 64
			}
			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
		}
		_, err := b.WriteString(str)
		return err
	case reflect.Bool:
		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
		return err
	case reflect.Slice:
		// bytes fields are base64-encoded.
		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
		return writeJsonString(b, bstr)
	case reflect.String:
		return writeJsonString(b, rv.String())
	default:
		// must be a message
		if dm, ok := v.(*Message); ok {
			return dm.marshalJSON(b, opts)
		} else {
			var err error
			if b.indentCount <= 0 || len(b.indent) == 0 {
				err = opts.Marshal(b, v.(proto.Message))
			} else {
				// Generated message: jsonpb renders it without knowledge
				// of our current nesting depth, so re-indent each line of
				// its output. NOTE(review): the := below shadows the outer
				// err; this is benign because every error path in this
				// branch returns directly, but a rename would be clearer.
				str, err := opts.MarshalToString(v.(proto.Message))
				if err != nil {
					return err
				}
				indent := strings.Repeat(b.indent, b.indentCount)
				pos := 0
				// add indention prefix to each line
				for pos < len(str) {
					start := pos
					nextPos := strings.Index(str[pos:], "\n")
					if nextPos == -1 {
						nextPos = len(str)
					} else {
						nextPos = pos + nextPos + 1 // include newline
					}
					line := str[start:nextPos]
					if pos > 0 {
						// First line is already positioned after the key.
						_, err = b.WriteString(indent)
						if err != nil {
							return err
						}
					}
					_, err = b.WriteString(line)
					if err != nil {
						return err
					}
					pos = nextPos
				}
			}
			return err
		}
	}
}
+
+func writeJsonString(b *indentBuffer, s string) error {
+ if sbytes, err := json.Marshal(s); err != nil {
+ return err
+ } else {
+ _, err := b.Write(sbytes)
+ return err
+ }
+}
+
// UnmarshalJSON de-serializes the message that is present, in JSON format, in
// the given bytes into this message. It first resets the current message. It
// returns an error if the given bytes do not contain a valid encoding of this
// message type in JSON format.
//
// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
// value) unmarshaler:
//
//	m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
//
// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
// will be used when parsing google.protobuf.Any messages.
func (m *Message) UnmarshalJSON(js []byte) error {
	return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
}
+
// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
// reset the message, instead merging the data in the given bytes into the
// existing data in this message.
func (m *Message) UnmarshalMergeJSON(js []byte) error {
	return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
}
+
// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
// the given bytes into this message. The given unmarshaler conveys options used
// when parsing the JSON. This function first resets the current message. It
// returns an error if the given bytes do not contain a valid encoding of this
// message type in JSON format.
//
// The decoding is lenient:
//  1. The JSON can refer to fields either by their JSON name or by their
//     declared name.
//  2. The JSON can use either numeric values or string names for enum values.
//
// When instantiating nested messages, if this message's associated factory
// returns a generated message type (as opposed to a dynamic message), the given
// unmarshaler is used to unmarshal it.
//
// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
// the given unmarshaler is augmented with knowledge of message types known to
// this message's descriptor (and its enclosing file and set of transitive
// dependencies).
func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
	m.Reset()
	if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
		return err
	}
	// Validate after the merge so required-field checks run on the result.
	return m.Validate()
}
+
+// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
+// format, in the given bytes into this message. The given unmarshaler conveys
+// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+ r := newJsReader(js)
+ err := m.unmarshalJson(r, opts)
+ if err != nil {
+ return err
+ }
+ if t, err := r.poll(); err != io.EOF {
+ b, _ := ioutil.ReadAll(r.unread())
+ s := fmt.Sprintf("%v%s", t, string(b))
+ return fmt.Errorf("superfluous data found after JSON object: %q", s)
+ }
+ return nil
+}
+
// unmarshalWellKnownType checks whether m is a well-known type with special
// JSON handling. If so, it reports true and parses the next JSON value by
// decoding into the corresponding generated message via jsonpb and merging
// the result into m.
func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
	fqn := m.md.GetFullyQualifiedName()
	if _, ok := wellKnownTypeNames[fqn]; !ok {
		return false, nil
	}

	msgType := proto.MessageType(fqn)
	if msgType == nil {
		// The well-known types are linked in via blank imports above, so a
		// missing registration indicates a broken build.
		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
	}

	// extract json value from r
	var js json.RawMessage
	if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
		return true, err
	}
	// The decode above read from a copy; advance the real reader past the
	// value so subsequent parsing resumes in the right place.
	if err := r.skip(); err != nil {
		return true, err
	}

	// unmarshal into well-known type and then convert to dynamic message
	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
	if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
		return true, err
	}
	return true, m.MergeFrom(msg)
}
+
// unmarshalJson parses one JSON value from r into m. A bare JSON null
// leaves the message untouched. Unknown object keys are an error unless
// opts.AllowUnknownFields is set. An explicit null on a oneof message
// field is preserved as a typed-nil/empty message (mirroring jsonpb's
// behavior for generated types).
func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
	// Augment any configured AnyResolver with the message types known to
	// this message's file (and transitive dependencies).
	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
		newOpts := *opts
		newOpts.AnyResolver = r
		opts = &newOpts
	}

	// Well-known types have a special JSON form handled via jsonpb.
	if ok, err := unmarshalWellKnownType(m, r, opts); ok {
		return err
	}

	t, err := r.peek()
	if err != nil {
		return err
	}
	if t == nil {
		// if json is simply "null" we do nothing
		r.poll()
		return nil
	}

	if err := r.beginObject(); err != nil {
		return err
	}

	for r.hasNext() {
		f, err := r.nextObjectKey()
		if err != nil {
			return err
		}
		fd := m.FindFieldDescriptorByJSONName(f)
		if fd == nil {
			if opts.AllowUnknownFields {
				r.skip()
				continue
			}
			return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
		}
		v, err := unmarshalJsField(fd, r, m.mf, opts)
		if err != nil {
			return err
		}
		if v != nil {
			if err := mergeField(m, fd, v); err != nil {
				return err
			}
		} else if fd.GetOneOf() != nil {
			// preserve explicit null for oneof fields (this is a little odd but
			// mimics the behavior of jsonpb with oneofs in generated message types)
			if fd.GetMessageType() != nil {
				typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
				if typ != nil {
					// typed nil
					if typ.Kind() != reflect.Ptr {
						typ = reflect.PtrTo(typ)
					}
					v = reflect.Zero(typ).Interface()
				} else {
					// can't use nil dynamic message, so we just use empty one instead
					v = m.mf.NewDynamicMessage(fd.GetMessageType())
				}
				if err := m.setField(fd, v); err != nil {
					return err
				}
			} else {
				// not a message... explicit null makes no sense
				return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
			}
		} else {
			m.clearField(fd)
		}
	}

	if err := r.endObject(); err != nil {
		return err
	}

	return nil
}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+ return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+ return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue"
+}
+
// unmarshalJsField parses the JSON value for field fd and returns it in
// the representation used by dynamic messages: a map for map fields, a
// slice for repeated fields, a scalar/message otherwise. An explicit JSON
// null yields (nil, nil). For flexibility, an array is accepted for a
// non-repeated field (last element wins) and a single value is accepted
// for a repeated field (treated as a one-element list).
func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
	t, err := r.peek()
	if err != nil {
		return nil, err
	}
	if t == nil && !isWellKnownValue(fd) {
		// if value is null, just return nil
		// (unless field is google.protobuf.Value, in which case
		// we fall through to parse it as an instance where its
		// underlying value is set to a NullValue)
		r.poll()
		return nil, nil
	}

	if t == json.Delim('{') && fd.IsMap() {
		entryType := fd.GetMessageType()
		keyType := entryType.FindFieldByNumber(1)
		valueType := entryType.FindFieldByNumber(2)
		mp := map[interface{}]interface{}{}

		// TODO: if there are just two map keys "key" and "value" and they have the right type of values,
		// treat this JSON object as a single map entry message. (In keeping with support of map fields as
		// if they were normal repeated field of entry messages as well as supporting a transition from
		// optional to repeated...)

		if err := r.beginObject(); err != nil {
			return nil, err
		}
		for r.hasNext() {
			// Each object entry is one key/value pair of the map.
			kk, err := unmarshalJsFieldElement(keyType, r, mf, opts)
			if err != nil {
				return nil, err
			}
			vv, err := unmarshalJsFieldElement(valueType, r, mf, opts)
			if err != nil {
				return nil, err
			}
			mp[kk] = vv
		}
		if err := r.endObject(); err != nil {
			return nil, err
		}

		return mp, nil
	} else if t == json.Delim('[') && !isWellKnownListValue(fd) {
		// We support parsing an array, even if field is not repeated, to mimic support in proto
		// binary wire format that supports changing an optional field to repeated and vice versa.
		// If the field is not repeated, we only keep the last value in the array.

		if err := r.beginArray(); err != nil {
			return nil, err
		}
		var sl []interface{}
		var v interface{}
		for r.hasNext() {
			var err error
			v, err = unmarshalJsFieldElement(fd, r, mf, opts)
			if err != nil {
				return nil, err
			}
			if fd.IsRepeated() && v != nil {
				sl = append(sl, v)
			}
		}
		if err := r.endArray(); err != nil {
			return nil, err
		}
		if fd.IsMap() {
			// Map field expressed as an array of entry messages: convert
			// each entry's key (field 1) and value (field 2) into the map.
			mp := map[interface{}]interface{}{}
			for _, m := range sl {
				msg := m.(*Message)
				kk, err := msg.TryGetFieldByNumber(1)
				if err != nil {
					return nil, err
				}
				vv, err := msg.TryGetFieldByNumber(2)
				if err != nil {
					return nil, err
				}
				mp[kk] = vv
			}
			return mp, nil
		} else if fd.IsRepeated() {
			return sl, nil
		} else {
			return v, nil
		}
	} else {
		// We support parsing a singular value, even if field is repeated, to mimic support in proto
		// binary wire format that supports changing an optional field to repeated and vice versa.
		// If the field is repeated, we store value as singleton slice of that one value.

		v, err := unmarshalJsFieldElement(fd, r, mf, opts)
		if err != nil {
			return nil, err
		}
		if v == nil {
			return nil, nil
		}
		if fd.IsRepeated() {
			return []interface{}{v}, nil
		} else {
			return v, nil
		}
	}
}
+
// unmarshalJsFieldElement parses one element of field fd from r: a nested
// message, an enum (by name or number), a numeric scalar (with range
// checks for 32-bit types), a bool (also accepting the strings
// "true"/"false"), bytes (base64), or a string.
func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
	t, err := r.peek()
	if err != nil {
		return nil, err
	}

	switch fd.GetType() {
	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
		descriptor.FieldDescriptorProto_TYPE_GROUP:
		m := mf.NewMessage(fd.GetMessageType())
		if dm, ok := m.(*Message); ok {
			// Dynamic message: recurse with this package's parser.
			if err := dm.unmarshalJson(r, opts); err != nil {
				return nil, err
			}
		} else {
			// Generated message: capture the raw JSON value and hand it to
			// jsonpb, then advance our reader past it.
			var msg json.RawMessage
			if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
				return nil, err
			}
			if err := r.skip(); err != nil {
				return nil, err
			}
			if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
				return nil, err
			}
		}
		return m, nil

	case descriptor.FieldDescriptorProto_TYPE_ENUM:
		if e, err := r.nextNumber(); err != nil {
			return nil, err
		} else {
			// value could be string or number
			if i, err := e.Int64(); err != nil {
				// number cannot be parsed, so see if it's an enum value name
				vd := fd.GetEnumType().FindValueByName(string(e))
				if vd != nil {
					return vd.GetNumber(), nil
				} else {
					return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
				}
			} else if i > math.MaxInt32 || i < math.MinInt32 {
				return nil, NumericOverflowError
			} else {
				return int32(i), err
			}
		}

	case descriptor.FieldDescriptorProto_TYPE_INT32,
		descriptor.FieldDescriptorProto_TYPE_SINT32,
		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
		if i, err := r.nextInt(); err != nil {
			return nil, err
		} else if i > math.MaxInt32 || i < math.MinInt32 {
			return nil, NumericOverflowError
		} else {
			return int32(i), err
		}

	case descriptor.FieldDescriptorProto_TYPE_INT64,
		descriptor.FieldDescriptorProto_TYPE_SINT64,
		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
		return r.nextInt()

	case descriptor.FieldDescriptorProto_TYPE_UINT32,
		descriptor.FieldDescriptorProto_TYPE_FIXED32:
		if i, err := r.nextUint(); err != nil {
			return nil, err
		} else if i > math.MaxUint32 {
			return nil, NumericOverflowError
		} else {
			return uint32(i), err
		}

	case descriptor.FieldDescriptorProto_TYPE_UINT64,
		descriptor.FieldDescriptorProto_TYPE_FIXED64:
		return r.nextUint()

	case descriptor.FieldDescriptorProto_TYPE_BOOL:
		// Accept the strings "true"/"false" in addition to JSON booleans.
		if str, ok := t.(string); ok {
			if str == "true" {
				r.poll() // consume token
				return true, err
			} else if str == "false" {
				r.poll() // consume token
				return false, err
			}
		}
		return r.nextBool()

	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
		if f, err := r.nextFloat(); err != nil {
			return nil, err
		} else {
			return float32(f), nil
		}

	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
		return r.nextFloat()

	case descriptor.FieldDescriptorProto_TYPE_BYTES:
		return r.nextBytes()

	case descriptor.FieldDescriptorProto_TYPE_STRING:
		return r.nextString()

	default:
		return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
	}
}
+
+type jsReader struct {
+ reader *bytes.Reader
+ dec *json.Decoder
+ current json.Token
+ peeked bool
+}
+
+func newJsReader(b []byte) *jsReader {
+ reader := bytes.NewReader(b)
+ dec := json.NewDecoder(reader)
+ dec.UseNumber()
+ return &jsReader{reader: reader, dec: dec}
+}
+
// unread returns a reader over all input not yet consumed, including a
// currently peeked token if any. It operates on copies of the underlying
// reader and decoder, so reading from the result does not disturb this
// jsReader's position.
func (r *jsReader) unread() io.Reader {
	bufs := make([]io.Reader, 3)
	var peeked []byte
	if r.peeked {
		if _, ok := r.current.(json.Delim); ok {
			// A delimiter is not valid standalone JSON, so re-render the
			// raw character rather than json.Marshal-ing it.
			peeked = []byte(fmt.Sprintf("%v", r.current))
		} else {
			peeked, _ = json.Marshal(r.current)
		}
	}
	readerCopy := *r.reader
	decCopy := *r.dec

	// Order: peeked token, then the decoder's internal buffer, then the
	// rest of the raw input.
	bufs[0] = bytes.NewReader(peeked)
	bufs[1] = decCopy.Buffered()
	bufs[2] = &readerCopy
	return &concatReader{bufs: bufs}
}
+
// hasNext reports whether there is another element in the current array
// or object being parsed.
func (r *jsReader) hasNext() bool {
	return r.dec.More()
}
+
+func (r *jsReader) peek() (json.Token, error) {
+ if r.peeked {
+ return r.current, nil
+ }
+ t, err := r.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ r.peeked = true
+ r.current = t
+ return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+ if r.peeked {
+ ret := r.current
+ r.current = nil
+ r.peeked = false
+ return ret, nil
+ }
+ return r.dec.Token()
+}
+
// beginObject consumes the '{' token that opens a JSON object.
func (r *jsReader) beginObject() error {
	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
	return err
}

// endObject consumes the '}' token that closes a JSON object.
func (r *jsReader) endObject() error {
	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
	return err
}

// beginArray consumes the '[' token that opens a JSON array.
func (r *jsReader) beginArray() error {
	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
	return err
}

// endArray consumes the ']' token that closes a JSON array.
func (r *jsReader) endArray() error {
	_, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
	return err
}
+
// nextObjectKey reads an object key; keys are just JSON strings.
func (r *jsReader) nextObjectKey() (string, error) {
	return r.nextString()
}
+
+func (r *jsReader) nextString() (string, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+ if err != nil {
+ return "", err
+ }
+ return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+ str, err := r.nextString()
+ if err != nil {
+ return nil, err
+ }
+ return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+ if err != nil {
+ return false, err
+ }
+ return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Float64()
+}
+
// nextNumber consumes the next token as a number. The predicate accepts
// any token whose reflect.Kind is String, which matches both
// json.Number (whose underlying type is string) and plain strings, so
// quoted numbers are accepted too. A JSON null is treated as "0".
func (r *jsReader) nextNumber() (json.Number, error) {
	t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
	if err != nil {
		return "", err
	}
	switch t := t.(type) {
	case json.Number:
		return t, nil
	case string:
		// quoted number: reinterpret the string as a number
		return json.Number(t), nil
	}
	return "", fmt.Errorf("expecting a number but got %v", t)
}
+
+func (r *jsReader) skip() error {
+ t, err := r.poll()
+ if err != nil {
+ return err
+ }
+ if t == json.Delim('[') {
+ if err := r.skipArray(); err != nil {
+ return err
+ }
+ } else if t == json.Delim('{') {
+ if err := r.skipObject(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *jsReader) skipArray() error {
+ for r.hasNext() {
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endArray(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) skipObject() error {
+ for r.hasNext() {
+ // skip object key
+ if err := r.skip(); err != nil {
+ return err
+ }
+ // and value
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endObject(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+ t, err := r.poll()
+ if err != nil {
+ return nil, err
+ }
+ if t == nil && ifNil != nil {
+ return ifNil, nil
+ }
+ if !predicate(t) {
+ return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+ }
+ return t, nil
+}
+
// concatReader streams the contents of several readers back to back.
type concatReader struct {
	bufs []io.Reader
	curr int // index of the reader currently being drained
}

// Read fills p from the current reader, advancing to the next reader
// each time one reports io.EOF. It returns io.EOF only once every
// underlying reader is exhausted.
func (r *concatReader) Read(p []byte) (n int, err error) {
	for r.curr < len(r.bufs) {
		var c int
		c, err = r.bufs[r.curr].Read(p)
		n += c
		if err != io.EOF {
			return
		}
		// current reader exhausted: move on and fill the rest of p
		r.curr++
		p = p[c:]
	}
	err = io.EOF
	return
}
+
// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
// to resolve message names. It uses the given factory, which may be nil, to
// instantiate messages. The messages that it returns when resolving a type name
// may often be dynamic messages.
func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
	return &anyResolver{mf: mf, files: files}
}

// anyResolver resolves google.protobuf.Any type URLs against a set of
// file descriptors, optionally delegating to another resolver first.
type anyResolver struct {
	mf *MessageFactory // used to instantiate resolved messages; may be nil
	files []*desc.FileDescriptor // files searched (with their imports) for message types
	ignored map[*desc.FileDescriptor]struct{} // files skipped because the wrapped resolver already covers them
	other jsonpb.AnyResolver // optional resolver consulted before searching files
}
+
// wrapResolver returns a resolver that searches f first and then falls
// back to r. When r is itself an *anyResolver, no wrapper is created if
// r already covers (or ignores) f, and the files r will search are
// recorded in the new wrapper's ignored set so they are not searched
// twice. The boolean result reports whether a new wrapper was created.
func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
	if r, ok := r.(*anyResolver); ok {
		if _, ok := r.ignored[f]; ok {
			// if the current resolver is ignoring this file, it's because another
			// (upstream) resolver is already handling it, so nothing to do
			return r, false
		}
		for _, file := range r.files {
			if file == f {
				// no need to wrap!
				return r, false
			}
		}
		// ignore files that will be checked by the resolver we're wrapping
		// (we'll just delegate and let it search those files)
		ignored := map[*desc.FileDescriptor]struct{}{}
		for i := range r.ignored {
			ignored[i] = struct{}{}
		}
		ignore(r.files, ignored)
		return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
	}
	return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+ for _, f := range files {
+ if _, ok := ignored[f]; ok {
+ continue
+ }
+ ignored[f] = struct{}{}
+ ignore(f.GetDependencies(), ignored)
+ }
+}
+
// Resolve implements jsonpb.AnyResolver. It extracts the message name
// from the type URL (everything after the final '/'), then tries, in
// order: the wrapped resolver, the configured file descriptors, the
// factory's known-type registry, and finally proto's global registry.
func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
	mname := typeUrl
	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
		mname = mname[slash+1:]
	}

	// see if the user-specified resolver is able to do the job
	if r.other != nil {
		msg, err := r.other.Resolve(typeUrl)
		if err == nil {
			return msg, nil
		}
	}

	// try to find the message in our known set of files
	checked := map[*desc.FileDescriptor]struct{}{}
	for _, f := range r.files {
		md := r.findMessage(f, mname, checked)
		if md != nil {
			return r.mf.NewMessage(md), nil
		}
	}
	// failing that, see if the message factory knows about this type
	var ktr *KnownTypeRegistry
	if r.mf != nil {
		ktr = r.mf.ktr
	} else {
		// a nil registry is still usable: it resolves well-known types
		ktr = (*KnownTypeRegistry)(nil)
	}
	m := ktr.CreateIfKnown(mname)
	if m != nil {
		return m, nil
	}

	// no other resolver to fallback to? mimic default behavior
	mt := proto.MessageType(mname)
	if mt == nil {
		return nil, fmt.Errorf("unknown message type %q", mname)
	}
	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+ // if this is an ignored descriptor, skip
+ if _, ok := r.ignored[fd]; ok {
+ return nil
+ }
+
+ // bail if we've already checked this file
+ if _, ok := checked[fd]; ok {
+ return nil
+ }
+ checked[fd] = struct{}{}
+
+ // see if this file has the message
+ md := fd.FindMessage(msgName)
+ if md != nil {
+ return md
+ }
+
+ // if not, recursively search the file's imports
+ for _, dep := range fd.GetDependencies() {
+ md = r.findMessage(dep, msgName, checked)
+ if md != nil {
+ return md
+ }
+ }
+ return nil
+}
+
// compile-time check that anyResolver satisfies jsonpb.AnyResolver
var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..bb68d7b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,129 @@
+//+build !go1.12
+
+package dynamic
+
+import (
+ "github.com/jhump/protoreflect/desc"
+ "reflect"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+func mapsEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ if a.Len() == 0 && b.Len() == 0 {
+ // Optimize the case where maps are frequently empty because MapKeys()
+ // function allocates heavily.
+ return true
+ }
+
+ for _, k := range a.MapKeys() {
+ av := a.MapIndex(k)
+ bv := b.MapIndex(k)
+ if !bv.IsValid() {
+ return false
+ }
+ if !fieldsEqual(av.Interface(), bv.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ // make a defensive copy while we check the contents
+ // (also converts to map[interface{}]interface{} if it's some other type)
+ keyField := fd.GetMessageType().GetFields()[0]
+ valField := fd.GetMessageType().GetFields()[1]
+ m := map[interface{}]interface{}{}
+ for _, k := range val.MapKeys() {
+ if k.Kind() == reflect.Interface {
+ // unwrap it
+ k = reflect.ValueOf(k.Interface())
+ }
+ kk, err := validFieldValueForRv(keyField, k)
+ if err != nil {
+ return nil, err
+ }
+ v := val.MapIndex(k)
+ if v.Kind() == reflect.Interface {
+ // unwrap it
+ v = reflect.ValueOf(v.Interface())
+ }
+ vv, err := validFieldValueForRv(valField, v)
+ if err != nil {
+ return nil, err
+ }
+ m[kk] = vv
+ }
+ return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+ kt := target.Key()
+ vt := target.Elem()
+ for _, k := range src.MapKeys() {
+ if !canConvert(k, kt) {
+ return false
+ }
+ if !canConvert(src.MapIndex(k), vt) {
+ return false
+ }
+ }
+ return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+ tkt := targetType.Key()
+ tvt := targetType.Elem()
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ skt := k.Type()
+ svt := v.Type()
+ var nk, nv reflect.Value
+ if tkt == skt {
+ nk = k
+ } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+ nk = k.Addr()
+ } else {
+ nk = reflect.New(tkt).Elem()
+ if err := mergeVal(k, nk); err != nil {
+ return err
+ }
+ }
+ if tvt == svt {
+ nv = v
+ } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+ nv = v.Addr()
+ } else {
+ nv = reflect.New(tvt).Elem()
+ if err := mergeVal(v, nv); err != nil {
+ return err
+ }
+ }
+ if target.IsNil() {
+ target.Set(reflect.MakeMap(targetType))
+ }
+ target.SetMapIndex(nk, nv)
+ }
+ return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+ for _, k := range rv.MapKeys() {
+ if k.Kind() == reflect.Interface && !k.IsNil() {
+ k = k.Elem()
+ }
+ v := rv.MapIndex(k)
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..f5ffd67
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,137 @@
+//+build go1.12
+
+package dynamic
+
+import (
+ "github.com/jhump/protoreflect/desc"
+ "reflect"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+func mapsEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ if a.Len() == 0 && b.Len() == 0 {
+ // Optimize the case where maps are frequently empty
+ return true
+ }
+
+ iter := a.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ av := iter.Value()
+ bv := b.MapIndex(k)
+ if !bv.IsValid() {
+ return false
+ }
+ if !fieldsEqual(av.Interface(), bv.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ // make a defensive copy while we check the contents
+ // (also converts to map[interface{}]interface{} if it's some other type)
+ keyField := fd.GetMessageType().GetFields()[0]
+ valField := fd.GetMessageType().GetFields()[1]
+ m := map[interface{}]interface{}{}
+ iter := val.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if k.Kind() == reflect.Interface {
+ // unwrap it
+ k = reflect.ValueOf(k.Interface())
+ }
+ kk, err := validFieldValueForRv(keyField, k)
+ if err != nil {
+ return nil, err
+ }
+ v := iter.Value()
+ if v.Kind() == reflect.Interface {
+ // unwrap it
+ v = reflect.ValueOf(v.Interface())
+ }
+ vv, err := validFieldValueForRv(valField, v)
+ if err != nil {
+ return nil, err
+ }
+ m[kk] = vv
+ }
+ return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+ kt := target.Key()
+ vt := target.Elem()
+ iter := src.MapRange()
+ for iter.Next() {
+ if !canConvert(iter.Key(), kt) {
+ return false
+ }
+ if !canConvert(iter.Value(), vt) {
+ return false
+ }
+ }
+ return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+ tkt := targetType.Key()
+ tvt := targetType.Elem()
+ iter := src.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ skt := k.Type()
+ svt := v.Type()
+ var nk, nv reflect.Value
+ if tkt == skt {
+ nk = k
+ } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+ nk = k.Addr()
+ } else {
+ nk = reflect.New(tkt).Elem()
+ if err := mergeVal(k, nk); err != nil {
+ return err
+ }
+ }
+ if tvt == svt {
+ nv = v
+ } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+ nv = v.Addr()
+ } else {
+ nv = reflect.New(tvt).Elem()
+ if err := mergeVal(v, nv); err != nil {
+ return err
+ }
+ }
+ if target.IsNil() {
+ target.Set(reflect.MakeMap(targetType))
+ }
+ target.SetMapIndex(nk, nv)
+ }
+ return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+ iter := rv.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ if k.Kind() == reflect.Interface && !k.IsNil() {
+ k = k.Elem()
+ }
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// Merge merges the given source message into the given destination message. Use
// this instead of proto.Merge when one or both of the messages might be a
// dynamic message. If there is a problem merging the messages, such as the
// two messages having different types, then this method will panic (just as
// proto.Merge does).
func Merge(dst, src proto.Message) {
	if dm, ok := dst.(*Message); ok {
		// destination is dynamic: it knows how to accept any source
		if err := dm.MergeFrom(src); err != nil {
			panic(err.Error())
		}
	} else if dm, ok := src.(*Message); ok {
		// source is dynamic: merge it into the generated destination
		if err := dm.MergeInto(dst); err != nil {
			panic(err.Error())
		}
	} else {
		// neither side is dynamic; defer to the standard implementation
		proto.Merge(dst, src)
	}
}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panic'ing.
+func TryMerge(dst, src proto.Message) error {
+ if dm, ok := dst.(*Message); ok {
+ if err := dm.MergeFrom(src); err != nil {
+ return err
+ }
+ } else if dm, ok := src.(*Message); ok {
+ if err := dm.MergeInto(dst); err != nil {
+ return err
+ }
+ } else {
+ // proto.Merge panics on bad input, so we first verify
+ // inputs and return error instead of panic
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ return errors.New("proto: nil destination")
+ }
+ in := reflect.ValueOf(src)
+ if in.Type() != out.Type() {
+ return errors.New("proto: type mismatch")
+ }
+ proto.Merge(dst, src)
+ }
+ return nil
+}
+
// mergeField merges a single field value val into message m. Map and
// repeated fields have their entries/elements added one at a time,
// scalar fields are simply set, and singular message fields are merged
// recursively into any existing value.
func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
	rv := reflect.ValueOf(val)

	if fd.IsMap() && rv.Kind() == reflect.Map {
		return mergeMapField(m, fd, rv)
	}

	// a slice of elements for a repeated field (but not raw bytes):
	// append each element, unwrapping interface values
	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
		for i := 0; i < rv.Len(); i++ {
			e := rv.Index(i)
			if e.Kind() == reflect.Interface && !e.IsNil() {
				e = e.Elem()
			}
			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
				return err
			}
		}
		return nil
	}

	if fd.IsRepeated() {
		// a single element for a repeated field: append it
		return m.addRepeatedField(fd, val)
	} else if fd.GetMessageType() == nil {
		// scalar (non-message) field: overwrite
		return m.setField(fd, val)
	}

	// it's a message type, so we want to merge contents
	var err error
	if val, err = validFieldValue(fd, val); err != nil {
		return err
	}

	existing, _ := m.doGetField(fd, true)
	if existing != nil && !reflect.ValueOf(existing).IsNil() {
		return TryMerge(existing.(proto.Message), val.(proto.Message))
	}

	// no existing message, so just set field
	m.internalSetField(fd, val)
	return nil
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..6c54de8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,189 @@
+package dynamic
+
+import (
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// MessageFactory can be used to create new empty message objects. A default instance
// (without extension registry or known-type registry specified) will always return
// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
// The well-known types include primitive wrapper types and a handful of other special
// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
type MessageFactory struct {
	er *ExtensionRegistry // extensions recognized by produced dynamic messages; may be nil
	ktr *KnownTypeRegistry // types instantiated as generated structs; may be nil
}
+
// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
// dynamic messages produced will use the given extension registry to recognize and
// parse extension fields. The known-type registry is left nil (zero-value
// behavior: only well-known types are returned as generated structs).
func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
	return NewMessageFactoryWithRegistries(er, nil)
}
+
// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
// known types, per the given registry, will be returned as normal protobuf messages
// (e.g. generated structs, instead of dynamic messages). The extension
// registry is left nil.
func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
	return NewMessageFactoryWithRegistries(nil, ktr)
}
+
// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
// (those for which protoc-generated code is statically linked into the Go program) are
// known types. If any dynamic messages are produced, they will recognize and parse all
// "default" extension fields. This is the equivalent of:
// NewMessageFactoryWithRegistries(
// NewExtensionRegistryWithDefaults(),
// NewKnownTypeRegistryWithDefaults())
func NewMessageFactoryWithDefaults() *MessageFactory {
	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
}
+
// NewMessageFactoryWithRegistries creates a new message factory with the given extension
// and known type registries. Either registry may be nil, in which case the
// corresponding zero-value/nil behavior applies.
func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
	return &MessageFactory{
		er: er,
		ktr: ktr,
	}
}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+ var ktr *KnownTypeRegistry
+ if f != nil {
+ ktr = f.ktr
+ }
+ if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+ return m
+ }
+ return NewMessageWithMessageFactory(md, f)
+}
+
// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
// descriptor. This is like f.NewMessage(md) except the known type registry is not
// consulted so the return value is always a dynamic message.
//
// This is also like dynamic.NewMessage(md) except that the returned message will use
// this factory when creating other messages, like during de-serialization of fields
// that are themselves message types.
func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
	// unconditional: never checks the known-type registry
	return NewMessageWithMessageFactory(md, f)
}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+ if f == nil {
+ return nil
+ }
+ return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+ if f == nil {
+ return nil
+ }
+ return f.er
+}
+
// wkt is implemented by the generated structs for protobuf
// "well-known types" (wrappers, Any, Duration, Timestamp, etc).
type wkt interface {
	XXX_WellKnownType() string
}

// typeOfWkt is used to test whether a resolved type is a well-known
// type (see KnownTypeRegistry.GetKnownType).
var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
// KnownTypeRegistry is a registry of known message types, as identified by their
// fully-qualified name. A known message type is one for which a protoc-generated
// struct exists, so a dynamic message is not necessary to represent it. A
// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
// struct or a dynamic message. The zero-value registry (including the behavior of
// a nil pointer) only knows about the "well-known types" in protobuf. These
// include only the wrapper types and a handful of other special types like Any,
// Duration, and Timestamp.
type KnownTypeRegistry struct {
	excludeWkt bool // if true, well-known types are NOT considered known
	includeDefault bool // if true, all statically-linked generated types are known
	mu sync.RWMutex // guards types
	types map[string]reflect.Type // explicitly registered types, keyed by fully-qualified name
}
+
// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
// "default" types (those for which protoc-generated code is statically linked
// into the Go program).
func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
	return &KnownTypeRegistry{includeDefault: true}
}
+
// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
// include the "well-known types" in protobuf. So even well-known types would be
// represented by a dynamic message.
func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
	return &KnownTypeRegistry{excludeWkt: true}
}
+
+// AddKnownType adds the types of the given messages as known types.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.types == nil {
+ r.types = map[string]reflect.Type{}
+ }
+ for _, kt := range kts {
+ r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+ }
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+ msgType := r.GetKnownType(messageName)
+ if msgType == nil {
+ return nil
+ }
+
+ if msgType.Kind() == reflect.Ptr {
+ return reflect.New(msgType.Elem()).Interface().(proto.Message)
+ } else {
+ return reflect.New(msgType).Elem().Interface().(proto.Message)
+ }
+}
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+ var msgType reflect.Type
+ if r == nil {
+ // a nil registry behaves the same as zero value instance: only know of well-known types
+ t := proto.MessageType(messageName)
+ if t != nil && t.Implements(typeOfWkt) {
+ msgType = t
+ }
+ } else {
+ if r.includeDefault {
+ msgType = proto.MessageType(messageName)
+ } else if !r.excludeWkt {
+ t := proto.MessageType(messageName)
+ if t != nil && t.Implements(typeOfWkt) {
+ msgType = t
+ }
+ }
+ if msgType == nil {
+ r.mu.RLock()
+ msgType = r.types[messageName]
+ r.mu.RUnlock()
+ }
+ }
+
+ return msgType
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..72636f2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1175 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+ "unicode"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+ var b indentBuffer
+ b.indentCount = -1 // no indentation
+ if err := m.marshalText(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+ var b indentBuffer
+ b.indent = " " // TODO: option for indent?
+ if err := m.marshalText(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
// marshalText writes the message's fields to b in text format: known
// fields first (map entries in sorted key order for deterministic
// output, repeated fields element by element), then unknown fields by
// tag number (groups re-parsed and printed recursively, length-
// delimited fields as quoted strings, other wire types as integers).
func (m *Message) marshalText(b *indentBuffer) error {
	// TODO: option for emitting extended Any format?
	first := true
	// first the known fields
	for _, tag := range m.knownFieldTags() {
		itag := int32(tag)
		v := m.values[itag]
		fd := m.FindFieldDescriptor(itag)
		if fd.IsMap() {
			// map entry messages always have key = field 1, value = field 2
			md := fd.GetMessageType()
			kfd := md.FindFieldByNumber(1)
			vfd := md.FindFieldByNumber(2)
			mp := v.(map[interface{}]interface{})
			keys := make([]interface{}, 0, len(mp))
			for k := range mp {
				keys = append(keys, k)
			}
			// sort keys so output is deterministic
			sort.Sort(sortable(keys))
			for _, mk := range keys {
				mv := mp[mk]
				err := b.maybeNext(&first)
				if err != nil {
					return err
				}
				err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
				if err != nil {
					return err
				}
			}
		} else if fd.IsRepeated() {
			// one `name: value` entry per element
			sl := v.([]interface{})
			for _, slv := range sl {
				err := b.maybeNext(&first)
				if err != nil {
					return err
				}
				err = marshalKnownFieldText(b, fd, slv)
				if err != nil {
					return err
				}
			}
		} else {
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}
			err = marshalKnownFieldText(b, fd, v)
			if err != nil {
				return err
			}
		}
	}
	// then the unknown fields
	for _, tag := range m.unknownFieldTags() {
		itag := int32(tag)
		ufs := m.unknownFields[itag]
		for _, uf := range ufs {
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}
			// unknown fields are identified by bare tag number
			_, err = fmt.Fprintf(b, "%d", tag)
			if err != nil {
				return err
			}
			if uf.Encoding == proto.WireStartGroup {
				// re-parse the group's contents and print them nested in braces
				err = b.WriteByte('{')
				if err != nil {
					return err
				}
				err = b.start()
				if err != nil {
					return err
				}
				in := codec.NewBuffer(uf.Contents)
				err = marshalUnknownGroupText(b, in, true)
				if err != nil {
					return err
				}
				err = b.end()
				if err != nil {
					return err
				}
				err = b.WriteByte('}')
				if err != nil {
					return err
				}
			} else {
				err = b.sep()
				if err != nil {
					return err
				}
				if uf.Encoding == proto.WireBytes {
					err = writeString(b, string(uf.Contents))
					if err != nil {
						return err
					}
				} else {
					// varint/fixed values are printed as unsigned decimals
					_, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
					if err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
+
// marshalKnownFieldMapEntryText writes one map entry in the form
// `name: <key ... value ...>`, i.e. as the equivalent nested map-entry
// message whose fields are the key (kfd/mk) and value (vfd/mv).
// Extension fields are named as [fully.qualified.name].
func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
	var name string
	if fd.IsExtension() {
		name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
	} else {
		name = fd.GetName()
	}
	_, err := b.WriteString(name)
	if err != nil {
		return err
	}
	err = b.sep()
	if err != nil {
		return err
	}

	// entry body is delimited with <...> like any embedded message
	err = b.WriteByte('<')
	if err != nil {
		return err
	}
	err = b.start()
	if err != nil {
		return err
	}

	err = marshalKnownFieldText(b, kfd, mk)
	if err != nil {
		return err
	}
	err = b.next()
	if err != nil {
		return err
	}
	err = marshalKnownFieldText(b, vfd, mv)
	if err != nil {
		return err
	}

	err = b.end()
	if err != nil {
		return err
	}
	return b.WriteByte('>')
}
+
// marshalKnownFieldText writes one `name: value` entry for a known
// field. Groups are labelled with the message name and get no ':'
// separator; all other fields use the field name (extensions in
// brackets) followed by a separator. Scalars are printed per their
// reflected kind; embedded messages recurse, delimited with {} for
// groups and <> otherwise.
func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
	if group {
		// groups are identified by their message name, not field name
		var name string
		if fd.IsExtension() {
			name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
		} else {
			name = fd.GetMessageType().GetName()
		}
		_, err := b.WriteString(name)
		if err != nil {
			return err
		}
	} else {
		var name string
		if fd.IsExtension() {
			name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
		} else {
			name = fd.GetName()
		}
		_, err := b.WriteString(name)
		if err != nil {
			return err
		}
		err = b.sep()
		if err != nil {
			return err
		}
	}
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Int32, reflect.Int64:
		ed := fd.GetEnumType()
		if ed != nil {
			// print enum value name when known, else its number
			n := int32(rv.Int())
			vd := ed.FindValueByNumber(n)
			if vd == nil {
				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
				return err
			} else {
				_, err := b.WriteString(vd.GetName())
				return err
			}
		} else {
			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
			return err
		}
	case reflect.Uint32, reflect.Uint64:
		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
		return err
	case reflect.Float32, reflect.Float64:
		f := rv.Float()
		var str string
		if math.IsNaN(f) {
			str = "nan"
		} else if math.IsInf(f, 1) {
			str = "inf"
		} else if math.IsInf(f, -1) {
			str = "-inf"
		} else {
			var bits int
			if rv.Kind() == reflect.Float32 {
				bits = 32
			} else {
				bits = 64
			}
			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
		}
		_, err := b.WriteString(str)
		return err
	case reflect.Bool:
		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
		return err
	case reflect.Slice:
		// bytes field: print as an escaped string
		return writeString(b, string(rv.Bytes()))
	case reflect.String:
		return writeString(b, rv.String())
	default:
		var err error
		if group {
			err = b.WriteByte('{')
		} else {
			err = b.WriteByte('<')
		}
		if err != nil {
			return err
		}
		err = b.start()
		if err != nil {
			return err
		}
		// must be a message
		if dm, ok := v.(*Message); ok {
			err = dm.marshalText(b)
			if err != nil {
				return err
			}
		} else {
			err = proto.CompactText(b, v.(proto.Message))
			if err != nil {
				return err
			}
		}
		err = b.end()
		if err != nil {
			return err
		}
		if group {
			return b.WriteByte('}')
		} else {
			return b.WriteByte('>')
		}
	}
}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(b *indentBuffer, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := b.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = b.WriteString("\\n")
+ case '\r':
+ _, err = b.WriteString("\\r")
+ case '\t':
+ _, err = b.WriteString("\\t")
+ case '"':
+ _, err = b.WriteString("\\")
+ case '\\':
+ _, err = b.WriteString("\\\\")
+ default:
+ if c >= 0x20 && c < 0x7f {
+ err = b.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(b, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return b.WriteByte('"')
+}
+
// marshalUnknownGroupText prints the raw contents of a group (or, when
// topLevel is true, a whole unknown-field buffer) as `tag: value`
// entries, recursing into nested groups. A nested call must end with
// an end-group tag; hitting EOF first is an error, whereas at top level
// EOF simply terminates the output.
func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error {
	first := true
	for {
		if in.EOF() {
			if topLevel {
				return nil
			}
			// this is a nested message: we are expecting an end-group tag, not EOF!
			return io.ErrUnexpectedEOF
		}
		tag, wireType, err := in.DecodeTagAndWireType()
		if err != nil {
			return err
		}
		if wireType == proto.WireEndGroup {
			return nil
		}
		err = b.maybeNext(&first)
		if err != nil {
			return err
		}
		// fields are identified by bare tag number
		_, err = fmt.Fprintf(b, "%d", tag)
		if err != nil {
			return err
		}
		if wireType == proto.WireStartGroup {
			// nested group: recurse, wrapped in braces
			err = b.WriteByte('{')
			if err != nil {
				return err
			}
			err = b.start()
			if err != nil {
				return err
			}
			err = marshalUnknownGroupText(b, in, false)
			if err != nil {
				return err
			}
			err = b.end()
			if err != nil {
				return err
			}
			err = b.WriteByte('}')
			if err != nil {
				return err
			}
			continue
		} else {
			err = b.sep()
			if err != nil {
				return err
			}
			if wireType == proto.WireBytes {
				contents, err := in.DecodeRawBytes(false)
				if err != nil {
					return err
				}
				err = writeString(b, string(contents))
				if err != nil {
					return err
				}
			} else {
				// scalar wire types are printed as unsigned decimals
				var v uint64
				switch wireType {
				case proto.WireVarint:
					v, err = in.DecodeVarint()
				case proto.WireFixed32:
					v, err = in.DecodeFixed32()
				case proto.WireFixed64:
					v, err = in.DecodeFixed64()
				default:
					return proto.ErrInternalBadWireType
				}
				if err != nil {
					return err
				}
				_, err = b.WriteString(strconv.FormatUint(v, 10))
				if err != nil {
					return err
				}
			}
		}
	}
}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format
+func (m *Message) UnmarshalText(text []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMergeText(text); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+ return m.unmarshalText(newReader(text), tokenEOF)
+}
+
// unmarshalText reads fields from tr into m until the given end token is seen:
// tokenEOF for a top-level message, or the matching close brace/angle bracket
// for a nested message or group. Unknown fields referenced by tag number are
// skipped; unknown field names are an error (except for the expanded Any
// syntax, which is handled specially).
func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
	for {
		tok := tr.next()
		if tok.tokTyp == end {
			return nil
		}
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		}
		var fd *desc.FieldDescriptor
		var extendedAnyType *desc.MessageDescriptor
		if tok.tokTyp == tokenInt {
			// tag number (indicates unknown field)
			tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
			if err != nil {
				return err
			}
			itag := int32(tag)
			fd = m.FindFieldDescriptor(itag)
			if fd == nil {
				// can't parse the value w/out field descriptor, so skip it
				tok = tr.next()
				if tok.tokTyp == tokenEOF {
					return io.ErrUnexpectedEOF
				} else if tok.tokTyp == tokenOpenBrace {
					// group syntax: skip the whole { ... } body
					if err := skipMessageText(tr, true); err != nil {
						return err
					}
				} else if tok.tokTyp == tokenColon {
					if err := skipFieldValueText(tr); err != nil {
						return err
					}
				} else {
					return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
				}
				tok = tr.peek()
				if tok.tokTyp.IsSep() {
					tr.next() // consume separator
				}
				continue
			}
		} else {
			fieldName, err := unmarshalFieldNameText(tr, tok)
			if err != nil {
				return err
			}
			fd = m.FindFieldDescriptorByName(fieldName)
			if fd == nil {
				// See if it's a group name
				for _, field := range m.md.GetFields() {
					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
						fd = field
						break
					}
				}
				if fd == nil {
					// maybe this is an extended Any
					if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
						// strip surrounding "[" and "]" and extract type name from URL
						typeUrl := fieldName[1 : len(fieldName)-1]
						mname := typeUrl
						if slash := strings.LastIndex(mname, "/"); slash >= 0 {
							mname = mname[slash+1:]
						}
						// TODO: add a way to weave an AnyResolver to this point
						extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
						if extendedAnyType == nil {
							return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
						}
						// field 1 is "type_url"
						typeUrlField := m.md.FindFieldByNumber(1)
						if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
							return err
						}
					} else {
						// TODO: add a flag to just ignore unrecognized field names
						return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
					}
				}
			}
		}
		tok = tr.next()
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		}
		if extendedAnyType != nil {
			// consume optional colon; make sure this is a "start message" token
			if tok.tokTyp == tokenColon {
				tok = tr.next()
				if tok.tokTyp == tokenEOF {
					return io.ErrUnexpectedEOF
				}
			}
			if tok.tokTyp.EndToken() == tokenError {
				return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
			}

			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
			g := m.mf.NewDynamicMessage(extendedAnyType)
			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
				return err
			}
			// now we marshal the message to bytes and store in the Any
			b, err := g.Marshal()
			if err != nil {
				return err
			}
			// field 2 is "value"
			anyValueField := m.md.FindFieldByNumber(2)
			if err := m.TrySetField(anyValueField, b); err != nil {
				return err
			}

		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
			tok.tokTyp.EndToken() != tokenError {

			// message/group value in { ... } or < ... > form
			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
			g := m.mf.NewDynamicMessage(fd.GetMessageType())
			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
				return err
			}
			if fd.IsRepeated() {
				if err := m.TryAddRepeatedField(fd, g); err != nil {
					return err
				}
			} else {
				if err := m.TrySetField(fd, g); err != nil {
					return err
				}
			}
		} else {
			// scalar (or bracketed list) value: requires the colon
			if tok.tokTyp != tokenColon {
				return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
			}
			if err := m.unmarshalFieldValueText(fd, tr); err != nil {
				return err
			}
		}
		tok = tr.peek()
		if tok.tokTyp.IsSep() {
			tr.next() // consume separator
		}
	}
}
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+ md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+ if md == nil {
+ // couldn't find it; see if we have this message linked in
+ md, _ = desc.LoadMessageDescriptor(name)
+ }
+ return md
+}
+
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+ if _, ok := seen[fd]; ok {
+ // already checked this file
+ return nil
+ }
+ seen[fd] = struct{}{}
+ md := fd.FindMessage(name)
+ if md != nil {
+ return md
+ }
+ // not in this file so recursively search its deps
+ for _, dep := range fd.GetDependencies() {
+ md = findMessageInTransitiveDeps(name, dep, seen)
+ if md != nil {
+ return md
+ }
+ }
+ // couldn't find it
+ return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+ var msg string
+ if tok.tokTyp == tokenError {
+ msg = tok.val.(error).Error()
+ } else {
+ msg = fmt.Sprintf(format, args...)
+ }
+ return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
// setFunction stores a parsed value into a message field: either merging into
// a singular field or appending to a repeated field.
type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
+func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
+ var set setFunction
+ if fd.IsRepeated() {
+ set = (*Message).addRepeatedField
+ } else {
+ set = mergeField
+ }
+ tok := tr.peek()
+ if tok.tokTyp == tokenOpenBracket {
+ tr.next() // consume tok
+ for {
+ if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
+ return err
+ }
+ tok = tr.peek()
+ if tok.tokTyp == tokenCloseBracket {
+ tr.next() // consume tok
+ return nil
+ } else if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+ }
+ }
+ return m.unmarshalFieldElementText(fd, tr, set)
+}
+
+func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
+ tok := tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ }
+
+ var expected string
+ switch fd.GetType() {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ if tok.tokTyp == tokenIdent {
+ if tok.val.(string) == "true" {
+ return set(m, fd, true)
+ } else if tok.val.(string) == "false" {
+ return set(m, fd, false)
+ }
+ }
+ expected = "boolean value"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ if tok.tokTyp == tokenString {
+ return set(m, fd, []byte(tok.val.(string)))
+ }
+ expected = "bytes string value"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ if tok.tokTyp == tokenString {
+ return set(m, fd, tok.val)
+ }
+ expected = "string value"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ switch tok.tokTyp {
+ case tokenFloat:
+ return set(m, fd, float32(tok.val.(float64)))
+ case tokenInt:
+ if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, float32(f))
+ }
+ case tokenIdent:
+ ident := strings.ToLower(tok.val.(string))
+ if ident == "inf" {
+ return set(m, fd, float32(math.Inf(1)))
+ } else if ident == "nan" {
+ return set(m, fd, float32(math.NaN()))
+ }
+ case tokenMinus:
+ peeked := tr.peek()
+ if peeked.tokTyp == tokenIdent {
+ ident := strings.ToLower(peeked.val.(string))
+ if ident == "inf" {
+ tr.next() // consume peeked token
+ return set(m, fd, float32(math.Inf(-1)))
+ }
+ }
+ }
+ expected = "float value"
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ switch tok.tokTyp {
+ case tokenFloat:
+ return set(m, fd, tok.val)
+ case tokenInt:
+ if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, f)
+ }
+ case tokenIdent:
+ ident := strings.ToLower(tok.val.(string))
+ if ident == "inf" {
+ return set(m, fd, math.Inf(1))
+ } else if ident == "nan" {
+ return set(m, fd, math.NaN())
+ }
+ case tokenMinus:
+ peeked := tr.peek()
+ if peeked.tokTyp == tokenIdent {
+ ident := strings.ToLower(peeked.val.(string))
+ if ident == "inf" {
+ tr.next() // consume peeked token
+ return set(m, fd, math.Inf(-1))
+ }
+ }
+ }
+ expected = "float value"
+ case descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, int32(i))
+ }
+ }
+ expected = "int value"
+ case descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_SINT64,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, i)
+ }
+ }
+ expected = "int value"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, uint32(i))
+ }
+ }
+ expected = "unsigned int value"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
+ return err
+ } else {
+ return set(m, fd, i)
+ }
+ }
+ expected = "unsigned int value"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ if tok.tokTyp == tokenIdent {
+ // TODO: add a flag to just ignore unrecognized enum value names?
+ vd := fd.GetEnumType().FindValueByName(tok.val.(string))
+ if vd != nil {
+ return set(m, fd, vd.GetNumber())
+ }
+ } else if tok.tokTyp == tokenInt {
+ if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+ return err
+ } else {
+ return set(m, fd, int32(i))
+ }
+ }
+ expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+ endTok := tok.tokTyp.EndToken()
+ if endTok != tokenError {
+ dm := m.mf.NewDynamicMessage(fd.GetMessageType())
+ if err := dm.unmarshalText(tr, endTok); err != nil {
+ return err
+ }
+ // TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
+ // proto package to unmarshal it. But the text parser isn't particularly amenable
+ // to that, so we instead convert a dynamic message to a generated one if the
+ // known-type registry knows about the generated type...
+ var ktr *KnownTypeRegistry
+ if m.mf != nil {
+ ktr = m.mf.ktr
+ }
+ pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
+ if pm != nil {
+ if err := dm.ConvertTo(pm); err != nil {
+ return set(m, fd, pm)
+ }
+ }
+ return set(m, fd, dm)
+ }
+ expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
+ default:
+ return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
+ }
+
+ // if we get here, token was wrong type; create error message
+ var article string
+ if strings.Contains("aieou", expected[0:1]) {
+ article = "an"
+ } else {
+ article = "a"
+ }
+ return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
+}
+
+func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
+ if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
+ // extension name
+ var closeType tokenType
+ var closeChar string
+ if tok.tokTyp == tokenOpenBracket {
+ closeType = tokenCloseBracket
+ closeChar = "close bracket ']'"
+ } else {
+ closeType = tokenCloseParen
+ closeChar = "close paren ')'"
+ }
+ // must be followed by an identifier
+ idents := make([]string, 0, 1)
+ for {
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return "", io.ErrUnexpectedEOF
+ } else if tok.tokTyp != tokenIdent {
+ return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
+ }
+ idents = append(idents, tok.val.(string))
+ // and then close bracket/paren, or "/" to keep adding URL elements to name
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return "", io.ErrUnexpectedEOF
+ } else if tok.tokTyp == closeType {
+ break
+ } else if tok.tokTyp != tokenSlash {
+ return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
+ }
+ }
+ return "[" + strings.Join(idents, "/") + "]", nil
+ } else if tok.tokTyp == tokenIdent {
+ // normal field name
+ return tok.val.(string), nil
+ } else {
+ return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
+ }
+}
+
+func skipFieldNameText(tr *txtReader) error {
+ tok := tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+ return nil
+ } else {
+ _, err := unmarshalFieldNameText(tr, tok)
+ return err
+ }
+}
+
+func skipFieldValueText(tr *txtReader) error {
+ tok := tr.peek()
+ if tok.tokTyp == tokenOpenBracket {
+ tr.next() // consume tok
+ for {
+ if err := skipFieldElementText(tr); err != nil {
+ return err
+ }
+ tok = tr.peek()
+ if tok.tokTyp == tokenCloseBracket {
+ tr.next() // consume tok
+ return nil
+ } else if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+
+ }
+ }
+ return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+ tok := tr.next()
+ switch tok.tokTyp {
+ case tokenEOF:
+ return io.ErrUnexpectedEOF
+ case tokenInt, tokenFloat, tokenString, tokenIdent:
+ return nil
+ case tokenOpenAngle:
+ return skipMessageText(tr, false)
+ default:
+ return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+ }
+}
+
// skipMessageText consumes and discards the fields of a message whose
// descriptor is unknown. For a group (isGroup) it stops at the closing '}',
// otherwise at the closing '>'; the closing token itself is left unconsumed
// for the caller.
func skipMessageText(tr *txtReader, isGroup bool) error {
	for {
		tok := tr.peek()
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		} else if isGroup && tok.tokTyp == tokenCloseBrace {
			return nil
		} else if !isGroup && tok.tokTyp == tokenCloseAngle {
			return nil
		}

		// field name or tag
		if err := skipFieldNameText(tr); err != nil {
			return err
		}

		// field value
		tok = tr.next()
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		} else if tok.tokTyp == tokenOpenBrace {
			// group syntax: value follows with no colon
			if err := skipMessageText(tr, true); err != nil {
				return err
			}
		} else if tok.tokTyp == tokenColon {
			if err := skipFieldValueText(tr); err != nil {
				return err
			}
		} else {
			return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
		}

		tok = tr.peek()
		if tok.tokTyp.IsSep() {
			tr.next() // consume separator
		}
	}
}
+
// tokenType identifies the lexical category of a token produced by txtReader.
type tokenType int

const (
	tokenError tokenType = iota
	tokenEOF
	tokenIdent
	tokenString
	tokenInt
	tokenFloat
	tokenColon
	tokenComma
	tokenSemiColon
	tokenOpenBrace
	tokenCloseBrace
	tokenOpenBracket
	tokenCloseBracket
	tokenOpenAngle
	tokenCloseAngle
	tokenOpenParen
	tokenCloseParen
	tokenSlash
	tokenMinus
)

// IsSep reports whether t is a field separator (comma or semicolon).
func (t tokenType) IsSep() bool {
	switch t {
	case tokenComma, tokenSemiColon:
		return true
	default:
		return false
	}
}

// EndToken returns the token type that closes t when t opens a message body
// ('<' or '{'); for any other token type it returns tokenError.
func (t tokenType) EndToken() tokenType {
	if t == tokenOpenAngle {
		return tokenCloseAngle
	}
	if t == tokenOpenBrace {
		return tokenCloseBrace
	}
	return tokenError
}
+
// token is a single lexical element of the text format, as produced by
// txtReader.
type token struct {
	tokTyp tokenType        // category of the token
	val    interface{}      // parsed value: string (idents/ints/strings), float64 (floats), rune (punctuation), or error (tokenError)
	txt    string           // raw text of the token as scanned
	pos    scanner.Position // position of the token in the input
}
+
// txtReader is a one-token-lookahead lexer over text-format input.
type txtReader struct {
	scanner    scanner.Scanner
	peeked     token // most recently scanned token; valid only when havePeeked
	havePeeked bool  // whether peeked holds an unconsumed token
}
+
+func newReader(text []byte) *txtReader {
+ sc := scanner.Scanner{}
+ sc.Init(bytes.NewReader(text))
+ sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
+ scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ // identifiers are same restrictions as Go identifiers, except we also allow dots since
+ // we accept fully-qualified names
+ sc.IsIdentRune = func(ch rune, i int) bool {
+ return ch == '_' || unicode.IsLetter(ch) ||
+ (i > 0 && unicode.IsDigit(ch)) ||
+ (i > 0 && ch == '.')
+ }
+ // ignore errors; we handle them if/when we see malformed tokens
+ sc.Error = func(s *scanner.Scanner, msg string) {}
+ return &txtReader{scanner: sc}
+}
+
// peek returns the next token without consuming it. Scanning problems are
// surfaced as a tokenError token (with the error in val) rather than being
// returned directly.
func (p *txtReader) peek() *token {
	if p.havePeeked {
		return &p.peeked
	}
	t := p.scanner.Scan()
	if t == scanner.EOF {
		p.peeked.tokTyp = tokenEOF
		p.peeked.val = nil
		p.peeked.txt = ""
		p.peeked.pos = p.scanner.Position
	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
		p.peeked.tokTyp = tokenError
		p.peeked.val = err
	}
	p.havePeeked = true
	return &p.peeked
}
+
// processToken converts a raw token from the scanner into p.peeked, parsing
// values where the token type permits. A non-nil return means the token was
// malformed (bad float/string literal, dangling minus, or an unexpected
// character).
func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
	p.peeked.pos = pos
	p.peeked.txt = text
	switch t {
	case scanner.Ident:
		p.peeked.tokTyp = tokenIdent
		p.peeked.val = text
	case scanner.Int:
		p.peeked.tokTyp = tokenInt
		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
	case scanner.Float:
		p.peeked.tokTyp = tokenFloat
		var err error
		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
			return err
		}
	case scanner.Char, scanner.String:
		p.peeked.tokTyp = tokenString
		var err error
		if p.peeked.val, err = strconv.Unquote(text); err != nil {
			return err
		}
	case '-': // unary minus, for negative ints and floats
		ch := p.scanner.Peek()
		if ch < '0' || ch > '9' {
			// not followed by a digit: a bare minus token (e.g. for "-inf")
			p.peeked.tokTyp = tokenMinus
			p.peeked.val = '-'
		} else {
			// scan the numeric part and fold the minus sign into its text
			t := p.scanner.Scan()
			if t == scanner.EOF {
				return io.ErrUnexpectedEOF
			} else if t == scanner.Float {
				p.peeked.tokTyp = tokenFloat
				text += p.scanner.TokenText()
				p.peeked.txt = text
				var err error
				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
					p.peeked.pos = p.scanner.Position
					return err
				}
			} else if t == scanner.Int {
				p.peeked.tokTyp = tokenInt
				text += p.scanner.TokenText()
				p.peeked.txt = text
				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
			} else {
				p.peeked.pos = p.scanner.Position
				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
			}
		}
	case ':':
		p.peeked.tokTyp = tokenColon
		p.peeked.val = ':'
	case ',':
		p.peeked.tokTyp = tokenComma
		p.peeked.val = ','
	case ';':
		p.peeked.tokTyp = tokenSemiColon
		p.peeked.val = ';'
	case '{':
		p.peeked.tokTyp = tokenOpenBrace
		p.peeked.val = '{'
	case '}':
		p.peeked.tokTyp = tokenCloseBrace
		p.peeked.val = '}'
	case '<':
		p.peeked.tokTyp = tokenOpenAngle
		p.peeked.val = '<'
	case '>':
		p.peeked.tokTyp = tokenCloseAngle
		p.peeked.val = '>'
	case '[':
		p.peeked.tokTyp = tokenOpenBracket
		p.peeked.val = '['
	case ']':
		p.peeked.tokTyp = tokenCloseBracket
		p.peeked.val = ']'
	case '(':
		p.peeked.tokTyp = tokenOpenParen
		p.peeked.val = '('
	case ')':
		p.peeked.tokTyp = tokenCloseParen
		p.peeked.val = ')'
	case '/':
		// only allowed to separate URL components in expanded Any format
		p.peeked.tokTyp = tokenSlash
		p.peeked.val = '/'
	default:
		return fmt.Errorf("invalid character: %c", t)
	}
	return nil
}
+
+func (p *txtReader) next() *token {
+ t := p.peek()
+ if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+ p.havePeeked = false
+ }
+ return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
new file mode 100644
index 0000000..3fca3eb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -0,0 +1,666 @@
+package grpcreflect
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+ "google.golang.org/grpc/status"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/internal"
+)
+
// elementNotFoundError is the error returned by reflective operations where the
// server does not recognize a given file name, symbol name, or extension.
type elementNotFoundError struct {
	name    string      // the file, symbol, or extendee name that was not found
	kind    elementKind // which kind of lookup failed
	symType symbolType  // only used when kind == elementKindSymbol
	tag     int32       // only used when kind == elementKindExtension

	// only errors with a kind of elementKindFile will have a cause, which means
	// the named file could not be resolved because of a dependency that could
	// not be found where cause describes the missing dependency
	cause *elementNotFoundError
}
+
// elementKind identifies which kind of element (symbol, file, or extension)
// a lookup failed to find.
type elementKind int

const (
	elementKindSymbol elementKind = iota
	elementKindFile
	elementKindExtension
)
+
// symbolType names the sort of element a symbol refers to; it is interpolated
// into "not found" error messages.
type symbolType string

const (
	symbolTypeService = "Service"
	symbolTypeMessage = "Message"
	symbolTypeEnum    = "Enum"
	symbolTypeUnknown = "Symbol"
)
+
+func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+ return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+ first := true
+ var b bytes.Buffer
+ for ; e != nil; e = e.cause {
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(&b, "\ncaused by: ")
+ }
+ switch e.kind {
+ case elementKindSymbol:
+ fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+ case elementKindExtension:
+ fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+ default:
+ fmt.Fprintf(&b, "File not found: %s", e.name)
+ }
+ }
+ return b.String()
+}
+
+// IsElementNotFoundError determines if the given error indicates that a file
+// name, symbol name, or extension field was could not be found by the server.
+func IsElementNotFoundError(err error) bool {
+ _, ok := err.(*elementNotFoundError)
+ return ok
+}
+
// ProtocolError is an error returned when the server sends a response of the
// wrong type.
type ProtocolError struct {
	missingType reflect.Type // the response message type that was expected but absent
}

// Error implements the error interface, naming the missing response type.
func (p ProtocolError) Error() string {
	return fmt.Sprintf("Protocol error: response was missing %v", p.missingType)
}
+
// extDesc is a cache key identifying an extension: the fully-qualified name of
// the extended message plus the extension's field number.
type extDesc struct {
	extendedMessageName string
	extensionNumber     int32
}
+
// Client is a client connection to a server for performing reflection calls
// and resolving remote symbols.
type Client struct {
	ctx  context.Context
	stub rpb.ServerReflectionClient

	// connMu guards the stream state below (presumably cancel and stream;
	// confirm against the send/Reset implementations, not shown here)
	connMu sync.Mutex
	cancel context.CancelFunc
	stream rpb.ServerReflection_ServerReflectionInfoClient

	// cacheMu guards the four caches below
	cacheMu          sync.RWMutex
	protosByName     map[string]*dpb.FileDescriptorProto // downloaded raw descriptor protos, keyed by file name
	filesByName      map[string]*desc.FileDescriptor     // resolved descriptors, keyed by file name
	filesBySymbol    map[string]*desc.FileDescriptor     // resolved descriptors, keyed by contained symbol
	filesByExtension map[extDesc]*desc.FileDescriptor    // resolved descriptors, keyed by contained extension
}
+
+// NewClient creates a new Client with the given root context and using the
+// given RPC stub for talking to the server.
+func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+ cr := &Client{
+ ctx: ctx,
+ stub: stub,
+ protosByName: map[string]*dpb.FileDescriptorProto{},
+ filesByName: map[string]*desc.FileDescriptor{},
+ filesBySymbol: map[string]*desc.FileDescriptor{},
+ filesByExtension: map[extDesc]*desc.FileDescriptor{},
+ }
+ // don't leak a grpc stream
+ runtime.SetFinalizer(cr, (*Client).Reset)
+ return cr
+}
+
// FileByFilename asks the server for a file descriptor for the proto file with
// the given name. Results are cached; if the server does not know the file,
// well-known standard files are retried under their alternate (aliased) names.
func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
	// hit the cache first
	cr.cacheMu.RLock()
	if fd, ok := cr.filesByName[filename]; ok {
		cr.cacheMu.RUnlock()
		return fd, nil
	}
	fdp, ok := cr.protosByName[filename]
	cr.cacheMu.RUnlock()
	// not there? see if we've downloaded the proto
	if ok {
		return cr.descriptorFromProto(fdp)
	}

	req := &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
			FileByFilename: filename,
		},
	}
	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
	if isNotFound(err) {
		// file not found? see if we can look up via alternate name
		if alternate, ok := internal.StdFileAliases[filename]; ok {
			req := &rpb.ServerReflectionRequest{
				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
					FileByFilename: alternate,
				},
			}
			// ask for the alternate name, but re-label the result with the
			// name the caller used (passed as the alias argument)
			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
			if isNotFound(err) {
				err = fileNotFound(filename, nil)
			}
		} else {
			err = fileNotFound(filename, nil)
		}
	} else if e, ok := err.(*elementNotFoundError); ok {
		// a dependency could not be resolved; chain it as the cause
		err = fileNotFound(filename, e)
	}
	return fd, err
}
+
+// FileContainingSymbol asks the server for a file descriptor for the proto file
+// that declares the given fully-qualified symbol.
+func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
+ // hit the cache first
+ cr.cacheMu.RLock()
+ fd, ok := cr.filesBySymbol[symbol]
+ cr.cacheMu.RUnlock()
+ if ok {
+ return fd, nil
+ }
+
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: symbol,
+ },
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ if isNotFound(err) {
+ err = symbolNotFound(symbol, symbolTypeUnknown, nil)
+ } else if e, ok := err.(*elementNotFoundError); ok {
+ err = symbolNotFound(symbol, symbolTypeUnknown, e)
+ }
+ return fd, err
+}
+
+// FileContainingExtension asks the server for a file descriptor for the proto
+// file that declares an extension with the given number for the given
+// fully-qualified message name.
+func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
+ // hit the cache first
+ cr.cacheMu.RLock()
+ fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+ cr.cacheMu.RUnlock()
+ if ok {
+ return fd, nil
+ }
+
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &rpb.ExtensionRequest{
+ ContainingType: extendedMessageName,
+ ExtensionNumber: extensionNumber,
+ },
+ },
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ if isNotFound(err) {
+ err = extensionNotFound(extendedMessageName, extensionNumber, nil)
+ } else if e, ok := err.(*elementNotFoundError); ok {
+ err = extensionNotFound(extendedMessageName, extensionNumber, e)
+ }
+ return fd, err
+}
+
// getAndCacheFileDescriptors sends the given reflection request, caches every
// descriptor proto found in the response, and resolves the first descriptor
// into a *desc.FileDescriptor. expectedName is the file name that was asked
// for ("" when the request was not by name); alias, when non-empty, is the
// name the caller actually used, re-applied if the server knows the file under
// expectedName instead.
func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
	resp, err := cr.send(req)
	if err != nil {
		return nil, err
	}

	fdResp := resp.GetFileDescriptorResponse()
	if fdResp == nil {
		return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()}
	}

	// Response can contain the result file descriptor, but also its transitive
	// deps. Furthermore, protocol states that subsequent requests do not need
	// to send transitive deps that have been sent in prior responses. So we
	// need to cache all file descriptors and then return the first one (which
	// should be the answer). If we're looking for a file by name, we can be
	// smarter and make sure to grab one by name instead of just grabbing the
	// first one.
	var firstFd *dpb.FileDescriptorProto
	for _, fdBytes := range fdResp.FileDescriptorProto {
		fd := &dpb.FileDescriptorProto{}
		if err = proto.Unmarshal(fdBytes, fd); err != nil {
			return nil, err
		}

		if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName {
			// we found a file was aliased, so we need to update the proto to reflect that
			fd.Name = proto.String(alias)
		}

		cr.cacheMu.Lock()
		// see if this file was created and cached concurrently
		if firstFd == nil {
			if d, ok := cr.filesByName[fd.GetName()]; ok {
				cr.cacheMu.Unlock()
				return d, nil
			}
		}
		// store in cache of raw descriptor protos, but don't overwrite existing protos
		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
			fd = existingFd
		} else {
			cr.protosByName[fd.GetName()] = fd
		}
		cr.cacheMu.Unlock()
		if firstFd == nil {
			firstFd = fd
		}
	}
	if firstFd == nil {
		// response contained no descriptors at all
		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
	}

	return cr.descriptorFromProto(firstFd)
}
+
+func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+ deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+ for i, depName := range fd.GetDependency() {
+ if dep, err := cr.FileByFilename(depName); err != nil {
+ return nil, err
+ } else {
+ deps[i] = dep
+ }
+ }
+ d, err := desc.CreateFileDescriptor(fd, deps...)
+ if err != nil {
+ return nil, err
+ }
+ d = cr.cacheFile(d)
+ return d, nil
+}
+
+func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+ cr.cacheMu.Lock()
+ defer cr.cacheMu.Unlock()
+
+ // cache file descriptor by name, but don't overwrite existing entry
+ // (existing entry could come from concurrent caller)
+ if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
+ return existingFd
+ }
+ cr.filesByName[fd.GetName()] = fd
+
+ // also cache by symbols and extensions
+ for _, m := range fd.GetMessageTypes() {
+ cr.cacheMessageLocked(fd, m)
+ }
+ for _, e := range fd.GetEnumTypes() {
+ cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ for _, v := range e.GetValues() {
+ cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+ }
+ }
+ for _, e := range fd.GetExtensions() {
+ cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+ }
+ for _, s := range fd.GetServices() {
+ cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+ for _, m := range s.GetMethods() {
+ cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+ }
+ }
+
+ return fd
+}
+
+func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
+ cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+ for _, f := range md.GetFields() {
+ cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+ }
+ for _, o := range md.GetOneOfs() {
+ cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+ }
+ for _, e := range md.GetNestedEnumTypes() {
+ cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ for _, v := range e.GetValues() {
+ cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+ }
+ }
+ for _, e := range md.GetNestedExtensions() {
+ cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+ }
+ for _, m := range md.GetNestedMessageTypes() {
+ cr.cacheMessageLocked(fd, m) // recurse
+ }
+}
+
+// AllExtensionNumbersForType asks the server for all known extension numbers
+// for the given fully-qualified message name.
+func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: extendedMessageName,
+ },
+ }
+ resp, err := cr.send(req)
+ if err != nil {
+ if isNotFound(err) {
+ return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
+ }
+ return nil, err
+ }
+
+ extResp := resp.GetAllExtensionNumbersResponse()
+ if extResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+ }
+ return extResp.ExtensionNumber, nil
+}
+
+// ListServices asks the server for the fully-qualified names of all exposed
+// services.
+func (cr *Client) ListServices() ([]string, error) {
+ req := &rpb.ServerReflectionRequest{
+ MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+ // proto doesn't indicate any purpose for this value and server impl
+ // doesn't actually use it...
+ ListServices: "*",
+ },
+ }
+ resp, err := cr.send(req)
+ if err != nil {
+ return nil, err
+ }
+
+ listResp := resp.GetListServicesResponse()
+ if listResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+ }
+ serviceNames := make([]string, len(listResp.Service))
+ for i, s := range listResp.Service {
+ serviceNames[i] = s.Name
+ }
+ return serviceNames, nil
+}
+
+func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+ // we allow one immediate retry, in case we have a stale stream
+ // (e.g. closed by server)
+ resp, err := cr.doSend(true, req)
+ if err != nil {
+ return nil, err
+ }
+
+ // convert error response messages into errors
+ errResp := resp.GetErrorResponse()
+ if errResp != nil {
+ return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
+ }
+
+ return resp, nil
+}
+
+func isNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ s, ok := status.FromError(err)
+ return ok && s.Code() == codes.NotFound
+}
+
+func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+ // TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
+ // (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
+ // delivered in correct oder.
+ cr.connMu.Lock()
+ defer cr.connMu.Unlock()
+ return cr.doSendLocked(retry, req)
+}
+
+func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+ if err := cr.initStreamLocked(); err != nil {
+ return nil, err
+ }
+
+ if err := cr.stream.Send(req); err != nil {
+ if err == io.EOF {
+ // if send returns EOF, must call Recv to get real underlying error
+ _, err = cr.stream.Recv()
+ }
+ cr.resetLocked()
+ if retry {
+ return cr.doSendLocked(false, req)
+ }
+ return nil, err
+ }
+
+ if resp, err := cr.stream.Recv(); err != nil {
+ cr.resetLocked()
+ if retry {
+ return cr.doSendLocked(false, req)
+ }
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
+
+func (cr *Client) initStreamLocked() error {
+ if cr.stream != nil {
+ return nil
+ }
+ var newCtx context.Context
+ newCtx, cr.cancel = context.WithCancel(cr.ctx)
+ var err error
+ cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+ return err
+}
+
+// Reset ensures that any active stream with the server is closed, releasing any
+// resources.
+func (cr *Client) Reset() {
+ cr.connMu.Lock()
+ defer cr.connMu.Unlock()
+ cr.resetLocked()
+}
+
+func (cr *Client) resetLocked() {
+ if cr.stream != nil {
+ cr.stream.CloseSend()
+ for {
+ // drain the stream, this covers io.EOF too
+ if _, err := cr.stream.Recv(); err != nil {
+ break
+ }
+ }
+ cr.stream = nil
+ }
+ if cr.cancel != nil {
+ cr.cancel()
+ cr.cancel = nil
+ }
+}
+
+// ResolveService asks the server to resolve the given fully-qualified service
+// name into a service descriptor.
+func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) {
+ file, err := cr.FileContainingSymbol(serviceName)
+ if err != nil {
+ return nil, setSymbolType(err, serviceName, symbolTypeService)
+ }
+ d := file.FindSymbol(serviceName)
+ if d == nil {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+ if s, ok := d.(*desc.ServiceDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+}
+
+// ResolveMessage asks the server to resolve the given fully-qualified message
+// name into a message descriptor.
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) {
+ file, err := cr.FileContainingSymbol(messageName)
+ if err != nil {
+ return nil, setSymbolType(err, messageName, symbolTypeMessage)
+ }
+ d := file.FindSymbol(messageName)
+ if d == nil {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+ if s, ok := d.(*desc.MessageDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+}
+
+// ResolveEnum asks the server to resolve the given fully-qualified enum name
+// into an enum descriptor.
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) {
+ file, err := cr.FileContainingSymbol(enumName)
+ if err != nil {
+ return nil, setSymbolType(err, enumName, symbolTypeEnum)
+ }
+ d := file.FindSymbol(enumName)
+ if d == nil {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+ if s, ok := d.(*desc.EnumDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+}
+
+func setSymbolType(err error, name string, symType symbolType) error {
+ if e, ok := err.(*elementNotFoundError); ok {
+ if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown {
+ e.symType = symType
+ }
+ }
+ return err
+}
+
+// ResolveEnumValues asks the server to resolve the given fully-qualified enum
+// name into a map of names to numbers that represents the enum's values.
+func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) {
+ enumDesc, err := cr.ResolveEnum(enumName)
+ if err != nil {
+ return nil, err
+ }
+ vals := map[string]int32{}
+ for _, valDesc := range enumDesc.GetValues() {
+ vals[valDesc.GetName()] = valDesc.GetNumber()
+ }
+ return vals, nil
+}
+
+// ResolveExtension asks the server to resolve the given extension number and
+// fully-qualified message name into a field descriptor.
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) {
+ file, err := cr.FileContainingExtension(extendedType, extensionNumber)
+ if err != nil {
+ return nil, err
+ }
+ d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file})
+ if d == nil {
+ return nil, extensionNotFound(extendedType, extensionNumber, nil)
+ } else {
+ return d, nil
+ }
+}
+
+func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
+ // search extensions in this scope
+ for _, ext := range scope.extensions() {
+ if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType {
+ return ext
+ }
+ }
+
+ // if not found, search nested scopes
+ for _, nested := range scope.nestedScopes() {
+ ext := findExtension(extendedType, extensionNumber, nested)
+ if ext != nil {
+ return ext
+ }
+ }
+
+ return nil
+}
+
+type extensionScope interface {
+ extensions() []*desc.FieldDescriptor
+ nestedScopes() []extensionScope
+}
+
+// fileDescriptorExtensions implements extensionHolder interface on top of
+// FileDescriptorProto
+type fileDescriptorExtensions struct {
+ proto *desc.FileDescriptor
+}
+
+func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return fde.proto.GetExtensions()
+}
+
+func (fde fileDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(fde.proto.GetMessageTypes()))
+ for i, m := range fde.proto.GetMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
+
+// msgDescriptorExtensions implements extensionHolder interface on top of
+// DescriptorProto
+type msgDescriptorExtensions struct {
+ proto *desc.MessageDescriptor
+}
+
+func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return mde.proto.GetNestedExtensions()
+}
+
+func (mde msgDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes()))
+ for i, m := range mde.proto.GetNestedMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
new file mode 100644
index 0000000..ec7bd02
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
@@ -0,0 +1,10 @@
+// Package grpcreflect provides GRPC-specific extensions to protobuf reflection.
+// This includes a way to access rich service descriptors for all services that
+// a GRPC server exports.
+//
+// Also included is an easy-to-use client for the GRPC reflection service
+// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that
+// supports the reflection service) for metadata on its exported services, which
+// could be used to construct a dynamic client. (See the grpcdynamic package in
+// this same repo for more on that.)
+package grpcreflect
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 0000000..c9ef619
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,61 @@
+package grpcreflect
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
+func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+ descs := map[string]*desc.ServiceDescriptor{}
+ for name, info := range s.GetServiceInfo() {
+ file, ok := info.Metadata.(string)
+ if !ok {
+ return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+ }
+ fd, err := desc.LoadFileDescriptor(file)
+ if err != nil {
+ return nil, err
+ }
+ d := fd.FindSymbol(name)
+ if d == nil {
+ return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+ }
+ sd, ok := d.(*desc.ServiceDescriptor)
+ if !ok {
+ return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d)
+ }
+ descs[name] = sd
+ }
+ return descs, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) {
+ file, ok := svc.Metadata.(string)
+ if !ok {
+ return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata)
+ }
+ fd, err := desc.LoadFileDescriptor(file)
+ if err != nil {
+ return nil, err
+ }
+ d := fd.FindSymbol(svc.ServiceName)
+ if d == nil {
+ return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName)
+ }
+ sd, ok := d.(*desc.ServiceDescriptor)
+ if !ok {
+ return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d)
+ }
+ return sd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..4a8b47a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases are the standard protos included with protoc, but older versions of
+// their respective packages registered them using incorrect paths.
+var StdFileAliases = map[string]string{
+ // Files for the github.com/golang/protobuf/ptypes package at one point were
+ // registered using the path where the proto files are mirrored in GOPATH,
+ // inside the golang/protobuf repo.
+ // (Fixed as of https://github.com/golang/protobuf/pull/412)
+ "google/protobuf/any.proto": "github.com/golang/protobuf/ptypes/any/any.proto",
+ "google/protobuf/duration.proto": "github.com/golang/protobuf/ptypes/duration/duration.proto",
+ "google/protobuf/empty.proto": "github.com/golang/protobuf/ptypes/empty/empty.proto",
+ "google/protobuf/struct.proto": "github.com/golang/protobuf/ptypes/struct/struct.proto",
+ "google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+ "google/protobuf/wrappers.proto": "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+ // Files for the google.golang.org/genproto/protobuf package at one point
+ // were registered with an anomalous "src/" prefix.
+ // (Fixed as of https://github.com/google/go-genproto/pull/31)
+ "google/protobuf/api.proto": "src/google/protobuf/api.proto",
+ "google/protobuf/field_mask.proto": "src/google/protobuf/field_mask.proto",
+ "google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+ "google/protobuf/type.proto": "src/google/protobuf/type.proto",
+
+ // Other standard files (descriptor.proto and compiler/plugin.proto) are
+ // registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+ // We provide aliasing in both directions, to support files with the
+ // proper import path linked against older versions of the generated
+ // files AND files that used the aliased import path but linked against
+ // newer versions of the generated files (which register with the
+ // correct path).
+
+ // Get all files defined above
+ keys := make([]string, 0, len(StdFileAliases))
+ for k := range StdFileAliases {
+ keys = append(keys, k)
+ }
+ // And add inverse mappings
+ for _, k := range keys {
+ alias := StdFileAliases[k]
+ StdFileAliases[alias] = k
+ }
+}
+
+type ErrNoSuchFile string
+
+func (e ErrNoSuchFile) Error() string {
+ return fmt.Sprintf("no such file: %q", string(e))
+}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+ fdb := proto.FileDescriptor(file)
+ aliased := false
+ if fdb == nil {
+ var ok bool
+ alias, ok := StdFileAliases[file]
+ if ok {
+ aliased = true
+ if fdb = proto.FileDescriptor(alias); fdb == nil {
+ return nil, ErrNoSuchFile(file)
+ }
+ } else {
+ return nil, ErrNoSuchFile(file)
+ }
+ }
+
+ fd, err := DecodeFileDescriptor(file, fdb)
+ if err != nil {
+ return nil, err
+ }
+
+ if aliased {
+ // the file descriptor will have the alias used to load it, but
+ // we need it to have the specified name in order to link it
+ fd.Name = proto.String(file)
+ }
+
+ return fd, nil
+}
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+ raw, err := decompress(fdb)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+ }
+ fd := dpb.FileDescriptorProto{}
+ if err := proto.Unmarshal(raw, &fd); err != nil {
+ return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+ }
+ return &fd, nil
+}
+
+func decompress(b []byte) ([]byte, error) {
+ r, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ out, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ return out, nil
+}